source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
util.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2018 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file internal/util.h
* \brief Internal utility functions
*/
#ifndef INTERNAL_UTIL_H_
#define INTERNAL_UTIL_H_
namespace qpp {
/**
* \namespace qpp::internal
* \brief Internal utility functions, do not use them directly or modify them
*/
namespace internal {
// integer index to multi-index, use C-style array for speed
// standard lexicographical order, e.g. 00, 01, 10, 11
inline void n2multiidx(idx n, idx numdims, const idx* const dims,
                       idx* result) noexcept {
    // error checks only in DEBUG version
#ifndef NDEBUG
    if (numdims > 0) // numdims equal zero is a no-op
    {
        // n must lie inside the index space, i.e. n < prod(dims)
        idx D = 1;
        for (idx i = 0; i < numdims; ++i)
            D *= dims[i];
        assert(n < D);
    }
#endif
    // no error checks in release version to improve speed
    // peel off the mixed-radix "digits" least-significant first:
    // result[last] = n % dims[last], then divide n by dims[last] and
    // move one position to the left, so result ends up in standard
    // lexicographical order
    for (idx i = 0; i < numdims; ++i) {
        result[numdims - i - 1] = n % (dims[numdims - i - 1]);
        n /= (dims[numdims - i - 1]);
    }
}
// silence g++4.9 bogus warning -Warray-bounds and -Wmaybe-uninitialized
// in qpp::internal::multiidx2n()
#if (__GNUC__ && !__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
// multi-index to integer index, use C-style array for speed,
// standard lexicographical order, e.g. 00->0, 01->1, 10->2, 11->3
inline idx multiidx2n(const idx* const midx, idx numdims,
                      const idx* const dims) noexcept {
    // error checks only in DEBUG version
#ifndef NDEBUG
    assert(numdims > 0);
#endif
    // no error checks in release version to improve speed

    // Static allocation for speed!
    // double the size for matrices reshaped as vectors
    // (maxn is a library-wide bound on the number of subsystems;
    // numdims is assumed to be at most 2 * maxn -- TODO confirm callers)
    idx part_prod[2 * maxn];

    idx result = 0;
    part_prod[numdims - 1] = 1;
    // part_prod[k] = dims[k+1] * ... * dims[numdims-1] is the weight of
    // digit k in the mixed-radix expansion; accumulate weighted digits
    for (idx i = 1; i < numdims; ++i) {
        part_prod[numdims - i - 1] = part_prod[numdims - i] * dims[numdims - i];
        result += midx[numdims - i - 1] * part_prod[numdims - i - 1];
    }

    // the least-significant digit has weight 1
    return result + midx[numdims - 1];
}
#if (__GNUC__ && !__clang__)
#pragma GCC diagnostic pop
#endif
// check square matrix
template <typename Derived>
bool check_square_mat(const Eigen::MatrixBase<Derived>& A) {
    // square means the two dimensions coincide
    const auto nrows = A.rows();
    const auto ncols = A.cols();
    return nrows == ncols;
}
// check whether input is a vector or not
template <typename Derived>
bool check_vector(const Eigen::MatrixBase<Derived>& A) {
    // a vector has exactly one dimension equal to 1 (a 1 x 1 matrix
    // qualifies as both a row and a column vector)
    if (A.rows() == 1)
        return true;
    return A.cols() == 1;
}
// check whether input is a row vector or not
template <typename Derived>
bool check_rvector(const Eigen::MatrixBase<Derived>& A) {
    // a row vector has a single row
    const bool single_row = (A.rows() == 1);
    return single_row;
}
// check whether input is a column vector or not
template <typename Derived>
bool check_cvector(const Eigen::MatrixBase<Derived>& A) {
    // a column vector has a single column
    const bool single_col = (A.cols() == 1);
    return single_col;
}
// check non-zero size of object that supports size() function
template <typename T>
bool check_nonzero_size(const T& x) noexcept {
    // empty objects report size() == 0
    const auto sz = x.size();
    return sz != 0;
}
// check that all sizes match
template <typename T1, typename T2>
bool check_matching_sizes(const T1& lhs, const T2& rhs) noexcept {
    // the two operands agree when they report the same number of elements
    const auto lsz = lhs.size();
    const auto rsz = rhs.size();
    return lsz == rsz;
}
// check that dims is a valid dimension vector
inline bool check_dims(const std::vector<idx>& dims) {
if (dims.size() == 0)
return false;
return std::find_if(std::begin(dims), std::end(dims),
[dims](idx i) -> bool {
if (i == 0)
return true;
else
return false;
}) == std::end(dims);
}
// check that valid dims match the dimensions
// of valid (non-zero sized) square matrix
template <typename Derived>
bool check_dims_match_mat(const std::vector<idx>& dims,
                          const Eigen::MatrixBase<Derived>& A) {
    // error checks only in DEBUG version
#ifndef NDEBUG
    assert(dims.size() > 0);
    assert(A.rows() == A.cols());
#endif
    // the (square) matrix dimension must equal the product of the
    // subsystem dimensions
    idx proddim = std::accumulate(std::begin(dims), std::end(dims),
                                  static_cast<idx>(1), std::multiplies<idx>());
    return proddim == static_cast<idx>(A.rows());
}
// check that valid dims match the dimensions of valid column vector
template <typename Derived>
bool check_dims_match_cvect(const std::vector<idx>& dims,
                            const Eigen::MatrixBase<Derived>& A) {
    // error checks only in DEBUG version
#ifndef NDEBUG
    assert(dims.size() > 0);
    assert(A.rows() > 0);
    assert(A.cols() == 1);
#endif
    // the vector length must equal the product of the subsystem dimensions
    idx proddim = 1;
    for (idx d : dims)
        proddim *= d;
    return proddim == static_cast<idx>(A.rows());
}
// check that valid dims match the dimensions of valid row vector
template <typename Derived>
bool check_dims_match_rvect(const std::vector<idx>& dims,
                            const Eigen::MatrixBase<Derived>& A) {
    // error checks only in DEBUG version
#ifndef NDEBUG
    assert(dims.size() > 0);
    assert(A.cols() > 0);
    assert(A.rows() == 1);
#endif
    // the vector length must equal the product of the subsystem dimensions
    // (fix: removed a stray empty statement ";" that followed the
    // accumulate call, keeping this function consistent with its
    // cvect/mat siblings)
    idx proddim = std::accumulate(std::begin(dims), std::end(dims),
                                  static_cast<idx>(1), std::multiplies<idx>());
    return proddim == static_cast<idx>(A.cols());
}
// check that all elements in valid dims equal to dim
inline bool check_eq_dims(const std::vector<idx>& dims, idx dim) noexcept {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
#endif
for (idx i : dims)
if (i != dim)
return false;
return true;
}
// check that subsys is valid with respect to valid dims
inline bool check_subsys_match_dims(const std::vector<idx>& subsys,
                                    const std::vector<idx>& dims) {
    // subsys can be empty

    // check valid number of subsystems
    if (subsys.size() > dims.size())
        return false;

    // sort the subsystems
    std::vector<idx> subsyssort = subsys;
    std::sort(std::begin(subsyssort), std::end(subsyssort));

    // check duplicates
    if (std::unique(std::begin(subsyssort), std::end(subsyssort)) !=
        std::end(subsyssort))
        return false;

    // check range of subsystems
    // fix: capture dims by reference (the old lambda copied the whole
    // vector by value on every call), and compare with i >= size()
    // instead of i > size() - 1, which wrapped around for empty dims
    return std::find_if(std::begin(subsyssort), std::end(subsyssort),
                        [&dims](idx i) -> bool {
                            return i >= dims.size();
                        }) == std::end(subsyssort);
}
// check matrix is 2 x 2
template <typename Derived>
bool check_qubit_matrix(const Eigen::MatrixBase<Derived>& A) noexcept {
    // a single-qubit operator acts on a 2-dimensional space: 2 x 2
    const bool two_rows = (A.rows() == 2);
    const bool two_cols = (A.cols() == 2);
    return two_rows && two_cols;
}
// check column vector is 2 x 1
template <typename Derived>
bool check_qubit_cvector(const Eigen::MatrixBase<Derived>& A) noexcept {
    // a qubit ket is a 2 x 1 column vector
    if (A.rows() != 2)
        return false;
    return A.cols() == 1;
}
// check row vector is 1 x 2
template <typename Derived>
bool check_qubit_rvector(const Eigen::MatrixBase<Derived>& A) noexcept {
    // a qubit bra is a 1 x 2 row vector
    if (A.rows() != 1)
        return false;
    return A.cols() == 2;
}
// check row vector is 1 x 2 or 2 x 1
template <typename Derived>
bool check_qubit_vector(const Eigen::MatrixBase<Derived>& A) noexcept {
    // accept either orientation: 1 x 2 (bra) or 2 x 1 (ket)
    if (A.rows() == 1 && A.cols() == 2)
        return true;
    return A.rows() == 2 && A.cols() == 1;
}
// check valid permutation
inline bool check_perm(const std::vector<idx>& perm) {
if (perm.size() == 0)
return false;
std::vector<idx> ordered(perm.size());
std::iota(std::begin(ordered), std::end(ordered), 0);
return std::is_permutation(std::begin(ordered), std::end(ordered),
std::begin(perm));
}
// Kronecker product of 2 matrices, preserve return type
// internal function for the variadic template function wrapper kron()
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar> kron2(const Eigen::MatrixBase<Derived1>& A,
                                         const Eigen::MatrixBase<Derived2>& B) {
    // evaluate the expression templates into concrete dense matrices
    const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rB = B.derived();

    // EXCEPTION CHECKS

    // check types
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::kron()");
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::kron()");
    // check zero-size
    if (!internal::check_nonzero_size(rB))
        throw exception::ZeroSize("qpp::kron()");
    // END EXCEPTION CHECKS

    idx Acols = static_cast<idx>(rA.cols());
    idx Arows = static_cast<idx>(rA.rows());
    idx Bcols = static_cast<idx>(rB.cols());
    idx Brows = static_cast<idx>(rB.rows());

    dyn_mat<typename Derived1::Scalar> result;
    result.resize(Arows * Brows, Acols * Bcols);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
    // column major order for speed
    // the (i, j)-th Brows x Bcols block of the result is rA(i, j) * rB;
    // iterations are independent, so the collapsed loops parallelize safely
    for (idx j = 0; j < Acols; ++j)
        for (idx i = 0; i < Arows; ++i)
            result.block(i * Brows, j * Bcols, Brows, Bcols) = rA(i, j) * rB;

    return result;
}
// Direct sum of 2 matrices, preserve return type
// internal function for the variadic template function wrapper dirsum()
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
dirsum2(const Eigen::MatrixBase<Derived1>& A,
        const Eigen::MatrixBase<Derived2>& B) {
    // evaluate the expression templates into concrete dense matrices
    const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rB = B.derived();

    // EXCEPTION CHECKS

    // check types
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::dirsum()");
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::dirsum()");
    // check zero-size
    if (!internal::check_nonzero_size(rB))
        throw exception::ZeroSize("qpp::dirsum()");
    // END EXCEPTION CHECKS

    idx Acols = static_cast<idx>(rA.cols());
    idx Arows = static_cast<idx>(rA.rows());
    idx Bcols = static_cast<idx>(rB.cols());
    idx Brows = static_cast<idx>(rB.rows());

    // block-diagonal layout [A 0; 0 B]; start from all-zeros so the
    // off-diagonal blocks need no explicit assignment
    dyn_mat<typename Derived1::Scalar> result =
        dyn_mat<typename Derived1::Scalar>::Zero(Arows + Brows, Acols + Bcols);

    result.block(0, 0, Arows, Acols) = rA;
    result.block(Arows, Acols, Brows, Bcols) = rB;

    return result;
}
// may be useful, extracts variadic template argument pack into a std::vector
template <typename T>
// base case: empty pack, nothing left to append
void variadic_vector_emplace(std::vector<T>&) {}
// recursive case: emplace the head of the pack, then recurse on the tail
template <typename T, typename First, typename... Args>
void variadic_vector_emplace(std::vector<T>& vec, First&& head,
                             Args&&... tail) {
    vec.emplace_back(std::forward<First>(head));
    variadic_vector_emplace(vec, std::forward<Args>(tail)...);
}
// returns the number of subsystems (each subsystem assumed of the same
// dimension d) from an object (ket/bra/density matrix) of size sz
inline idx get_num_subsys(idx sz, idx d) {
    // error checks only in DEBUG version
#ifndef NDEBUG
    assert(sz > 0);
    assert(d > 1);
#endif
    // N = log_d(sz), computed as a ratio of base-2 logarithms and
    // rounded to the nearest integer to absorb floating-point error
    // (sz is presumably an exact power of d -- callers should ensure this)
    return static_cast<idx>(std::llround(std::log2(sz) / std::log2(d)));
}
// returns the dimension of a subsystem (each subsystem assumed of the same
// dimension d) from an object (ket/bra/density matrix) of size sz consisting
// of N subsystems
inline idx get_dim_subsys(idx sz, idx N) {
    // error checks only in DEBUG version
#ifndef NDEBUG
    assert(N > 0);
    assert(sz > 0);
#endif
    // d = sz^(1/N), rounded to the nearest integer; the common
    // two-subsystem case goes through std::sqrt instead of the generic
    // std::pow path (presumably for precision/speed -- same result)
    if (N == 2)
        return static_cast<idx>(std::llround(std::sqrt(sz)));

    return static_cast<idx>(std::llround(std::pow(sz, 1. / N)));
}
// implementation details for pretty formatting
// pretty-printer mixin: formats a matrix-like object as aligned columns,
// chopping entries whose real and imaginary parts are below `chop`
struct Display_Impl_ {
    template <typename T>
    // T must support rows(), cols(), operator()(idx, idx) const
    std::ostream& display_impl_(const T& A, std::ostream& os,
                                double chop = qpp::chop) const {
        std::ostringstream ostr;
        ostr.copyfmt(os); // copy os' state

        // first pass: render every entry to a string
        std::vector<std::string> vstr;
        std::string strA;

        for (idx i = 0; i < static_cast<idx>(A.rows()); ++i) {
            for (idx j = 0; j < static_cast<idx>(A.cols()); ++j) {
                strA.clear(); // clear the temporary string
                ostr.clear();
                ostr.str(std::string{}); // clear the ostringstream

                // convert to complex
                double re = static_cast<cplx>(A(i, j)).real();
                double im = static_cast<cplx>(A(i, j)).imag();

                if (std::abs(re) < chop && std::abs(im) < chop) {
                    // both parts negligible: print a bare "0"
                    ostr << "0 "; // otherwise segfault on destruction
                                  // if using only vstr.push_back("0 ");
                                  // bug in MATLAB libmx
                    vstr.push_back(ostr.str());
                } else if (std::abs(re) < chop) {
                    // purely imaginary entry
                    ostr << im;
                    vstr.push_back(ostr.str() + "i");
                } else if (std::abs(im) < chop) {
                    // purely real entry
                    ostr << re;
                    vstr.push_back(ostr.str() + " ");
                } else {
                    // general complex entry: "re + imi" / "re - imi"
                    ostr << re;
                    strA = ostr.str();

                    strA += (im > 0 ? " + " : " - ");
                    ostr.clear();
                    ostr.str(std::string()); // clear
                    ostr << std::abs(im);
                    strA += ostr.str();
                    strA += "i";
                    vstr.push_back(strA);
                }
            }
        }

        // determine the maximum length of the entries in each column
        std::vector<idx> maxlengthcols(A.cols(), 0);

        for (idx i = 0; i < static_cast<idx>(A.rows()); ++i)
            for (idx j = 0; j < static_cast<idx>(A.cols()); ++j)
                if (vstr[i * A.cols() + j].size() > maxlengthcols[j])
                    maxlengthcols[j] = vstr[i * A.cols() + j].size();

        // finally display it!
        // second pass: right-align each column to its widest entry,
        // separating columns by two extra spaces
        for (idx i = 0; i < static_cast<idx>(A.rows()); ++i) {
            os << std::setw(static_cast<int>(maxlengthcols[0])) << std::right
               << vstr[i * A.cols()]; // display first column
            // then the rest
            for (idx j = 1; j < static_cast<idx>(A.cols()); ++j)
                os << std::setw(static_cast<int>(maxlengthcols[j] + 2))
                   << std::right << vstr[i * A.cols() + j];

            // no trailing newline after the last row
            if (i < static_cast<idx>(A.rows()) - 1)
                os << std::endl;
        }

        return os;
    }
};
} /* namespace internal */
} /* namespace qpp */
#endif /* INTERNAL_UTIL_H_ */
|
020_pi.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
double compute_partial_pi(long nr_tries, unsigned int *seed);
/* Monte Carlo estimation of pi: nr_blocks work items, each sampling
 * partial_nr_tries random points in the unit square, are distributed
 * over the OpenMP threads; the summed hit fractions yield pi/4. */
int main() {
    const long nr_tries = 1000000;
    const int nr_blocks = 10;
    int num_threads = 1;
    double nr_success = 0.0;
#pragma omp parallel default(none) shared(nr_success) shared(num_threads)
    {
        int thread_num = 0;
        unsigned int seed = 0;
/* bug fix: the standard OpenMP feature-test macro is _OPENMP, not
 * __OPENMP; the misspelled guard meant num_threads stayed 1 and every
 * thread used seed 0 even in OpenMP builds */
#ifdef _OPENMP
        thread_num = omp_get_thread_num();
        num_threads = omp_get_num_threads();
        seed = thread_num; /* per-thread seed so streams differ */
#endif
        printf("thread %d of %d\n", thread_num, num_threads);
        long partial_nr_tries = nr_tries/(num_threads*nr_blocks);
#pragma omp for reduction(+:nr_success)
        for (int i = 0; i < nr_blocks; i++)
            nr_success += compute_partial_pi(partial_nr_tries, &seed);
    }
    /* bug fix: the omp for executes nr_blocks iterations in total
     * (shared among the threads), so nr_success is a sum of nr_blocks
     * fractions averaging pi/4 -- divide by nr_blocks only; the old
     * num_threads*nr_blocks denominator gave pi/num_threads */
    printf("pi = %.15lf\n", 4.0*nr_success/nr_blocks);
    return 0;
}
/* draw a uniform pseudo-random number in [0.0, 1.0] using the
 * reentrant rand_r(), advancing the caller-supplied seed */
double random_number(unsigned int *seed) {
    double draw = (double) rand_r(seed);
    return draw / RAND_MAX;
}
/* Sample nr_tries points uniformly in the unit square and return the
 * fraction that falls inside the quarter unit circle (approximates pi/4).
 * The seed is advanced in place via rand_r(), so the result is
 * deterministic for a given starting seed. */
double compute_partial_pi(long nr_tries, unsigned int *seed) {
    double x, y, nr_success = 0.0;
    for (long i = 0; i < nr_tries; i++) {
        /* uniform draws in [0, 1]; rand_r() inlined from the thin
         * random_number() wrapper so this block is self-contained */
        x = ((double) rand_r(seed))/RAND_MAX;
        y = ((double) rand_r(seed))/RAND_MAX;
        if (x*x + y*y < 1.0)
            nr_success += 1.0;
    }
    /* bug fix: removed "#pragma omp barrier" -- this function is called
     * from inside an "omp for" loop, and a barrier region may not be
     * closely nested inside a worksharing region (non-conforming OpenMP,
     * potential deadlock); the reduction already synchronizes results */
    return nr_success/nr_tries;
}
|
no_loop_1.c | #include <stdio.h>
#include <omp.h>
/* trivial device-callable helper (i -> i+1); the declare-target pair
 * makes it available inside offloaded target regions */
#pragma omp declare target
int foo(int i) { return i+1; }
#pragma omp end declare target
/* Offload regression test: copies b[] into a[] with several variants of
 * "target teams distribute parallel for" and verifies the result; the
 * CHECK lines below match per-kernel trace output (SGN codes).
 * NOTE(review): each combined loop construct below is applied to a braced
 * compound statement wrapping the for loop -- strictly, the construct
 * expects the for loop itself; presumably the compiler under test accepts
 * this form. Confirm against the toolchain before reformatting. */
int main()
{
    int N = 100000;

    /* VLA buffers initialized on the host */
    int a[N];
    int b[N];

    int i;

    for (i=0; i<N; i++)
        b[i]=i;

    for (i=0; i<N; i++)
        a[i]=0;

    int j;
    /* kernel 1: plain copy loop */
#pragma omp target teams distribute parallel for
    {
        for (j = 0; j< N; j++)
            a[j]=b[j];
    }

    /* kernel 2: same copy with a loop-local index */
#pragma omp target teams distribute parallel for
    {
        for (int k = 0; k< N; k++)
            a[k]=b[k];
    }

    /* kernel 3: copy plus a call to a declare-target function */
#pragma omp target teams distribute parallel for
    {
        for (int k = 0; k< N; k++) {
            a[k]=b[k];
            foo(k);
        }
    }

    /* kernel 4: copy plus an OpenMP runtime query inside the loop */
#pragma omp target teams distribute parallel for
    {
        for (int k = 0; k< N; k++) {
            a[k]=b[k];
            omp_get_num_teams();
        }
    }

    /* kernel 5: nested simd loop inside the distributed loop */
#pragma omp target teams distribute parallel for
    {
        for (int k = 0; k< N; k++) {
#pragma omp simd
            for (int p = 0; p < N; p++)
                a[k]=b[k];
        }
    }

    /* host-side verification: every element must have been copied */
    int rc = 0;
    for (i=0; i<N; i++)
        if (a[i] != b[i] ) {
            rc++;
            printf ("Wrong value: a[%d]=%d\n", i, a[i]);
        }

    if (!rc)
        printf("Success\n");

    return rc;
}
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:2
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:2
|
main.c | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
/*
* File: main.c
* Author: Alexandros Ioannidis
*
* Created on January 7, 2016, 1:35 PM
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
// ---------- CONSTANTS -------------------
#define VERSION "v1.1"
#define MASK_SIZE 3
#define IMAGE_GRAYSCALE_HUES 256
// ---------- GLOBAL VARIABLES ------------
// EDGE RECOGNITION MASK for Image Convolution calculation
int EDGE_MASK[][MASK_SIZE] = {
{0, 1, 0},
{1, -4, 1},
{0, 1, 0}
};
int **Image, // the total image
**AugImage, // the augmented image
**ImgConv; // image convolution
// ---------- FUNCTION PROTOTYPES for main() ----------------
void alloc_matrix(int ***data_ptr, int n, int m);
void free_matrix(int ***data_ptr, int n);
void readInputData(char*, int, int, int**);
void writeOutputData(char*, int, int, int**);
void flipHorizontal(int*, int, int, int*);
void flipVertical(int*, int, int, int*);
void print2DArray(int**, int, int);
void augmentImage(int**, int, int, int**);
void calcImgConv(int **AugImage, int **ImgConv, int rows, int cols);
/* Edge-detection convolution driver: reads a square image, zero-pads it,
 * convolves it with the (180-degree rotated) EDGE_MASK in parallel, and
 * writes the result, reporting wall-clock timings. */
int main(int argc, char** argv) {
    if (argc != 4) {
        fprintf(stderr, "SYNTAX: %s <imagesize> <inputfile> <outputfile>\n", argv[0]);
        exit(1);
    }

    int imagesize = atoi(argv[1]); // read image size
    char *inputfile = argv[2]; // read image filename
    char *outputfile = argv[3]; // read filename for convolution

    // get start time
    double parallelStart_t, parallelEnd_t, end_t, start_t = omp_get_wtime()*1000;
    int threads;

    int FLIPPED_HOR[MASK_SIZE][MASK_SIZE];
    // flip EDGE_MASK horizontally to FLIPPED_HOR, then vertically back
    // into EDGE_MASK (net effect: the mask rotated by 180 degrees, as
    // convolution requires)
    // bug fix: pass &M[0][0] (an int*) instead of the 2-D array itself;
    // int (*)[MASK_SIZE] is an incompatible pointer type for the flat
    // int* parameters of flipHorizontal()/flipVertical()
    flipHorizontal(&EDGE_MASK[0][0], MASK_SIZE, MASK_SIZE, &FLIPPED_HOR[0][0]);
    flipVertical(&FLIPPED_HOR[0][0], MASK_SIZE, MASK_SIZE, &EDGE_MASK[0][0]);

    // allocate memory for Image
    alloc_matrix(&Image, imagesize, imagesize);
    // read Image from file
    readInputData(inputfile, imagesize, imagesize, Image);
    // allocate memory for augmented (zero-padded) image
    alloc_matrix(&AugImage, imagesize + 2, imagesize + 2);
    // augment Image
    augmentImage(Image, imagesize, imagesize, AugImage);
    // destroy image matrix
    free_matrix(&Image, imagesize);

    // get parallel start time
    parallelStart_t = omp_get_wtime()*1000;
    // allocate memory for ImgConv
    alloc_matrix(&ImgConv, imagesize, imagesize);
#pragma omp parallel
    {
#pragma omp master
        {
            threads = omp_get_num_threads();
            printf("OpenMP Image (%s) %dx%d Convolution - Threads %d\n", VERSION, imagesize, imagesize, threads);
        }
        // calculate convolution (contains an orphaned "omp for", so it
        // must run inside this parallel region)
        calcImgConv(AugImage, ImgConv, imagesize, imagesize);
    }
    // get parallel end time
    parallelEnd_t = omp_get_wtime()*1000;

    // destroy augmented image
    free_matrix(&AugImage, imagesize + 2);
    // write Image to file
    writeOutputData(outputfile, imagesize, imagesize, ImgConv);
    // destroy ImgConv
    free_matrix(&ImgConv, imagesize);

    // get end time
    end_t = omp_get_wtime()*1000;
    printf("\nTotal duration:\t%0.2f msecs", (end_t-start_t));
    printf("\nConvolution calculation duration:\t%0.2f msecs", (parallelEnd_t-parallelStart_t));
    printf("\n");
    return (EXIT_SUCCESS);
}
/* Allocate an n x m int matrix (array of row pointers), initialize each
 * cell to the deterministic pattern i*j, and return it via *data_ptr.
 * Aborts the program on allocation failure (previously a failed malloc
 * was dereferenced unchecked). */
void alloc_matrix(int ***data_ptr, int n, int m) {
    int row, i, j;
    int **data;

    data = (int **) malloc(n * sizeof (int *));
    if (data == NULL) {
        fprintf(stderr, "alloc_matrix: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (row = 0; row < n; row++) {
        data[row] = (int *) malloc(m * sizeof (int));
        if (data[row] == NULL) {
            fprintf(stderr, "alloc_matrix: out of memory\n");
            exit(EXIT_FAILURE);
        }
    }

    /* fill with i*j so the matrix has defined contents even before the
     * caller overwrites it (e.g. when the input file cannot be read) */
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
            data[i][j] = i * j;

    *data_ptr = data;
}
/* Free an n-row matrix previously created by alloc_matrix().
 * Improvement: tolerates a NULL matrix and resets *data_ptr to NULL so
 * the caller is not left holding a dangling pointer. */
void free_matrix(int ***data_ptr, int n) {
    int row;
    int **data = *data_ptr;

    if (data == NULL)
        return;
    for (row = 0; row < n; row++)
        free(data[row]);
    free(data);
    *data_ptr = NULL;
}
/* Read a rows x cols int image from a binary file, one row per fread.
 * Silently returns if the file cannot be opened (matrix keeps its
 * alloc_matrix() contents). Fixes: removed the unused `col` local and
 * stopped ignoring fread's return value -- a short read now reports the
 * failing row and stops instead of leaving later rows undefined. */
void readInputData(char* file, int rows, int cols, int **image) {
    int row;
    FILE *fp;

    // open file for reading
    fp = fopen(file, "rb");
    if (fp == NULL) {
        return;
    }
    for (row = 0; row < rows; row++)
        if (fread(&image[row][0], sizeof(int)*cols, 1, fp) != 1) {
            fprintf(stderr, "readInputData: short read at row %d\n", row);
            break;
        }
    fclose(fp);
}
/* Write a rows x cols int image to a binary file, one row per fwrite.
 * Silently returns if the file cannot be opened. Fixes: removed the
 * unused `col` local and stopped ignoring fwrite's return value -- a
 * short write now reports the failing row and stops. */
void writeOutputData(char* file, int rows, int cols, int **image) {
    int row;
    FILE *fp;

    // open file for writing
    fp = fopen(file, "wb");
    if (fp == NULL) {
        return;
    }
    for (row = 0; row < rows; row++)
        if (fwrite(&image[row][0], sizeof(int)*cols, 1, fp) != 1) {
            fprintf(stderr, "writeOutputData: short write at row %d\n", row);
            break;
        }
    fflush(fp);
    fclose(fp);
}
/* Mirror each row of a flat rows x cols matrix into fliparr
 * (column c goes to column cols-1-c). arr and fliparr must not alias. */
void flipHorizontal(int *arr, int rows, int cols, int *fliparr) {
    int row, col;
    for (row = 0; row < rows; row++) {
        int *src = arr + row * cols;
        int *dst = fliparr + row * cols;
        /* walk the left half (middle included for odd cols) and write
         * both mirrored positions at once */
        for (col = 0; col <= cols / 2; col++) {
            dst[col] = src[cols - 1 - col];
            dst[cols - 1 - col] = src[col];
        }
    }
}
/* Mirror each column of a flat rows x cols matrix into fliparr
 * (row r goes to row rows-1-r). arr and fliparr must not alias. */
void flipVertical(int *arr, int rows, int cols, int *fliparr) {
    int row, col;
    for (col = 0; col < cols; col++) {
        /* walk the top half (middle included for odd rows) and write
         * both mirrored positions at once */
        for (row = 0; row <= rows / 2; row++) {
            fliparr[row * cols + col] = arr[(rows - 1 - row) * cols + col];
            fliparr[(rows - 1 - row) * cols + col] = arr[row * cols + col];
        }
    }
}
/* Print a rows x cols matrix to stderr, entries right-aligned in a
 * 4-character field, one row per line.
 * Bug fixes: the old version built the whole text in a heap buffer
 * sized at 5 bytes per element and appended with strcat -- which was
 * never declared (<string.h> is not included), and any entry wider
 * than 4 digits (e.g. negative or >= 10000) overflowed the buffer.
 * Printing directly produces the identical byte stream without either
 * problem. */
void print2DArray(int **arr, int rows, int cols) {
    int row, col;
    for (row = 0; row < rows; row++) {
        for (col = 0; col < cols; col++)
            fprintf(stderr, "%4d ", arr[row][col]);
        fprintf(stderr, "\n");
    }
}
/* Copy a rows x cols image into the interior of augimage
 * ((rows+2) x (cols+2)), surrounding it with a one-pixel zero border. */
void augmentImage(int **image, int rows, int cols, int **augimage) {
    int r, c;
    int rowsaug = rows + 2;
    int colsaug = cols + 2;

    /* zero the first and last column of every augmented row */
    for (r = 0; r < rowsaug; r++) {
        augimage[r][0] = 0;
        augimage[r][colsaug - 1] = 0;
    }
    /* zero the first and last augmented row */
    for (c = 0; c < colsaug; c++) {
        augimage[0][c] = 0;
        augimage[rowsaug - 1][c] = 0;
    }
    /* interior cell (r+1, c+1) receives image cell (r, c) */
    for (r = 0; r < rows; r++)
        for (c = 0; c < cols; c++)
            augimage[r + 1][c + 1] = image[r][c];
}
/* Convolve the zero-padded AugImage ((rows+2) x (cols+2)) with the
 * global 3x3 EDGE_MASK, writing the rows x cols result into ImgConv.
 * NOTE(review): the "omp for" below is an orphaned worksharing
 * construct -- this function must be called from inside an active
 * "#pragma omp parallel" region (as main() does) for the iterations
 * to be divided among threads; called sequentially it still computes
 * the full result on one thread. */
void calcImgConv(int **AugImage, int **ImgConv, int rows, int cols) {
#pragma omp for collapse(2)
    for (int x = 0; x < rows; x++)
        for (int y = 0; y < cols; y++) {
            ImgConv[x][y] = 0;
            /* accumulate the 3x3 weighted neighborhood; the +k/+j
             * offsets land inside the padded image for every (x, y) */
            for (int k = 0; k < 3; k++)
                for (int j = 0; j < 3; j++)
                    ImgConv[x][y] += EDGE_MASK[k][j] * AugImage[x + k][y + j];
        }
}
|
convolution_sgemm_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// int8 GEMM over im2col-ed data: input elements are packed 8 (8 int8
// lanes per int64 element, see the 8u/8 create() arguments), output
// channels are packed 4 (int32x4 per store)
static void im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    // permute
    // reorder the im2col data so that pairs of output positions become
    // contiguous, which lets the main GEMM loop below process two
    // positions per iteration
    Mat tmp;
    if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size >> 1;

        // pack positions two at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

            int64_t* tmpptr = tmp.channel(i / 2);

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    __m128i _v = _mm_loadu_si128((const __m128i*)img0);
                    _mm_storeu_si128((__m128i*)tmpptr, _v);
                    tmpptr += 2;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 1;

        // leftover odd position (if any), copied one element at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            int64_t* tmpptr = tmp.channel(i / 2 + i % 2);

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    tmpptr += 1;
                    img0 += size;
                }
            }
        }
    }

    // GEMM: one output-channel group (4 channels) per p
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr0 = top_blob.channel(p);

        int i = 0;
        // main loop: two output positions per iteration
        for (; i + 1 < size; i += 2)
        {
            const signed char* tmpptr = tmp.channel(i / 2);
            const signed char* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            // 4 accumulators per position: one per packed output channel
            __m128i _sum00 = _mm_setzero_si128();
            __m128i _sum01 = _mm_setzero_si128();
            __m128i _sum02 = _mm_setzero_si128();
            __m128i _sum03 = _mm_setzero_si128();
            __m128i _sum10 = _mm_setzero_si128();
            __m128i _sum11 = _mm_setzero_si128();
            __m128i _sum12 = _mm_setzero_si128();
            __m128i _sum13 = _mm_setzero_si128();

            int j = 0;
            for (; j < nn; j++)
            {
                // sign-extend int8 -> int16 by unpacking with a
                // computed sign mask (cmpgt against zero)
                // TODO use _mm_cvtepi8_epi16 on sse4.1
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
                __m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);

                // TODO use _mm_cvtepi8_epi16 on sse4.1
                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

                // 16x16 -> 32-bit products via mullo/mulhi pairs,
                // recombined with unpacklo/hi and accumulated
                __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
                __m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
                __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
                __m128i _sl02 = _mm_mullo_epi16(_val0, _w2);
                __m128i _sh02 = _mm_mulhi_epi16(_val0, _w2);
                __m128i _sl03 = _mm_mullo_epi16(_val0, _w3);
                __m128i _sh03 = _mm_mulhi_epi16(_val0, _w3);
                __m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
                __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
                __m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
                __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
                __m128i _sl12 = _mm_mullo_epi16(_val1, _w2);
                __m128i _sh12 = _mm_mulhi_epi16(_val1, _w2);
                __m128i _sl13 = _mm_mullo_epi16(_val1, _w3);
                __m128i _sh13 = _mm_mulhi_epi16(_val1, _w3);

                _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpacklo_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpacklo_epi16(_sl03, _sh03));
                _sum00 = _mm_add_epi32(_sum00, _mm_unpackhi_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpackhi_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl03, _sh03));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpacklo_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpacklo_epi16(_sl13, _sh13));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpackhi_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl13, _sh13));

                tmpptr += 16;
                kptr0 += 32;
            }

            // transpose 4x4
            // rearrange the per-channel partial sums so each lane holds
            // one output channel, then reduce across the four registers
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
                _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
                _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
                _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
                _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
                _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
                _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
                _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
                _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00 = _mm_add_epi32(_sum00, _sum01);
            _sum02 = _mm_add_epi32(_sum02, _sum03);
            _sum10 = _mm_add_epi32(_sum10, _sum11);
            _sum12 = _mm_add_epi32(_sum12, _sum13);

            _sum00 = _mm_add_epi32(_sum00, _sum02);
            _sum10 = _mm_add_epi32(_sum10, _sum12);

            _mm_storeu_si128((__m128i*)outptr0, _sum00);
            _mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
            outptr0 += 8;
        }
        // tail loop: one output position per iteration
        for (; i < size; i++)
        {
            const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
            const signed char* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            __m128i _sum0 = _mm_setzero_si128();
            __m128i _sum1 = _mm_setzero_si128();
            __m128i _sum2 = _mm_setzero_si128();
            __m128i _sum3 = _mm_setzero_si128();

            int j = 0;
            for (; j < nn; j++)
            {
                // TODO use _mm_cvtepi8_epi16 on sse4.1
                __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
                _val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));

                // TODO use _mm_cvtepi8_epi16 on sse4.1
                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

                __m128i _sl0 = _mm_mullo_epi16(_val, _w0);
                __m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
                __m128i _sl1 = _mm_mullo_epi16(_val, _w1);
                __m128i _sh1 = _mm_mulhi_epi16(_val, _w1);
                __m128i _sl2 = _mm_mullo_epi16(_val, _w2);
                __m128i _sh2 = _mm_mulhi_epi16(_val, _w2);
                __m128i _sl3 = _mm_mullo_epi16(_val, _w3);
                __m128i _sh3 = _mm_mulhi_epi16(_val, _w3);

                _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpacklo_epi16(_sl3, _sh3));
                _sum0 = _mm_add_epi32(_sum0, _mm_unpackhi_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpackhi_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3));

                tmpptr += 8;
                kptr0 += 32;
            }

            // transpose 4x4
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
                _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
                _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
                _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
                _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum0 = _mm_add_epi32(_sum0, _sum1);
            _sum2 = _mm_add_epi32(_sum2, _sum3);

            _sum0 = _mm_add_epi32(_sum0, _sum2);

            _mm_storeu_si128((__m128i*)outptr0, _sum0);
            outptr0 += 4;
        }
    }
}
// Pre-interleave the convolution weights into the layout consumed by
// im2col_sgemm_pack8to4_int8_sse: groups of 4 output channels x 8 input
// channels per kernel tap, stored contiguously.
// NOTE(review): inch is assumed to be a multiple of 8 and outch a
// multiple of 4 (the pack8to4 contract) -- leftover channels would be
// silently dropped by the strided loops below; confirm with callers.
static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(32 * maxk, inch / 8, outch / 4, 1u, nullptr);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        // destination channel holds one group of 4 output channels
        signed char* g00 = kernel_tm.channel(q / 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                // emit 4 (outch) x 8 (inch) weights for this tap
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}
// Full int8 convolution via im2col + GEMM: lower the input into column
// form (one contiguous run of `size` elements per kernel tap per input
// channel), then delegate the matrix multiply to
// im2col_sgemm_pack8to4_int8_sse.
static void convolution_im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    // elements are int64 because 8 int8 channel lanes are packed per
    // element (elemsize 8u, elempack 8)
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // after writing one output row, `gap` advances the source
        // pointer from the end of that sampled row to the start of the
        // next one (stride_h rows down, back to the first column)
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            int64_t* ptr = bottom_im2col.channel(p);

            // one pass per kernel tap (u, v), each producing `size`
            // consecutive elements
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
convolution_1x1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 1x1 convolution, stride 1 (pointwise convolution).
// _kernel holds one scalar weight per (output channel, input channel) pair,
// laid out as outch rows of inch floats.  Output channels are processed 4 at
// a time and, within each, input channels 4 at a time; scalar tail loops
// handle the remainders.  Each output map is seeded with its bias and then
// accumulated in place, one input-channel group per pass over the map.
static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // number of output-channel groups of 4, and where the scalar tail starts
    int nn_outch = outch >> 2;
    int remain_outch_start = nn_outch << 2;

    #pragma omp parallel for
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);
        Mat out2 = top_blob.channel(p+2);
        Mat out3 = top_blob.channel(p+3);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;
        const float bias2 = bias ? bias[p+2] : 0.f;
        const float bias3 = bias ? bias[p+3] : 0.f;

        // seed the 4 output maps with their bias before accumulation
        out0.fill(bias0);
        out1.fill(bias1);
        out2.fill(bias2);
        out3.fill(bias3);

        int q = 0;

        // main path: 4 input channels x 4 output channels per pass
        for (; q+3<inch; q+=4)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;
            float* outptr2 = out2;
            float* outptr3 = out3;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            // 4 consecutive weights per output channel: w[p][q..q+3]
            const float* kernel0 = kernel + p*inch + q;
            const float* kernel1 = kernel + (p+1)*inch + q;
            const float* kernel2 = kernel + (p+2)*inch + q;
            const float* kernel3 = kernel + (p+3)*inch + q;

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            int size = outw * outh;

#if __ARM_NEON
            // nn = iterations of 8 pixels; remain = scalar leftover
            int nn = size >> 3;
            int remain = size & 7;
#else
            int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
            // each _kN lane holds one of the 4 weights of output channel p+N
            float32x4_t _k0 = vld1q_f32(kernel0);
            float32x4_t _k1 = vld1q_f32(kernel1);
            float32x4_t _k2 = vld1q_f32(kernel2);
            float32x4_t _k3 = vld1q_f32(kernel3);
#if __aarch64__
            for (; nn>0; nn--)
            {
                float32x4_t _p = vld1q_f32(r0);
                float32x4_t _pn = vld1q_f32(r0+4);

                float32x4_t _out0p = vld1q_f32(outptr0);
                float32x4_t _out0pn = vld1q_f32(outptr0+4);
                float32x4_t _out1p = vld1q_f32(outptr1);
                float32x4_t _out1pn = vld1q_f32(outptr1+4);
                float32x4_t _out2p = vld1q_f32(outptr2);
                float32x4_t _out2pn = vld1q_f32(outptr2+4);
                float32x4_t _out3p = vld1q_f32(outptr3);
                float32x4_t _out3pn = vld1q_f32(outptr3+4);

                // input channel q, weight lane 0
                _out0p = vfmaq_laneq_f32(_out0p, _p, _k0, 0);
                _out0pn = vfmaq_laneq_f32(_out0pn, _pn, _k0, 0);
                _out1p = vfmaq_laneq_f32(_out1p, _p, _k1, 0);
                _out1pn = vfmaq_laneq_f32(_out1pn, _pn, _k1, 0);
                _out2p = vfmaq_laneq_f32(_out2p, _p, _k2, 0);
                _out2pn = vfmaq_laneq_f32(_out2pn, _pn, _k2, 0);
                _out3p = vfmaq_laneq_f32(_out3p, _p, _k3, 0);
                _out3pn = vfmaq_laneq_f32(_out3pn, _pn, _k3, 0);

                // input channel q+1, weight lane 1
                float32x4_t _p1 = vld1q_f32(r1);
                float32x4_t _p1n = vld1q_f32(r1+4);

                _out0p = vfmaq_laneq_f32(_out0p, _p1, _k0, 1);
                _out0pn = vfmaq_laneq_f32(_out0pn, _p1n, _k0, 1);
                _out1p = vfmaq_laneq_f32(_out1p, _p1, _k1, 1);
                _out1pn = vfmaq_laneq_f32(_out1pn, _p1n, _k1, 1);
                _out2p = vfmaq_laneq_f32(_out2p, _p1, _k2, 1);
                _out2pn = vfmaq_laneq_f32(_out2pn, _p1n, _k2, 1);
                _out3p = vfmaq_laneq_f32(_out3p, _p1, _k3, 1);
                _out3pn = vfmaq_laneq_f32(_out3pn, _p1n, _k3, 1);

                // input channel q+2, weight lane 2
                float32x4_t _p2 = vld1q_f32(r2);
                float32x4_t _p2n = vld1q_f32(r2+4);

                _out0p = vfmaq_laneq_f32(_out0p, _p2, _k0, 2);
                _out0pn = vfmaq_laneq_f32(_out0pn, _p2n, _k0, 2);
                _out1p = vfmaq_laneq_f32(_out1p, _p2, _k1, 2);
                _out1pn = vfmaq_laneq_f32(_out1pn, _p2n, _k1, 2);
                _out2p = vfmaq_laneq_f32(_out2p, _p2, _k2, 2);
                _out2pn = vfmaq_laneq_f32(_out2pn, _p2n, _k2, 2);
                _out3p = vfmaq_laneq_f32(_out3p, _p2, _k3, 2);
                _out3pn = vfmaq_laneq_f32(_out3pn, _p2n, _k3, 2);

                // input channel q+3, weight lane 3
                float32x4_t _p3 = vld1q_f32(r3);
                float32x4_t _p3n = vld1q_f32(r3+4);

                _out0p = vfmaq_laneq_f32(_out0p, _p3, _k0, 3);
                _out0pn = vfmaq_laneq_f32(_out0pn, _p3n, _k0, 3);
                _out1p = vfmaq_laneq_f32(_out1p, _p3, _k1, 3);
                _out1pn = vfmaq_laneq_f32(_out1pn, _p3n, _k1, 3);
                _out2p = vfmaq_laneq_f32(_out2p, _p3, _k2, 3);
                _out2pn = vfmaq_laneq_f32(_out2pn, _p3n, _k2, 3);
                _out3p = vfmaq_laneq_f32(_out3p, _p3, _k3, 3);
                _out3pn = vfmaq_laneq_f32(_out3pn, _p3n, _k3, 3);

                vst1q_f32(outptr0, _out0p);
                vst1q_f32(outptr0+4, _out0pn);
                vst1q_f32(outptr1, _out1p);
                vst1q_f32(outptr1+4, _out1pn);
                vst1q_f32(outptr2, _out2p);
                vst1q_f32(outptr2+4, _out2pn);
                vst1q_f32(outptr3, _out3p);
                vst1q_f32(outptr3+4, _out3pn);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                outptr0 += 8;
                outptr1 += 8;
                outptr2 += 8;
                outptr3 += 8;
            }
#else
            // armv7: software-pipelined asm; q8..q15 hold the 4 output
            // accumulators (2 quads each), q4..q7 stream the inputs.
            // r0 (%5) is post-incremented one extra 32-byte load ahead and
            // rewound by the final "sub %5, #32".
            if (nn > 0)
            {
                asm volatile(
                    "pld        [%5, #256]              \n"
                    "vld1.f32   {d12-d15}, [%5 :128]!   \n"
                    "pld        [%1, #256]              \n"
                    "vld1.f32   {d16-d19}, [%1 :128]    \n"
                    "0:                                 \n"

                    "vmla.f32   q8, q6, %e18[0]         \n"

                    "pld        [%2, #256]              \n"
                    "vld1.f32   {d20-d23}, [%2 :128]    \n"

                    "vmla.f32   q9, q7, %e18[0]         \n"

                    "vmla.f32   q10, q6, %e19[0]        \n"

                    "pld        [%3, #256]              \n"
                    "vld1.f32   {d24-d27}, [%3 :128]    \n"

                    "vmla.f32   q11, q7, %e19[0]        \n"

                    "vmla.f32   q12, q6, %e20[0]        \n"

                    "pld        [%4, #256]              \n"
                    "vld1.f32   {d28-d31}, [%4 :128]    \n"

                    "vmla.f32   q13, q7, %e20[0]        \n"

                    "pld        [%6, #256]              \n"
                    "vld1.f32   {d8-d11}, [%6 :128]!    \n"

                    "vmla.f32   q14, q6, %e21[0]        \n"
                    "vmla.f32   q15, q7, %e21[0]        \n"

                    "vmla.f32   q8, q4, %e18[1]         \n"
                    "vmla.f32   q9, q5, %e18[1]         \n"

                    "vmla.f32   q10, q4, %e19[1]        \n"
                    "vmla.f32   q11, q5, %e19[1]        \n"

                    "vmla.f32   q12, q4, %e20[1]        \n"
                    "vmla.f32   q13, q5, %e20[1]        \n"

                    "pld        [%7, #256]              \n"
                    "vld1.f32   {d12-d15}, [%7 :128]!   \n"

                    "vmla.f32   q14, q4, %e21[1]        \n"
                    "vmla.f32   q15, q5, %e21[1]        \n"

                    "vmla.f32   q8, q6, %f18[0]         \n"
                    "vmla.f32   q9, q7, %f18[0]         \n"

                    "vmla.f32   q10, q6, %f19[0]        \n"
                    "vmla.f32   q11, q7, %f19[0]        \n"

                    "vmla.f32   q12, q6, %f20[0]        \n"
                    "vmla.f32   q13, q7, %f20[0]        \n"

                    "pld        [%8, #256]              \n"
                    "vld1.f32   {d8-d11}, [%8 :128]!    \n"

                    "vmla.f32   q14, q6, %f21[0]        \n"
                    "vmla.f32   q15, q7, %f21[0]        \n"

                    "vmla.f32   q8, q4, %f18[1]         \n"
                    "vmla.f32   q9, q5, %f18[1]         \n"

                    "vmla.f32   q10, q4, %f19[1]        \n"
                    "vmla.f32   q11, q5, %f19[1]        \n"

                    "vst1.f32   {d16-d19}, [%1 :128]!   \n"

                    "vmla.f32   q12, q4, %f20[1]        \n"
                    "vmla.f32   q13, q5, %f20[1]        \n"

                    "vst1.f32   {d20-d23}, [%2 :128]!   \n"

                    "pld        [%5, #256]              \n"
                    "vld1.f32   {d12-d15}, [%5 :128]!   \n"

                    "vmla.f32   q14, q4, %f21[1]        \n"
                    "vmla.f32   q15, q5, %f21[1]        \n"

                    "vst1.f32   {d24-d27}, [%3 :128]!   \n"

                    "pld        [%1, #256]              \n"
                    "vld1.f32   {d16-d19}, [%1 :128]    \n"

                    "subs       %0, #1                  \n"
                    "vst1.f32   {d28-d31}, [%4 :128]!   \n"
                    "bne        0b                      \n"
                    "sub        %5, #32                 \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr0),// %1
                    "=r"(outptr1),// %2
                    "=r"(outptr2),// %3
                    "=r"(outptr3),// %4
                    "=r"(r0),     // %5
                    "=r"(r1),     // %6
                    "=r"(r2),     // %7
                    "=r"(r3)      // %8
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr1),
                    "3"(outptr2),
                    "4"(outptr3),
                    "5"(r0),
                    "6"(r1),
                    "7"(r2),
                    "8"(r3),
                    "w"(_k0),     // %18
                    "w"(_k1),     // %19
                    "w"(_k2),     // %20
                    "w"(_k3)      // %21
                    : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail: remaining pixels of this pass
            for (; remain>0; remain--)
            {
                // TODO neon optimize
                float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3];
                float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3];
                float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3];
                float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3];

                *outptr0 += sum0;
                *outptr1 += sum1;
                *outptr2 += sum2;
                *outptr3 += sum3;

                r0++;
                r1++;
                r2++;
                r3++;
                outptr0++;
                outptr1++;
                outptr2++;
                outptr3++;
            }
        }

        // input-channel tail: one channel x 4 output channels per pass
        for (; q<inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;
            float* outptr2 = out2;
            float* outptr3 = out3;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float* kernel1 = kernel + (p+1)*inch + q;
            const float* kernel2 = kernel + (p+2)*inch + q;
            const float* kernel3 = kernel + (p+3)*inch + q;

            const float k0 = kernel0[0];
            const float k1 = kernel1[0];
            const float k2 = kernel2[0];
            const float k3 = kernel3[0];

            const float* r0 = img0;

            int size = outw * outh;

#if __ARM_NEON
            int nn = size >> 3;
            int remain = size & 7;
#else
            int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
            // single weight per output channel, broadcast across lanes
            float32x4_t _k0 = vdupq_n_f32(k0);
            float32x4_t _k1 = vdupq_n_f32(k1);
            float32x4_t _k2 = vdupq_n_f32(k2);
            float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
            for (; nn>0; nn--)
            {
                float32x4_t _p = vld1q_f32(r0);
                float32x4_t _pn = vld1q_f32(r0+4);

                float32x4_t _out0p = vld1q_f32(outptr0);
                float32x4_t _out0pn = vld1q_f32(outptr0+4);
                float32x4_t _out1p = vld1q_f32(outptr1);
                float32x4_t _out1pn = vld1q_f32(outptr1+4);
                float32x4_t _out2p = vld1q_f32(outptr2);
                float32x4_t _out2pn = vld1q_f32(outptr2+4);
                float32x4_t _out3p = vld1q_f32(outptr3);
                float32x4_t _out3pn = vld1q_f32(outptr3+4);

                _out0p = vfmaq_f32(_out0p, _p, _k0);
                _out0pn = vfmaq_f32(_out0pn, _pn, _k0);
                _out1p = vfmaq_f32(_out1p, _p, _k1);
                _out1pn = vfmaq_f32(_out1pn, _pn, _k1);
                _out2p = vfmaq_f32(_out2p, _p, _k2);
                _out2pn = vfmaq_f32(_out2pn, _pn, _k2);
                _out3p = vfmaq_f32(_out3p, _p, _k3);
                _out3pn = vfmaq_f32(_out3pn, _pn, _k3);

                vst1q_f32(outptr0, _out0p);
                vst1q_f32(outptr0+4, _out0pn);
                vst1q_f32(outptr1, _out1p);
                vst1q_f32(outptr1+4, _out1pn);
                vst1q_f32(outptr2, _out2p);
                vst1q_f32(outptr2+4, _out2pn);
                vst1q_f32(outptr3, _out3p);
                vst1q_f32(outptr3+4, _out3pn);

                r0 += 8;
                outptr0 += 8;
                outptr1 += 8;
                outptr2 += 8;
                outptr3 += 8;
            }
#else
            // armv7 asm variant of the loop above; same register scheme,
            // r0 (%5) over-advanced by one preload and rewound at the end.
            if (nn > 0)
            {
                asm volatile(
                    "pld        [%5, #256]              \n"
                    "vld1.f32   {d12-d15}, [%5 :128]!   \n"
                    "0:                                 \n"
                    "pld        [%1, #256]              \n"
                    "vld1.f32   {d16-d19}, [%1 :128]    \n"
                    "vmla.f32   q8, q6, %q12            \n"
                    "vmla.f32   q9, q7, %q12            \n"
                    "pld        [%2, #256]              \n"
                    "vld1.f32   {d20-d23}, [%2 :128]    \n"
                    "vmla.f32   q10, q6, %q13           \n"
                    "vmla.f32   q11, q7, %q13           \n"
                    "vst1.f32   {d16-d19}, [%1 :128]!   \n"
                    "pld        [%3, #256]              \n"
                    "vld1.f32   {d24-d27}, [%3 :128]    \n"
                    "vmla.f32   q12, q6, %q14           \n"
                    "vmla.f32   q13, q7, %q14           \n"
                    "vst1.f32   {d20-d23}, [%2 :128]!   \n"
                    "pld        [%4, #256]              \n"
                    "vld1.f32   {d28-d31}, [%4 :128]    \n"
                    "vmla.f32   q14, q6, %q15           \n"
                    "vmla.f32   q15, q7, %q15           \n"
                    "vst1.f32   {d24-d27}, [%3 :128]!   \n"
                    "pld        [%5, #256]              \n"
                    "vld1.f32   {d12-d15}, [%5 :128]!   \n"
                    "subs       %0, #1                  \n"
                    "vst1.f32   {d28-d31}, [%4 :128]!   \n"
                    "bne        0b                      \n"
                    "sub        %5, #32                 \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr0),// %1
                    "=r"(outptr1),// %2
                    "=r"(outptr2),// %3
                    "=r"(outptr3),// %4
                    "=r"(r0)      // %5
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr1),
                    "3"(outptr2),
                    "4"(outptr3),
                    "5"(r0),
                    "w"(_k0),     // %12
                    "w"(_k1),     // %13
                    "w"(_k2),     // %14
                    "w"(_k3)      // %15
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain>0; remain--)
            {
                // TODO neon optimize
                float sum0 = *r0 * k0;
                float sum1 = *r0 * k1;
                float sum2 = *r0 * k2;
                float sum3 = *r0 * k3;

                *outptr0 += sum0;
                *outptr1 += sum1;
                *outptr2 += sum2;
                *outptr3 += sum3;

                r0++;
                outptr0++;
                outptr1++;
                outptr2++;
                outptr3++;
            }
        }
    }

    // output-channel tail: one output channel at a time
    #pragma omp parallel for
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        int q = 0;

        // 4 input channels per pass
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            int size = outw * outh;

#if __ARM_NEON
            int nn = size >> 3;
            int remain = size & 7;
#else
            int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
            float32x4_t _k0 = vdupq_n_f32(k0);
            float32x4_t _k1 = vdupq_n_f32(k1);
            float32x4_t _k2 = vdupq_n_f32(k2);
            float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
            for (; nn>0; nn--)
            {
                float32x4_t _p = vld1q_f32(r0);
                float32x4_t _pn = vld1q_f32(r0+4);

                float32x4_t _outp = vld1q_f32(outptr);
                float32x4_t _outpn = vld1q_f32(outptr+4);

                _outp = vfmaq_f32(_outp, _p, _k0);
                _outpn = vfmaq_f32(_outpn, _pn, _k0);

                float32x4_t _p1 = vld1q_f32(r1);
                float32x4_t _p1n = vld1q_f32(r1+4);

                _outp = vfmaq_f32(_outp, _p1, _k1);
                _outpn = vfmaq_f32(_outpn, _p1n, _k1);

                float32x4_t _p2 = vld1q_f32(r2);
                float32x4_t _p2n = vld1q_f32(r2+4);

                _outp = vfmaq_f32(_outp, _p2, _k2);
                _outpn = vfmaq_f32(_outpn, _p2n, _k2);

                float32x4_t _p3 = vld1q_f32(r3);
                float32x4_t _p3n = vld1q_f32(r3+4);

                _outp = vfmaq_f32(_outp, _p3, _k3);
                _outpn = vfmaq_f32(_outpn, _p3n, _k3);

                vst1q_f32(outptr, _outp);
                vst1q_f32(outptr+4, _outpn);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                outptr += 8;
            }
#else
            // armv7 asm: q0/q1 accumulate, q2/q3 stream each input in turn;
            // r0 (%2) over-advanced by one preload and rewound at the end.
            if (nn > 0)
            {
                asm volatile(
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d4-d7}, [%2 :128]! \n"
                    "0:                             \n"
                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d0-d3}, [%1 :128]  \n"
                    "vmla.f32   q0, q2, %q12        \n"
                    "vmla.f32   q1, q3, %q12        \n"
                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d4-d7}, [%3 :128]! \n"
                    "vmla.f32   q0, q2, %q13        \n"
                    "vmla.f32   q1, q3, %q13        \n"
                    "pld        [%4, #256]          \n"
                    "vld1.f32   {d4-d7}, [%4 :128]! \n"
                    "vmla.f32   q0, q2, %q14        \n"
                    "vmla.f32   q1, q3, %q14        \n"
                    "pld        [%5, #256]          \n"
                    "vld1.f32   {d4-d7}, [%5 :128]! \n"
                    "vmla.f32   q0, q2, %q15        \n"
                    "vmla.f32   q1, q3, %q15        \n"
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d4-d7}, [%2 :128]! \n"
                    "subs       %0, #1              \n"
                    "vst1.f32   {d0-d3}, [%1 :128]! \n"
                    "bne        0b                  \n"
                    "sub        %2, #32             \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr), // %1
                    "=r"(r0),     // %2
                    "=r"(r1),     // %3
                    "=r"(r2),     // %4
                    "=r"(r3)      // %5
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k0),     // %12
                    "w"(_k1),     // %13
                    "w"(_k2),     // %14
                    "w"(_k3)      // %15
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;
                float sum1 = *r1 * k1;
                float sum2 = *r2 * k2;
                float sum3 = *r3 * k3;

                *outptr += sum + sum1 + sum2 + sum3;

                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
            }
        }

        // input-channel tail: single channel, single weight
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];

            const float* r0 = img0;

            int size = outw * outh;

#if __ARM_NEON
            int nn = size >> 3;
            int remain = size & 7;
#else
            int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
            float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
            for (; nn>0; nn--)
            {
                float32x4_t _p = vld1q_f32(r0);
                float32x4_t _outp = vld1q_f32(outptr);

                float32x4_t _pn = vld1q_f32(r0+4);
                float32x4_t _outpn = vld1q_f32(outptr+4);

                _outp = vfmaq_f32(_outp, _p, _k0);
                _outpn = vfmaq_f32(_outpn, _pn, _k0);

                vst1q_f32(outptr, _outp);
                vst1q_f32(outptr+4, _outpn);

                r0 += 8;
                outptr += 8;
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d4-d7}, [%2 :128]! \n"
                    "0:                             \n"
                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d0-d3}, [%1 :128]  \n"
                    "vmla.f32   q0, q2, %q6         \n"
                    "vmla.f32   q1, q3, %q6         \n"
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d4-d7}, [%2 :128]! \n"
                    "subs       %0, #1              \n"
                    "vst1.f32   {d0-d3}, [%1 :128]! \n"
                    "bne        0b                  \n"
                    "sub        %2, #32             \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr), // %1
                    "=r"(r0)      // %2
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "w"(_k0)      // %6
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;

                *outptr += sum;

                r0++;
                outptr++;
            }
        }
    }
}
// 1x1 convolution, stride 2.  Samples every other input pixel; vld2 loads
// de-interleave pairs so that val[0] holds the even-indexed (sampled)
// elements.  Output channels are processed one at a time, input channels
// 4 at a time with a scalar tail.
static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after a row, the read pointer has advanced 2*outw; skip the rest of
    // that row plus one full row to reach the next sampled row (stride_h 2)
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // seed the output map with the bias before accumulation
        out.fill(bias0);

        int q = 0;

        // main path: 4 input channels per pass
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // nn = iterations of 8 output pixels; remain = leftover
                int nn = outw >> 3;
                int remain = outw & 7;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
                float32x4_t _k0 = vdupq_n_f32(k0);
                float32x4_t _k1 = vdupq_n_f32(k1);
                float32x4_t _k2 = vdupq_n_f32(k2);
                float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
                for (; nn>0; nn--)
                {
                    // vld2q de-interleaves; val[0] = even (stride-2) samples
                    float32x4x2_t _px2 = vld2q_f32(r0);
                    float32x4_t _p = _px2.val[0];
                    float32x4_t _outp = vld1q_f32(outptr);

                    float32x4x2_t _pnx2 = vld2q_f32(r0+8);
                    float32x4_t _pn = _pnx2.val[0];
                    float32x4_t _outpn = vld1q_f32(outptr+4);

                    _outp = vmlaq_f32(_outp, _p, _k0);
                    _outpn = vmlaq_f32(_outpn, _pn, _k0);

                    float32x4x2_t _p1x2 = vld2q_f32(r1);
                    float32x4_t _p1 = _p1x2.val[0];
                    float32x4x2_t _p1nx2 = vld2q_f32(r1+8);
                    float32x4_t _p1n = _p1nx2.val[0];

                    _outp = vmlaq_f32(_outp, _p1, _k1);
                    _outpn = vmlaq_f32(_outpn, _p1n, _k1);

                    float32x4x2_t _p2x2 = vld2q_f32(r2);
                    float32x4_t _p2 = _p2x2.val[0];
                    float32x4x2_t _p2nx2 = vld2q_f32(r2+8);
                    float32x4_t _p2n = _p2nx2.val[0];

                    _outp = vmlaq_f32(_outp, _p2, _k2);
                    _outpn = vmlaq_f32(_outpn, _p2n, _k2);

                    float32x4x2_t _p3x2 = vld2q_f32(r3);
                    float32x4_t _p3 = _p3x2.val[0];
                    float32x4x2_t _p3nx2 = vld2q_f32(r3+8);
                    float32x4_t _p3n = _p3nx2.val[0];

                    _outp = vmlaq_f32(_outp, _p3, _k3);
                    _outpn = vmlaq_f32(_outpn, _p3n, _k3);

                    vst1q_f32(outptr, _outp);
                    vst1q_f32(outptr+4, _outpn);

                    // 8 output pixels consume 16 input floats (stride 2)
                    r0 += 16;
                    r1 += 16;
                    r2 += 16;
                    r3 += 16;
                    outptr += 8;
                }
#else
                // armv7 asm: vld2 de-interleaves (even lanes in q2/q8);
                // r0 (%2) is over-advanced by one 64-byte preload and
                // rewound by the final "sub %2, #64".
                if (nn > 0)
                {
                    asm volatile(
                        "pld        [%2, #512]          \n"
                        "vld2.f32   {d4-d7}, [%2]!      \n"
                        "vld2.f32   {d16-d19}, [%2]!    \n"
                        "0:                             \n"
                        "pld        [%1, #256]          \n"
                        "vld1.f32   {d0-d3}, [%1]       \n"
                        "vmla.f32   q0, q2, %q12        \n"
                        "vmla.f32   q1, q8, %q12        \n"
                        "pld        [%3, #512]          \n"
                        "vld2.f32   {d4-d7}, [%3]!      \n"
                        "vld2.f32   {d16-d19}, [%3]!    \n"
                        "vmla.f32   q0, q2, %q13        \n"
                        "vmla.f32   q1, q8, %q13        \n"
                        "pld        [%4, #512]          \n"
                        "vld2.f32   {d4-d7}, [%4]!      \n"
                        "vld2.f32   {d16-d19}, [%4]!    \n"
                        "vmla.f32   q0, q2, %q14        \n"
                        "vmla.f32   q1, q8, %q14        \n"
                        "pld        [%5, #512]          \n"
                        "vld2.f32   {d4-d7}, [%5]!      \n"
                        "vld2.f32   {d16-d19}, [%5]!    \n"
                        "vmla.f32   q0, q2, %q15        \n"
                        "vmla.f32   q1, q8, %q15        \n"
                        "pld        [%2, #512]          \n"
                        "vld2.f32   {d4-d7}, [%2]!      \n"
                        "vld2.f32   {d16-d19}, [%2]!    \n"
                        "subs       %0, #1              \n"
                        "vst1.f32   {d0-d3}, [%1]!      \n"
                        "bne        0b                  \n"
                        "sub        %2, #64             \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2),     // %4
                        "=r"(r3)      // %5
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "w"(_k0),     // %12
                        "w"(_k1),     // %13
                        "w"(_k2),     // %14
                        "w"(_k3)      // %15
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;
                    float sum1 = *r1 * k1;
                    float sum2 = *r2 * k2;
                    float sum3 = *r3 * k3;

                    *outptr += sum + sum1 + sum2 + sum3;

                    // stride 2: skip every other input pixel
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }

        // input-channel tail: one channel, one weight per pass
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];

            const float* r0 = img0;

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 3;
                int remain = outw & 7;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
                float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4x2_t _px2 = vld2q_f32(r0);
                    float32x4_t _p = _px2.val[0];
                    float32x4_t _outp = vld1q_f32(outptr);

                    float32x4x2_t _pnx2 = vld2q_f32(r0+8);
                    float32x4_t _pn = _pnx2.val[0];
                    float32x4_t _outpn = vld1q_f32(outptr+4);

                    _outp = vmlaq_f32(_outp, _p, _k0);
                    _outpn = vmlaq_f32(_outpn, _pn, _k0);

                    vst1q_f32(outptr, _outp);
                    vst1q_f32(outptr+4, _outpn);

                    r0 += 16;
                    outptr += 8;
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        "pld        [%2, #512]          \n"
                        "vld2.f32   {d4-d7}, [%2]!      \n"
                        "vld2.f32   {d16-d19}, [%2]!    \n"
                        "0:                             \n"
                        "pld        [%1, #256]          \n"
                        "vld1.f32   {d0-d3}, [%1]       \n"
                        "vmla.f32   q0, q2, %q6         \n"
                        "vmla.f32   q1, q8, %q6         \n"
                        "pld        [%2, #512]          \n"
                        "vld2.f32   {d4-d7}, [%2]!      \n"
                        "vld2.f32   {d16-d19}, [%2]!    \n"
                        "subs       %0, #1              \n"
                        "vst1.f32   {d0-d3}, [%1]!      \n"
                        "bne        0b                  \n"
                        "sub        %2, #64             \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0)      // %2
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "w"(_k0)      // %6
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;

                    *outptr += sum;

                    r0 += 2;
                    outptr++;
                }

                r0 += tailstep;
            }
        }
    }
}
|
GB_binop__ge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint8)
// A*D function (colscale): GB (_AxD__ge_uint8)
// D*A function (rowscale): GB (_DxB__ge_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint8)
// C=scalar+B GB (_bind1st__ge_uint8)
// C=scalar+B' GB (_bind1st_tran__ge_uint8)
// C=A+scalar GB (_bind2nd__ge_uint8)
// C=A'+scalar GB (_bind2nd_tran__ge_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
// type of matrix A entries (aij)
#define GB_ATYPE \
    uint8_t

// type of matrix B entries (bij)
#define GB_BTYPE \
    uint8_t

// type of matrix C entries; GE is a comparator (z = x >= y), so C is boolean
#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// Cx [p], the pth entry of C
#define GB_CX(p) Cx [p]

// binary operator: z = (x >= y)
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_UINT8 || GxB_NO_GE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0

    // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
    // GE is not one of these accumulators, so this kernel is compiled out.
    void GB ((none))
    (
        GrB_Matrix C,
        const GrB_Matrix A,
        const GrB_Matrix B,
        const int nthreads
    )
    {
        #include "GB_dense_ewise3_accum_template.c"
    }

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; no accumulator.
// The template applies GB_BINOP, i.e. Cx [p] = (Ax [p] >= Bx [p]).
GrB_Info GB (_Cdense_ewise3_noaccum__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The accumulation body is compiled out here (#if 0); the function reports
// success without modifying C.
GrB_Info GB (_Cdense_accumB__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// The accumulation body is compiled out here (#if 0); the function reports
// success without modifying C.
GrB_Info GB (_Cdense_accumb__ge_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale with diagonal matrix D.
// Cx is bool since the GE comparator produces boolean results.
GrB_Info GB (_AxD__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale with diagonal matrix D.
// Cx is bool since the GE comparator produces boolean results.
GrB_Info GB (_DxB__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, entries from the union of A and B.
// Workspaces for slicing M, A, and B are declared here and freed by
// GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__ge_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, entries in the intersection of A and B.
GrB_Info GB (_AemultB_01__ge_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for GE (flipped GE is handled elsewhere as LE), so
// only the unflipped branch of this function is compiled in.
GrB_Info GB (_AemultB_02__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, with M sparse/hyper, and A and B bitmap/full.
GrB_Info GB (_AemultB_03__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__ge_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first argument: Cx [p] = (x >= Bx [p]) for every entry present in B.
// Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__ge_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound to the
// second argument: Cx [p] = (Ax [p] >= y) for every entry present in A.
// Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__ge_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it reads A(:,:) entry pA
// and writes the result of (x >= aij) into C entry pC.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x >= aij) ; \
}

// C = op (x, A'): transpose A and apply the GE_UINT8 operator with the
// scalar x bound as the first argument.
GrB_Info GB (_bind1st_tran__ge_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    // shared transpose template, specialized by GB_ATYPE and GB_CAST_OP above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE after the template (code generator boilerplate;
    // preprocessor directives are processed regardless of the return above)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it reads A(:,:) entry pA
// and writes the result of (aij >= y) into C entry pC.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A and apply the GE_UINT8 operator with the
// scalar y bound as the second argument.
GrB_Info GB (_bind2nd_tran__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    // shared transpose template, specialized by GB_ATYPE and GB_CAST_OP above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
perturbations.c | /** @file perturbations.c Documented perturbation module
*
* Julien Lesgourgues, 23.09.2010
*
* Deals with the perturbation evolution.
 * This module has two purposes:
*
* - at the beginning, to initialize the perturbations, i.e. to
* integrate the perturbation equations, and store temporarily the terms
* contributing to the source functions as a function of conformal
* time. Then, to perform a few manipulations of these terms in order to
* infer the actual source functions \f$ S^{X} (k, \tau) \f$, and to
* store them as a function of conformal time inside an interpolation
* table.
*
* - at any time in the code, to evaluate the source functions at a
* given conformal time (by interpolating within the interpolation
* table).
*
* Hence the following functions can be called from other modules:
*
* -# perturb_init() at the beginning (but after background_init() and thermodynamics_init())
* -# perturb_sources_at_tau() at any later time
 * -# perturb_free() at the end, when no more calls to perturb_sources_at_tau() are needed
*/
#include "perturbations.h"
/**
* Source function \f$ S^{X} (k, \tau) \f$ at a given conformal time tau.
*
* Evaluate source functions at given conformal time tau by reading
* the pre-computed table and interpolating.
*
* @param ppt Input : pointer to perturbation structure containing interpolation tables
* @param index_md Input : index of requested mode
* @param index_ic Input : index of requested initial condition
* @param index_type Input : index of requested source function type
* @param tau Input : any value of conformal time
* @param psource Output: vector (already allocated) of source function as a function of k
* @return the error status
*/
int perturb_sources_at_tau(
                           struct perturbs * ppt,
                           int index_md,
                           int index_ic,
                           int index_type,
                           double tau,
                           double * psource
                           ) {

  /** Summary: */

  /** - interpolate in pre-computed table contained in ppt; the source table
      for (mode, ic, type) is stored flattened as
      sources[index_md][index_ic * tp_size[index_md] + index_type], sampled
      over ppt->tau_size times and ppt->k_size[index_md] wavenumbers; the
      result is a vector of length k_size[index_md], one value per k */
  class_call(array_interpolate_two_bis(ppt->tau_sampling,
                                       1,
                                       0,
                                       ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
                                       ppt->k_size[index_md],
                                       ppt->tau_size,
                                       tau,
                                       psource,
                                       ppt->k_size[index_md],
                                       ppt->error_message),
             ppt->error_message,
             ppt->error_message);

  return _SUCCESS_;
}
/**
* Initialize the perturbs structure, and in particular the table of source functions.
*
* Main steps:
*
* - given the values of the flags describing which kind of
* perturbations should be considered (modes: scalar/vector/tensor,
* initial conditions, type of source functions needed...),
* initialize indices and wavenumber list
*
* - define the time sampling for the output source functions
*
* - for each mode (scalar/vector/tensor): initialize the indices of
* relevant perturbations, integrate the differential system,
* compute and store the source functions.
*
* @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
* @param pth Input : pointer to thermodynamics structure
* @param ppt Output: Initialized perturbation structure
* @return the error status
*/
int perturb_init(
                 struct precision * ppr,
                 struct background * pba,
                 struct thermo * pth,
                 struct perturbs * ppt
                 ) {

  /** Summary: */

  /** - define local variables */

  /* running index for modes */
  int index_md;
  /* running index for initial conditions */
  int index_ic;
  /* running index for wavenumbers */
  int index_k;
  /* pointer to one struct perturb_workspace per thread (one if no openmp) */
  struct perturb_workspace ** pppw;
  /* number of threads (always one if no openmp) */
  int number_of_threads=1;
  /* index of the thread (always 0 if no openmp) */
  int thread=0;

  /* This code can be optionally compiled with the openmp option for parallel computation.
     Inside parallel regions, the use of the command "return" is forbidden.
     For error management, instead of "return _FAILURE_", we will set the variable below
     to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
     parallel region. */
  int abort;

  /* unsigned integer that will be set to the size of the workspace */
  size_t sz;

#ifdef _OPENMP
  /* instrumentation times */
  double tstart, tstop, tspent;
#endif

  /** - preliminary checks: skip the whole module when no source function
      was requested */

  if (ppt->has_perturbations == _FALSE_) {
    if (ppt->perturbations_verbose > 0)
      printf("No sources requested. Perturbation module skipped.\n");
    return _SUCCESS_;
  }
  else {
    if (ppt->perturbations_verbose > 0)
      printf("Computing sources\n");
  }

  /* sanity checks on the combination of gauge, species and approximation
     schemes requested in input */
  class_test((ppt->gauge == synchronous) && (pba->has_cdm == _FALSE_),
             ppt->error_message,
             "In the synchronous gauge, it is not self-consistent to assume no CDM: the later is used to define the initial timelike hypersurface. You can either add a negligible amount of CDM or switch to newtonian gauge");

  class_test ((ppr->tight_coupling_approximation < first_order_MB) ||
              (ppr->tight_coupling_approximation > compromise_CLASS),
              ppt->error_message,
              "your tight_coupling_approximation is set to %d, out of range defined in perturbations.h",ppr->tight_coupling_approximation);

  class_test ((ppr->radiation_streaming_approximation < rsa_null) ||
              (ppr->radiation_streaming_approximation > rsa_none),
              ppt->error_message,
              "your radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->radiation_streaming_approximation);

  if (pba->has_ur == _TRUE_) {
    class_test ((ppr->ur_fluid_approximation < ufa_mb) ||
                (ppr->ur_fluid_approximation > ufa_none),
                ppt->error_message,
                "your ur_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ur_fluid_approximation);
  }

  if (pba->has_ncdm == _TRUE_) {
    class_test ((ppr->ncdm_fluid_approximation < ncdmfa_mb) ||
                (ppr->ncdm_fluid_approximation > ncdmfa_none),
                ppt->error_message,
                "your ncdm_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ncdm_fluid_approximation);
  }

  if (pba->has_fld == _TRUE_) {

    class_test(pba->w0_fld+pba->wa_fld >= 0.,
               ppt->error_message,
               "So far, the fluid is meant to be negligible at early time, and not to be important for defining the initial conditions of other species. You are using parameters for which this assumption may break down, so maybe it's the case to fully implement the fluid in the initial condition routine");

    class_test((pba->w0_fld==-1.) && (pba->wa_fld==0.),
               ppt->error_message,
               "Your choice of a fluid with (w0,wa)=(-1,0) is not valid due to instabilities in the unphysical perturbations of such a fluid. Try instead with a plain cosmological constant");

    /* sign of (1+w) must not change between the infinite past and today */
    class_test(((pba->w0_fld + pba->wa_fld +1.0)*(pba->w0_fld+1.0)) < 0.0,
               ppt->error_message,
               "w crosses -1 between the infinite past and today, and this would lead to divergent perturbation equations for the fluid.");

  }

  if (pba->has_dcdm == _TRUE_) {

    class_test((ppt->has_cdi == _TRUE_) || (ppt->has_bi == _TRUE_) || (ppt->has_nid == _TRUE_) || (ppt->has_niv == _TRUE_),
               ppt->error_message,
               "Non-adiabatic initial conditions not coded in presence of decaying dark matter");

  }

  if (pba->has_smg == _TRUE_) {
    class_test(ppt->gauge == newtonian,
               ppt->error_message,
               "Asked for scalar modified gravity AND Newtonian gauge. Not yet implemented");
    // TODO think of some suitable tests for the scalar field
  }

  class_test(ppt->has_vectors == _TRUE_,
             ppt->error_message,
             "Vectors not coded yet");

  if ((ppt->has_niv == _TRUE_) && (ppt->perturbations_verbose > 0)) {
    printf("Warning: the niv initial conditions in CLASS (and also in CAMB) should still be double-checked: if you want to do it and send feedback, you are welcome!\n");
  }

  /* decide which extra species must be evolved for tensor modes, depending
     on the requested tensor method */
  if (ppt->has_tensors == _TRUE_) {

    ppt->evolve_tensor_ur = _FALSE_;
    ppt->evolve_tensor_ncdm = _FALSE_;

    switch (ppt->tensor_method) {

    case (tm_photons_only):
      break;

    case (tm_massless_approximation):
      /* ncdm is lumped together with ur in this approximation */
      if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_))
        ppt->evolve_tensor_ur = _TRUE_;
      break;

    case (tm_exact):
      if (pba->has_ur == _TRUE_)
        ppt->evolve_tensor_ur = _TRUE_;
      if (pba->has_ncdm == _TRUE_)
        ppt->evolve_tensor_ncdm = _TRUE_;
      break;
    }
  }

  /** - initialize all indices and lists in perturbs structure using perturb_indices_of_perturbs() */

  class_call(perturb_indices_of_perturbs(ppr,
                                         pba,
                                         pth,
                                         ppt),
             ppt->error_message,
             ppt->error_message);

  /** - define the common time sampling for all sources using
      perturb_timesampling_for_sources() */

  class_call(perturb_timesampling_for_sources(ppr,
                                              pba,
                                              pth,
                                              ppt),
             ppt->error_message,
             ppt->error_message);

  /** - if we want to store perturbations, write titles and allocate storage */
  class_call(perturb_prepare_output(pba,ppt),
             ppt->error_message,
             ppt->error_message);

  /** - create an array of workspaces in multi-thread case */

#ifdef _OPENMP

#pragma omp parallel
  {
    number_of_threads = omp_get_num_threads();
  }
#endif

  /* NOTE(review): on the "_FAILURE_" returns below, pppw (and any
     already-allocated workspaces) are not freed before returning; the run
     aborts anyway, but worth confirming this is intended */
  class_alloc(pppw,number_of_threads * sizeof(struct perturb_workspace *),ppt->error_message);

  /** - loop over modes (scalar, tensors, etc). For each mode: */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {

    if (ppt->perturbations_verbose > 1)
      printf("Evolving mode %d/%d\n",index_md+1,ppt->md_size);

    abort = _FALSE_;

    sz = sizeof(struct perturb_workspace);

#pragma omp parallel                                            \
  shared(pppw,ppr,pba,pth,ppt,index_md,abort,number_of_threads) \
  private(thread)                                               \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      /** create a workspace (one per thread in multi-thread case) */

      class_alloc_parallel(pppw[thread],sz,ppt->error_message);

      /** (a) initialize indices of vectors of perturbations with perturb_indices_of_current_vectors() */

      class_call_parallel(perturb_workspace_init(ppr,
                                                 pba,
                                                 pth,
                                                 ppt,
                                                 index_md,
                                                 pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    if (abort == _TRUE_) return _FAILURE_;

    /** (c) loop over initial conditions and wavenumbers; for each of them, evolve perturbations and compute source functions with perturb_solve() */

    for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {

      if (ppt->perturbations_verbose > 1)
        printf("Evolving ic %d/%d\n",index_ic+1,ppt->ic_size[index_md]);

      if (ppt->perturbations_verbose > 1)
        printf("evolving %d wavenumbers\n",ppt->k_size[index_md]);

      abort = _FALSE_;

#pragma omp parallel                                                    \
  shared(pppw,ppr,pba,pth,ppt,index_md,index_ic,abort,number_of_threads) \
  private(index_k,thread,tstart,tstop,tspent)                           \
  num_threads(number_of_threads)

      {

#ifdef _OPENMP
        thread=omp_get_thread_num();
        tspent=0.;
#endif

#pragma omp for schedule (dynamic)

        /* integrating backwards is slightly more optimal for parallel runs */
        //for (index_k = 0; index_k < ppt->k_size; index_k++) {
        for (index_k = ppt->k_size[index_md]-1; index_k >=0; index_k--) {

          if ((ppt->perturbations_verbose > 2) && (abort == _FALSE_)) {
            printf("evolving mode k=%e /Mpc (%d/%d)",ppt->k[index_md][index_k],index_k+1,ppt->k_size[index_md]);
            if (pba->sgnK != 0)
              printf(" (for scalar modes, corresponds to nu=%e)",sqrt(ppt->k[index_md][index_k]*ppt->k[index_md][index_k]+pba->K)/sqrt(pba->sgnK*pba->K));
            printf("\n");
          }

#ifdef _OPENMP
          tstart = omp_get_wtime();
#endif

          class_call_parallel(perturb_solve(ppr,
                                            pba,
                                            pth,
                                            ppt,
                                            index_md,
                                            index_ic,
                                            index_k,
                                            pppw[thread]),
                              ppt->error_message,
                              ppt->error_message);

#ifdef _OPENMP
          tstop = omp_get_wtime();

          tspent += tstop-tstart;
#endif

#pragma omp flush(abort)

        } /* end of loop over wavenumbers */

#ifdef _OPENMP
        if (ppt->perturbations_verbose>1)
          printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
                 __func__,tspent,omp_get_thread_num());
#endif

      } /* end of parallel region */

      if (abort == _TRUE_) return _FAILURE_;

    } /* end of loop over initial conditions */

    abort = _FALSE_;

#pragma omp parallel                                    \
  shared(pppw,ppt,index_md,abort,number_of_threads)     \
  private(thread)                                       \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      /* free the per-thread workspace allocated for this mode */
      class_call_parallel(perturb_workspace_free(ppt,index_md,pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    if (abort == _TRUE_) return _FAILURE_;

  } /* end loop over modes */

  free(pppw);

  return _SUCCESS_;
}
/**
* Free all memory space allocated by perturb_init().
*
* To be called at the end of each run, only when no further calls to
* perturb_sources_at_tau() are needed.
*
* @param ppt Input: perturbation structure to be freed
* @return the error status
*/
int perturb_free(
struct perturbs * ppt
) {
int index_md,index_ic,index_type;
int filenum;
if (ppt->has_perturbations == _TRUE_) {
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
free(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type]);
}
}
free(ppt->sources[index_md]);
free(ppt->k[index_md]);
}
free(ppt->tau_sampling);
free(ppt->tp_size);
free(ppt->ic_size);
free(ppt->k);
free(ppt->k_size_cmb);
free(ppt->k_size_cl);
free(ppt->k_size);
free(ppt->sources);
/** Stuff related to perturbations output: */
/** Free non-NULL pointers: */
if (ppt->index_k_output_values != NULL)
free(ppt->index_k_output_values);
for (filenum = 0; filenum<_MAX_NUMBER_OF_K_FILES_; filenum++){
if (ppt->scalar_perturbations_data[filenum] != NULL)
free(ppt->scalar_perturbations_data[filenum]);
if (ppt->vector_perturbations_data[filenum] != NULL)
free(ppt->vector_perturbations_data[filenum]);
if (ppt->tensor_perturbations_data[filenum] != NULL)
free(ppt->tensor_perturbations_data[filenum]);
}
}
return _SUCCESS_;
}
/**
* Initialize all indices and allocate most arrays in perturbs structure.
*
* @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
* @param pth Input : pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_indices_of_perturbs(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt
                                ) {

  /** Summary: */

  /** - define local variables */

  int index_type;
  int index_md;
  int index_ic;
  /* number of source types shared by all modes (temperature, polarization);
     each mode restarts its own type counter from this value */
  int index_type_common;

  /** - count modes (scalar, vector, tensor) and assign corresponding indices */

  index_md = 0;
  class_define_index(ppt->index_md_scalars,ppt->has_scalars,index_md,1);
  class_define_index(ppt->index_md_vectors,ppt->has_vectors,index_md,1);
  class_define_index(ppt->index_md_tensors,ppt->has_tensors,index_md,1);
  ppt->md_size = index_md;

  class_test(index_md == 0,
             ppt->error_message,
             "you should have at least one out of {scalars, vectors, tensors} !!!");

  /** - allocate array of number of types for each mode, ppt->tp_size[index_md] */

  class_alloc(ppt->tp_size,ppt->md_size*sizeof(int),ppt->error_message);

  /** - allocate array of number of initial conditions for each mode, ppt->ic_size[index_md] */

  class_alloc(ppt->ic_size,ppt->md_size*sizeof(int),ppt->error_message);

  /** - allocate array of arrays of source functions for each mode, ppt->source[index_md] */

  class_alloc(ppt->sources,ppt->md_size * sizeof(double *),ppt->error_message);

  /** initialization of all flags to false (will eventually be set to true later) */

  ppt->has_cmb = _FALSE_;
  ppt->has_lss = _FALSE_;

  ppt->has_source_t = _FALSE_;
  ppt->has_source_p = _FALSE_;
  ppt->has_source_delta_m = _FALSE_;
  ppt->has_source_delta_g = _FALSE_;
  ppt->has_source_delta_b = _FALSE_;
  ppt->has_source_delta_cdm = _FALSE_;
  ppt->has_source_delta_dcdm = _FALSE_;
  ppt->has_source_delta_fld = _FALSE_;
  ppt->has_source_delta_scf = _FALSE_;
  ppt->has_source_phi_smg = _FALSE_; //scalar field
  ppt->has_source_delta_dr = _FALSE_;
  ppt->has_source_delta_ur = _FALSE_;
  ppt->has_source_delta_ncdm = _FALSE_;
  ppt->has_source_theta_m = _FALSE_;
  ppt->has_source_theta_g = _FALSE_;
  ppt->has_source_theta_b = _FALSE_;
  ppt->has_source_theta_cdm = _FALSE_;
  ppt->has_source_theta_dcdm = _FALSE_;
  ppt->has_source_theta_fld = _FALSE_;
  ppt->has_source_theta_scf = _FALSE_;
  ppt->has_source_phi_prime_smg = _FALSE_; //scalar field
  ppt->has_source_theta_dr = _FALSE_;
  ppt->has_source_theta_ur = _FALSE_;
  ppt->has_source_theta_ncdm = _FALSE_;
  ppt->has_source_phi = _FALSE_;
  ppt->has_source_phi_prime = _FALSE_;
  ppt->has_source_phi_plus_psi = _FALSE_;
  ppt->has_source_psi = _FALSE_;

  /** - source flags and indices, for sources that all modes have in
      common (temperature, polarization, ...). For temperature, the
      term t2 is always non-zero, while other terms are non-zero only
      for scalars and vectors. For polarization, the term e is always
      non-zero, while the term b is only for vectors and tensors. */

  if (ppt->has_cl_cmb_temperature == _TRUE_) {
    ppt->has_source_t = _TRUE_;
    ppt->has_cmb = _TRUE_;
  }

  if (ppt->has_cl_cmb_polarization == _TRUE_) {
    ppt->has_source_p = _TRUE_;
    ppt->has_cmb = _TRUE_;
  }

  index_type = 0;
  class_define_index(ppt->index_tp_t2,ppt->has_source_t,index_type,1);
  class_define_index(ppt->index_tp_p,ppt->has_source_p,index_type,1);
  index_type_common = index_type;

  /* indices for perturbed recombination */

  class_define_index(ppt->index_tp_perturbed_recombination_delta_temp,ppt->has_perturbed_recombination,index_type,1);
  class_define_index(ppt->index_tp_perturbed_recombination_delta_chi,ppt->has_perturbed_recombination,index_type,1);

  /** define k values with perturb_get_k_list() */

  class_call(perturb_get_k_list(ppr,
                                pba,
                                pth,
                                ppt),
             ppt->error_message,
             ppt->error_message);

  /** - loop over modes. Initialize flags and indices which are specific to each mode. */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {

    /** (a) scalars */

    if (_scalars_) {

      /** - source flags and indices, for sources that are specific to scalars */

      if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) || (ppt->has_cl_lensing_potential)) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_phi_plus_psi = _TRUE_;
      }

      if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m)) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_delta_m = _TRUE_;
      }

      /* density transfer functions: one delta source per species present
         in the background */
      if (ppt->has_density_transfers == _TRUE_) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_delta_g = _TRUE_;
        ppt->has_source_delta_b = _TRUE_;
        if (pba->has_cdm == _TRUE_)
          ppt->has_source_delta_cdm = _TRUE_;
        if (pba->has_dcdm == _TRUE_)
          ppt->has_source_delta_dcdm = _TRUE_;
        if (pba->has_fld == _TRUE_)
          ppt->has_source_delta_fld = _TRUE_;
        if (pba->has_scf == _TRUE_)
          ppt->has_source_delta_scf = _TRUE_;
        if (pba->has_ur == _TRUE_)
          ppt->has_source_delta_ur = _TRUE_;
        if (pba->has_smg == _TRUE_)
          ppt->has_source_phi_smg = _TRUE_;
        if (pba->has_dr == _TRUE_)
          ppt->has_source_delta_dr = _TRUE_;
        if (pba->has_ncdm == _TRUE_)
          ppt->has_source_delta_ncdm = _TRUE_;
      }

      /* velocity transfer functions: one theta source per species; note that
         theta_cdm vanishes by construction in the synchronous gauge */
      if (ppt->has_velocity_transfers == _TRUE_) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_theta_g = _TRUE_;
        ppt->has_source_theta_b = _TRUE_;
        if ((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))
          ppt->has_source_theta_cdm = _TRUE_;
        if (pba->has_dcdm == _TRUE_)
          ppt->has_source_theta_dcdm = _TRUE_;
        if (pba->has_fld == _TRUE_)
          ppt->has_source_theta_fld = _TRUE_;
        if (pba->has_scf == _TRUE_)
          ppt->has_source_theta_scf = _TRUE_;
        if (pba->has_smg == _TRUE_)
          ppt->has_source_phi_prime_smg = _TRUE_;
        if (pba->has_ur == _TRUE_)
          ppt->has_source_theta_ur = _TRUE_;
        if (pba->has_dr == _TRUE_)
          ppt->has_source_theta_dr = _TRUE_;
        if (pba->has_ncdm == _TRUE_)
          ppt->has_source_theta_ncdm = _TRUE_;
      }

      /* number-count Cl's: each observational effect switches on the sources
         it needs */
      if (ppt->has_cl_number_count == _TRUE_) {
        ppt->has_lss = _TRUE_;
        if (ppt->has_nc_density == _TRUE_) {
          ppt->has_source_delta_m = _TRUE_;
        }
        if (ppt->has_nc_rsd == _TRUE_) {
          ppt->has_source_theta_m = _TRUE_;
        }
        if (ppt->has_nc_lens == _TRUE_) {
          ppt->has_source_phi_plus_psi = _TRUE_;
        }
        if (ppt->has_nc_gr == _TRUE_) {
          ppt->has_source_phi = _TRUE_;
          ppt->has_source_psi = _TRUE_;
          ppt->has_source_phi_prime = _TRUE_;
          ppt->has_source_phi_plus_psi = _TRUE_;
        }
      }

      /* assign one contiguous type index per activated scalar source,
         starting after the common (t2, p) types; ncdm species get one
         index each (increment of pba->N_ncdm) */
      index_type = index_type_common;
      class_define_index(ppt->index_tp_t0, ppt->has_source_t, index_type,1);
      class_define_index(ppt->index_tp_t1, ppt->has_source_t, index_type,1);
      class_define_index(ppt->index_tp_delta_m, ppt->has_source_delta_m, index_type,1);
      class_define_index(ppt->index_tp_delta_g, ppt->has_source_delta_g, index_type,1);
      class_define_index(ppt->index_tp_delta_b, ppt->has_source_delta_b, index_type,1);
      class_define_index(ppt->index_tp_delta_cdm, ppt->has_source_delta_cdm, index_type,1);
      class_define_index(ppt->index_tp_delta_dcdm, ppt->has_source_delta_dcdm,index_type,1);
      class_define_index(ppt->index_tp_delta_fld, ppt->has_source_delta_fld, index_type,1);
      class_define_index(ppt->index_tp_delta_scf, ppt->has_source_delta_scf, index_type,1);
      class_define_index(ppt->index_tp_phi_smg, ppt->has_source_phi_smg, index_type,1);
      class_define_index(ppt->index_tp_delta_dr, ppt->has_source_delta_dr, index_type,1);
      class_define_index(ppt->index_tp_delta_ur, ppt->has_source_delta_ur, index_type,1);
      class_define_index(ppt->index_tp_delta_ncdm1,ppt->has_source_delta_ncdm,index_type,pba->N_ncdm);
      class_define_index(ppt->index_tp_theta_m, ppt->has_source_theta_m, index_type,1);
      class_define_index(ppt->index_tp_theta_g, ppt->has_source_theta_g, index_type,1);
      class_define_index(ppt->index_tp_theta_b, ppt->has_source_theta_b, index_type,1);
      class_define_index(ppt->index_tp_theta_cdm, ppt->has_source_theta_cdm, index_type,1);
      class_define_index(ppt->index_tp_theta_dcdm, ppt->has_source_theta_dcdm,index_type,1);
      class_define_index(ppt->index_tp_theta_fld, ppt->has_source_theta_fld, index_type,1);
      class_define_index(ppt->index_tp_theta_scf, ppt->has_source_theta_scf, index_type,1);
      class_define_index(ppt->index_tp_phi_prime_smg, ppt->has_source_phi_prime_smg, index_type,1);
      class_define_index(ppt->index_tp_theta_dr, ppt->has_source_theta_dr, index_type,1);
      class_define_index(ppt->index_tp_theta_ur, ppt->has_source_theta_ur, index_type,1);
      class_define_index(ppt->index_tp_theta_ncdm1,ppt->has_source_theta_ncdm,index_type,pba->N_ncdm);
      class_define_index(ppt->index_tp_phi, ppt->has_source_phi, index_type,1);
      class_define_index(ppt->index_tp_phi_prime, ppt->has_source_phi_prime, index_type,1);
      class_define_index(ppt->index_tp_phi_plus_psi,ppt->has_source_phi_plus_psi,index_type,1);
      class_define_index(ppt->index_tp_psi, ppt->has_source_psi, index_type,1);
      ppt->tp_size[index_md] = index_type;

      class_test(index_type == 0,
                 ppt->error_message,
                 "inconsistent input: you asked for scalars, so you should have at least one non-zero scalar source type (temperature, polarisation, lensing/gravitational potential, ...). Please adjust your input.");

      /** -- count scalar initial conditions (for scalars: ad, cdi, nid, niv; for tensors: only one) and assign corresponding indices */

      index_ic = 0;
      class_define_index(ppt->index_ic_ad, ppt->has_ad, index_ic,1);
      class_define_index(ppt->index_ic_bi, ppt->has_bi, index_ic,1);
      class_define_index(ppt->index_ic_cdi,ppt->has_cdi,index_ic,1);
      class_define_index(ppt->index_ic_nid,ppt->has_nid,index_ic,1);
      class_define_index(ppt->index_ic_niv,ppt->has_niv,index_ic,1);
      ppt->ic_size[index_md] = index_ic;

      class_test(index_ic == 0,
                 ppt->error_message,
                 "you should have at least one adiabatic or isocurvature initial condition...} !!!");
    }

    if (_vectors_) {

      /** - source flags and indices, for sources that are specific to vectors */

      index_type = index_type_common;
      class_define_index(ppt->index_tp_t1,ppt->has_source_t,index_type,1);
      ppt->tp_size[index_md] = index_type;

      /* test deliberately disabled while vectors are not implemented:
         class_test(index_type == 0,
         ppt->error_message,
         "inconsistent input: you asked for vectors, so you should have at least one non-zero vector source type (temperature or polarisation). Please adjust your input.");
      */

      /** -- initial conditions for vectors*/

      index_ic = 0;
      /* not coded yet */
      ppt->ic_size[index_md] = index_ic;

    }

    /** (b) tensors */

    if (_tensors_) {

      /** - source flags and indices, for sources that are specific to tensors */

      index_type = index_type_common;
      /* nothing specific, unlike for vectors and scalars! */
      ppt->tp_size[index_md] = index_type;

      /* test deliberately disabled (common types suffice for tensors):
         class_test(index_type == 0,
         ppt->error_message,
         "inconsistent input: you asked for tensors, so you should have at least one non-zero tensor source type (temperature or polarisation). Please adjust your input.");
      */

      /** -- only one initial condition for tensors*/

      index_ic = 0;
      class_define_index(ppt->index_ic_ten,_TRUE_,index_ic,1);
      ppt->ic_size[index_md] = index_ic;

    }

    /** (c) for each mode, allocate array of arrays of source functions for each initial conditions and wavenumber, (ppt->source[index_md])[index_ic][index_type] */

    class_alloc(ppt->sources[index_md],
                ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
                ppt->error_message);

  }

  return _SUCCESS_;
}
/**
* Define time sampling for source functions.
*
* For each type, compute the list of values of tau at which sources
* will be sampled. Knowing the number of tau values, allocate all
* arrays of source functions.
*
* @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
* @param pth Input : pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_timesampling_for_sources(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
int counter;
int index_md;
int index_type;
int index_ic;
int last_index_back;
int last_index_thermo;
int first_index_back;
int first_index_thermo;
double tau;
double tau_ini;
double tau_lower;
double tau_upper;
double tau_mid;
double timescale_source;
double rate_thermo;
double rate_isw_squared;
double a_prime_over_a;
double a_primeprime_over_a;
double * pvecback;
double * pvecthermo;
/** - allocate background/thermodynamics vectors */
class_alloc(pvecback,pba->bg_size_short*sizeof(double),ppt->error_message);
class_alloc(pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
/** - first, just count the number of sampling points in order to allocate the array containing all values: */
/** (a) if CMB requested, first sampling point = when the universe
stops being opaque; otherwise, start sampling gravitational
potential at recombination [however, if perturbed recombination
is requested, we also need to start the system before
recombination. Otherwise, the initial conditions for gas
temperature and ionization fraction perturbations (delta_T = 1/3
delta_b, delta_x_e) are not valid]. */
if ((ppt->has_cmb == _TRUE_)||(ppt->has_perturbed_recombination == _TRUE_)) {
/* using bisection, search time tau such that the ratio of thermo
to Hubble time scales tau_c/tau_h=aH/kappa' is equal to
start_sources_at_tau_c_over_tau_h */
tau_lower = pth->tau_ini;
class_call(background_at_tau(pba,
tau_lower,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to an earlier time than the one at which the integration of thermodynamical variables started (tau=%g). You should increase either 'start_sources_at_tau_c_over_tau_h' or 'recfast_z_initial'\n",
tau_lower);
tau_upper = pth->tau_rec;
class_call(background_at_tau(pba,
tau_upper,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] <
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to a time after recombination. You should decrease 'start_sources_at_tau_c_over_tau_h'\n");
tau_mid = 0.5*(tau_lower + tau_upper);
while (tau_upper - tau_lower > ppr->tol_tau_approx) {
class_call(background_at_tau(pba,
tau_mid,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h)
tau_upper = tau_mid;
else
tau_lower = tau_mid;
tau_mid = 0.5*(tau_lower + tau_upper);
}
tau_ini = tau_mid;
}
else {
/* case when CMB not requested: start at recombination time */
tau_ini = pth->tau_rec;
/* set values of first_index_back/thermo */
class_call(background_at_tau(pba,
tau_ini,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
}
counter = 1;
/** (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where:
- if CMB requested:
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
- if CMB not requested:
timescale_source = 1/aH; repeat till today.
*/
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
/* variation rate given by Hubble time */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
}
/** - infer total number of time steps, ppt->tau_size */
ppt->tau_size = counter;
/** - allocate array of time steps, ppt->tau_sampling[index_tau] */
class_alloc(ppt->tau_sampling,ppt->tau_size * sizeof(double),ppt->error_message);
/** - repeat the same steps, now filling the array with each tau value: */
/** (a) first sampling point = when the universe stops being opaque */
counter = 0;
ppt->tau_sampling[counter]=tau_ini;
/** (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today
- if CMB not requested:
timescale_source = 1/aH; repeat till today. */
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
ppt->tau_sampling[counter]=tau;
}
/** - last sampling point = exactly today */
ppt->tau_sampling[counter] = pba->conformal_age;
free(pvecback);
free(pvecthermo);
/** - loop over modes, initial conditions and types. For each of
them, allocate array of source functions. */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
class_alloc(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
ppt->k_size[index_md] * ppt->tau_size * sizeof(double),
ppt->error_message);
}
}
}
return _SUCCESS_;
}
/**
* Define the number of comoving wavenumbers using the information
* passed in the precision structure.
*
 * @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
 * @param pth Input : pointer to thermodynamics structure
 * @param ppt Input : pointer to perturbation structure
* @return the error status
*/
int perturb_get_k_list(
                       struct precision * ppr,
                       struct background * pba,
                       struct thermo * pth,
                       struct perturbs * ppt
                       ) {

  /** Summary: builds, for each requested mode (scalar/vector/tensor),
      the list of comoving wavenumbers ppt->k[index_md][index_k], plus
      the sub-sizes k_size_cmb/k_size_cl/k_size, and finally the global
      ppt->k_min / ppt->k_max over all modes. */

  int index_k, index_k_output, index_mode;
  double k,k_min=0.,k_rec,step,tau1;
  double * k_max_cmb;
  double * k_max_cl;
  double k_max=0.;
  double scale2;
  double *tmp_k_list;
  int newk_size, index_newk, add_k_output_value;

  /* both quantities below appear in denominators */
  class_test(ppr->k_step_transition == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  class_test(pth->rs_rec == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  /* allocate arrays related to k list for each mode */

  class_alloc(ppt->k_size_cmb,
              ppt->md_size*sizeof(int),
              ppt->error_message);
  class_alloc(ppt->k_size_cl,
              ppt->md_size*sizeof(int),
              ppt->error_message);
  class_alloc(ppt->k_size,
              ppt->md_size*sizeof(int),
              ppt->error_message);
  class_alloc(ppt->k,
              ppt->md_size*sizeof(double*),
              ppt->error_message);

  class_calloc(k_max_cmb,
               ppt->md_size,
               sizeof(double),
               ppt->error_message);
  class_calloc(k_max_cl,
               ppt->md_size,
               sizeof(double),
               ppt->error_message);

  /** - scalar modes */

  if (ppt->has_scalars == _TRUE_) {

    /* first value */
    if (pba->sgnK == 0) {
      /* K=0 (flat) : start close to zero */
      k_min=ppr->k_min_tau0/pba->conformal_age;
    }
    else if (pba->sgnK == -1) {
      /* K<0 (open)  : start close to sqrt(-K)
         (in transfer modules, for scalars, this will correspond to q close to zero;
         for vectors and tensors, this value is even smaller than the minimum necessary value) */
      k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));

    }
    else if (pba->sgnK == 1) {
      /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
      k_min = sqrt((8.-1.e-4)*pba->K);
    }

    /** - find k_max (as well as k_max_cmb[ppt->index_md_scalars], k_max_cl[ppt->index_md_scalars]) */

    k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */

    k_max_cmb[ppt->index_md_scalars] = k_min;
    k_max_cl[ppt->index_md_scalars] = k_min;
    k_max = k_min;

    if (ppt->has_cls == _TRUE_) {

      /* find k_max_cmb[ppt->index_md_scalars] : */

      /* choose a k_max_cmb[ppt->index_md_scalars] corresponding to a wavelength on the last
         scattering surface seen today under an angle smaller than
         pi/lmax: this is equivalent to
         k_max_cl[ppt->index_md_scalars]*[comoving angular diameter distance] > l_max */

      k_max_cmb[ppt->index_md_scalars] = ppr->k_max_tau0_over_l_max*ppt->l_scalar_max
        /pba->conformal_age/pth->angular_rescaling;
      k_max_cl[ppt->index_md_scalars] = k_max_cmb[ppt->index_md_scalars];
      k_max     = k_max_cmb[ppt->index_md_scalars];

      /* find k_max_cl[ppt->index_md_scalars] : */

      /* if we need density/lensing Cl's, we must impose a stronger condition,
         such that the minimum wavelength on the shell corresponding
         to the center of smallest redshift bin is seen under an
         angle smaller than pi/lmax. So we must multiply our previous
         k_max_cl[ppt->index_md_scalars] by the ratio tau0/(tau0-tau[center of smallest
         redshift bin]). Note that we could do the same with the
         lensing potential if we needed a very precise C_l^phi-phi at
         large l. We don't do it by default, because the lensed ClT,
         ClE would be marginally affected. */

      if ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)) {

        class_call(background_tau_of_z(pba,
                                       ppt->selection_mean[0],
                                       &tau1),
                   pba->error_message,
                   ppt->error_message);

        k_max_cl[ppt->index_md_scalars] = MAX(k_max_cl[ppt->index_md_scalars],ppr->k_max_tau0_over_l_max*ppt->l_lss_max/(pba->conformal_age-tau1)); // to be very accurate we should use angular diameter distance to given redshift instead of comoving radius: would implement corrections depending on curvature
        k_max    = k_max_cl[ppt->index_md_scalars];
      }
    }

    /* find k_max: */

    if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_))
      k_max = MAX(k_max,ppt->k_max_for_pk);

    if (ppt->has_nl_corrections_based_on_delta_m == _TRUE_)
      k_max = MAX(k_max,ppr->halofit_min_k_max);

    /** - test that result for k_min, k_max make sense */

    class_test(k_min<0.,
               ppt->error_message,
               "buggy definition of k_min");

    class_test(k_max<0.,
               ppt->error_message,
               "buggy definition of k_max");

    class_test(k_max<k_min,
               ppt->error_message,
               "buggy definition of k_min and/or k_max");

    /* if K>0, the transfer function will be calculated for discrete
       integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
       m=0,1,2 for scalars/vectors/tensors. However we are free to
       define in the perturbation module some arbitrary values of k:
       later on, the transfer module will interpolate at values of k
       corresponding exactly to integer values of nu. Hence, apart
       from the value of k_min and the step size in the vicinity of
       k_min, we define exactly the same sampling in the three cases
       K=0, K<0, K>0 */

    /* allocate array with, for the moment, the largest possible size */
    class_alloc(ppt->k[ppt->index_md_scalars],
                ((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+
                 (int)(MAX(ppr->k_per_decade_for_pk,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3)
                *sizeof(double),ppt->error_message);

    /* first value */

    index_k=0;
    k = k_min;
    ppt->k[ppt->index_md_scalars][index_k] = k;
    index_k++;

    /* values until k_max_cmb[ppt->index_md_scalars] */

    while (k < k_max_cmb[ppt->index_md_scalars]) {

      /* the linear step is not constant, it has a step-like shape,
         centered around the characteristic scale set by the sound
         horizon at recombination (associated to the comoving wavenumber
         k_rec) */

      step = (ppr->k_step_super
              + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
              * (ppr->k_step_sub-ppr->k_step_super)) * k_rec;

      /* there is one other thing to take into account in the step
         size. There are two other characteristic scales that matter for
         the sampling: the Hubble scale today, k0=a0H0, and eventually
         curvature scale sqrt(|K|). We define "scale2" as the sum of the
         squared Hubble radius and squared curvature radius. We need to
         increase the sampling for k<sqrt(scale2), in order to get the
         first multipoles accurate enough. The formula below reduces it
         gradually in the k-->0 limit, by up to a factor 10. The actual
         stepsize is still fixed by k_step_super, this is just a
         reduction factor. */

      scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);

      step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);

      class_test(step / k < ppr->smallest_allowed_variation,
                 ppt->error_message,
                 "k step =%e < machine precision : leads either to numerical error or infinite loop",
                 step * k_rec);

      k += step;

      class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1],
                 ppt->error_message,
                 "consecutive values of k should differ and should be in growing order");

      ppt->k[ppt->index_md_scalars][index_k] = k;

      index_k++;
    }

    ppt->k_size_cmb[ppt->index_md_scalars] = index_k;

    /* values until k_max_cl[ppt->index_md_scalars] */

    while (k < k_max_cl[ppt->index_md_scalars]) {

      k *= pow(10.,1./(ppr->k_per_decade_for_pk
                       +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
                       *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));

      ppt->k[ppt->index_md_scalars][index_k] = k;
      index_k++;
    }

    ppt->k_size_cl[ppt->index_md_scalars] = index_k;

    /* values until k_max */

    while (k < k_max) {

      k *= pow(10.,1./(ppr->k_per_decade_for_pk
                       +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
                       *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));

      ppt->k[ppt->index_md_scalars][index_k] = k;
      index_k++;
    }

    ppt->k_size[ppt->index_md_scalars] = index_k;

    /* now that the true size is known, shrink the array to it */
    class_realloc(ppt->k[ppt->index_md_scalars],
                  ppt->k[ppt->index_md_scalars],
                  ppt->k_size[ppt->index_md_scalars]*sizeof(double),
                  ppt->error_message);
  }

  /** - vector modes */

  if (ppt->has_vectors == _TRUE_) {

    /* first value */
    if (pba->sgnK == 0) {
      /* K=0 (flat) : start close to zero */
      k_min=ppr->k_min_tau0/pba->conformal_age;
    }
    else if (pba->sgnK == -1) {
      /* K<0 (open)  : start close to sqrt(-K)
         (in transfer modules, for scalars, this will correspond to q close to zero;
         for vectors and tensors, this value is even smaller than the minimum necessary value) */
      k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
    }
    else if (pba->sgnK == 1) {
      /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
      k_min = sqrt((7.-1.e-4)*pba->K);
    }

    /** - find k_max (as well as k_max_cmb[ppt->index_md_vectors], k_max_cl[ppt->index_md_vectors]) */

    k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */

    k_max_cmb[ppt->index_md_vectors] = k_min;
    k_max_cl[ppt->index_md_vectors] = k_min;
    k_max = k_min;

    if (ppt->has_cls == _TRUE_) {

      /* find k_max_cmb: */

      /* choose a k_max_cmb corresponding to a wavelength on the last
         scattering surface seen today under an angle smaller than
         pi/lmax: this is equivalent to
         k_max_cl*[comoving angular diameter distance] > l_max */

      k_max_cmb[ppt->index_md_vectors] = ppr->k_max_tau0_over_l_max*ppt->l_vector_max
        /pba->conformal_age/pth->angular_rescaling;
      k_max_cl[ppt->index_md_vectors] = k_max_cmb[ppt->index_md_vectors];
      k_max     = k_max_cmb[ppt->index_md_vectors];
    }

    /** - test that result for k_min, k_max make sense */

    class_test(k_min<0.,
               ppt->error_message,
               "buggy definition of k_min");

    class_test(k_max<0.,
               ppt->error_message,
               "buggy definition of k_max");

    class_test(k_max<k_min,
               ppt->error_message,
               "buggy definition of k_min and/or k_max");

    /* if K>0, the transfer function will be calculated for discrete
       integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
       m=0,1,2 for scalars/vectors/tensors. However we are free to
       define in the perturbation module some arbitrary values of k:
       later on, the transfer module will interpolate at values of k
       corresponding exactly to integer values of nu. Hence, apart
       from the value of k_min and the step size in the vicinity of
       k_min, we define exactly the same sampling in the three cases
       K=0, K<0, K>0 */

    /* allocate array with, for the moment, the largest possible size */
    class_alloc(ppt->k[ppt->index_md_vectors],
                ((int)((k_max_cmb[ppt->index_md_vectors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
                *sizeof(double),ppt->error_message);

    /* first value */

    index_k=0;
    k = k_min;
    ppt->k[ppt->index_md_vectors][index_k] = k;
    index_k++;

    /* values until k_max_cmb[ppt->index_md_vectors] */

    while (k < k_max_cmb[ppt->index_md_vectors]) {

      /* the linear step is not constant, it has a step-like shape,
         centered around the characteristic scale set by the sound
         horizon at recombination (associated to the comoving wavenumber
         k_rec) */

      step = (ppr->k_step_super
              + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
              * (ppr->k_step_sub-ppr->k_step_super)) * k_rec;

      /* there is one other thing to take into account in the step
         size. There are two other characteristic scales that matter for
         the sampling: the Hubble scale today, k0=a0H0, and eventually
         curvature scale sqrt(|K|). We define "scale2" as the sum of the
         squared Hubble radius and squared curvature radius. We need to
         increase the sampling for k<sqrt(scale2), in order to get the
         first multipoles accurate enough. The formula below reduces it
         gradually in the k-->0 limit, by up to a factor 10. The actual
         stepsize is still fixed by k_step_super, this is just a
         reduction factor. */

      scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);

      step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);

      class_test(step / k < ppr->smallest_allowed_variation,
                 ppt->error_message,
                 "k step =%e < machine precision : leads either to numerical error or infinite loop",
                 step * k_rec);

      k += step;

      /* BUGFIX: this monotonicity check used to read the SCALAR k list
         (ppt->k[ppt->index_md_scalars]), a copy-paste error from the
         scalar block above; it must compare against the vector list
         being built here (cf. the analogous tensor block, which
         correctly uses index_md_tensors). */
      class_test(k <= ppt->k[ppt->index_md_vectors][index_k-1],
                 ppt->error_message,
                 "consecutive values of k should differ and should be in growing order");

      ppt->k[ppt->index_md_vectors][index_k] = k;

      index_k++;
    }

    ppt->k_size_cmb[ppt->index_md_vectors] = index_k;
    ppt->k_size_cl[ppt->index_md_vectors] = index_k;
    ppt->k_size[ppt->index_md_vectors] = index_k;

    /* now that the true size is known, shrink the array to it */
    class_realloc(ppt->k[ppt->index_md_vectors],
                  ppt->k[ppt->index_md_vectors],
                  ppt->k_size[ppt->index_md_vectors]*sizeof(double),
                  ppt->error_message);
  }

  /** - tensor modes */

  if (ppt->has_tensors == _TRUE_) {

    /* first value */
    if (pba->sgnK == 0) {
      /* K=0 (flat) : start close to zero */
      k_min=ppr->k_min_tau0/pba->conformal_age;
    }
    else if (pba->sgnK == -1) {
      /* K<0 (open)  : start close to sqrt(-K)
         (in transfer modules, for scalars, this will correspond to q close to zero;
         for vectors and tensors, this value is even smaller than the minimum necessary value) */
      k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
    }
    else if (pba->sgnK == 1) {
      /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
      k_min = sqrt((6.-1.e-4)*pba->K);
    }

    /** - find k_max (as well as k_max_cmb[ppt->index_md_tensors], k_max_cl[ppt->index_md_tensors]) */

    k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */

    k_max_cmb[ppt->index_md_tensors] = k_min;
    k_max_cl[ppt->index_md_tensors] = k_min;
    k_max = k_min;

    if (ppt->has_cls == _TRUE_) {

      /* find k_max_cmb[ppt->index_md_tensors]: */

      /* choose a k_max_cmb[ppt->index_md_tensors] corresponding to a wavelength on the last
         scattering surface seen today under an angle smaller than
         pi/lmax: this is equivalent to
         k_max_cl[ppt->index_md_tensors]*[comoving angular diameter distance] > l_max */

      k_max_cmb[ppt->index_md_tensors] = ppr->k_max_tau0_over_l_max*ppt->l_tensor_max
        /pba->conformal_age/pth->angular_rescaling;
      k_max_cl[ppt->index_md_tensors] = k_max_cmb[ppt->index_md_tensors];
      k_max     = k_max_cmb[ppt->index_md_tensors];
    }

    /** - test that result for k_min, k_max make sense */

    class_test(k_min<0.,
               ppt->error_message,
               "buggy definition of k_min");

    class_test(k_max<0.,
               ppt->error_message,
               "buggy definition of k_max");

    class_test(k_max<k_min,
               ppt->error_message,
               "buggy definition of k_min and/or k_max");

    /* if K>0, the transfer function will be calculated for discrete
       integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
       m=0,1,2 for scalars/vectors/tensors. However we are free to
       define in the perturbation module some arbitrary values of k:
       later on, the transfer module will interpolate at values of k
       corresponding exactly to integer values of nu. Hence, apart
       from the value of k_min and the step size in the vicinity of
       k_min, we define exactly the same sampling in the three cases
       K=0, K<0, K>0 */

    /* allocate array with, for the moment, the largest possible size */
    class_alloc(ppt->k[ppt->index_md_tensors],
                ((int)((k_max_cmb[ppt->index_md_tensors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
                *sizeof(double),ppt->error_message);

    /* first value */

    index_k=0;
    k = k_min;
    ppt->k[ppt->index_md_tensors][index_k] = k;
    index_k++;

    /* values until k_max_cmb[ppt->index_md_tensors] */

    while (k < k_max_cmb[ppt->index_md_tensors]) {

      /* the linear step is not constant, it has a step-like shape,
         centered around the characteristic scale set by the sound
         horizon at recombination (associated to the comoving wavenumber
         k_rec) */

      step = (ppr->k_step_super
              + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
              * (ppr->k_step_sub-ppr->k_step_super)) * k_rec;

      /* there is one other thing to take into account in the step
         size. There are two other characteristic scales that matter for
         the sampling: the Hubble scale today, k0=a0H0, and eventually
         curvature scale sqrt(|K|). We define "scale2" as the sum of the
         squared Hubble radius and squared curvature radius. We need to
         increase the sampling for k<sqrt(scale2), in order to get the
         first multipoles accurate enough. The formula below reduces it
         gradually in the k-->0 limit, by up to a factor 10. The actual
         stepsize is still fixed by k_step_super, this is just a
         reduction factor. */

      scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);

      step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);

      class_test(step / k < ppr->smallest_allowed_variation,
                 ppt->error_message,
                 "k step =%e < machine precision : leads either to numerical error or infinite loop",
                 step * k_rec);

      k += step;

      class_test(k <= ppt->k[ppt->index_md_tensors][index_k-1],
                 ppt->error_message,
                 "consecutive values of k should differ and should be in growing order");

      ppt->k[ppt->index_md_tensors][index_k] = k;

      index_k++;
    }

    ppt->k_size_cmb[ppt->index_md_tensors] = index_k;
    ppt->k_size_cl[ppt->index_md_tensors] = index_k;
    ppt->k_size[ppt->index_md_tensors] = index_k;

    /* now that the true size is known, shrink the array to it */
    class_realloc(ppt->k[ppt->index_md_tensors],
                  ppt->k[ppt->index_md_tensors],
                  ppt->k_size[ppt->index_md_tensors]*sizeof(double),
                  ppt->error_message);
  }

  /* If user asked for k_output_values, add those to all k lists: */
  if (ppt->k_output_values_num>0){

    /* Allocate storage */
    /* NOTE(review): sizeof(double) over-allocates if index_k_output_values
       is declared as an int array (harmless since sizeof(double) >=
       sizeof(int), but worth confirming against the declaration). */
    class_alloc(ppt->index_k_output_values,sizeof(double)*ppt->md_size*ppt->k_output_values_num,ppt->error_message);

    /** Find indices in ppt->k[index_md] corresponding to 'k_output_values'.
        We are assuming that ppt->k is sorted and growing, and we have made sure
        that ppt->k_output_values is also sorted and growing.*/
    for (index_mode=0; index_mode<ppt->md_size; index_mode++){

      newk_size = ppt->k_size[index_mode]+ppt->k_output_values_num;

      class_alloc(tmp_k_list,sizeof(double)*newk_size,ppt->error_message);

      index_k=0;
      index_k_output=0;
      for (index_newk=0; index_newk<newk_size; index_newk++){
        /** Decide if we should add k_output_value now. This has to be this complicated, since we
            can only compare the k-values when both indices are in range.*/
        if (index_k >= ppt->k_size[index_mode])
          add_k_output_value = _TRUE_;
        else if (index_k_output >= ppt->k_output_values_num)
          add_k_output_value = _FALSE_;
        else if (ppt->k_output_values[index_k_output] < ppt->k[index_mode][index_k])
          add_k_output_value = _TRUE_;
        else
          add_k_output_value = _FALSE_;

        if (add_k_output_value == _TRUE_){
          tmp_k_list[index_newk] = ppt->k_output_values[index_k_output];
          ppt->index_k_output_values[index_mode*ppt->k_output_values_num+index_k_output]=index_newk;
          index_k_output++;
        }
        else{
          tmp_k_list[index_newk] = ppt->k[index_mode][index_k];
          index_k++;
        }
      }

      free(ppt->k[index_mode]);
      ppt->k[index_mode] = tmp_k_list;
      ppt->k_size[index_mode] = newk_size;

      index_k = newk_size-1;
      while (ppt->k[index_mode][index_k] > k_max_cl[index_mode])
        index_k--;
      ppt->k_size_cl[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);

      index_k = newk_size-1;
      while (ppt->k[index_mode][index_k] > k_max_cmb[index_mode])
        index_k--;
      ppt->k_size_cmb[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);

      /** The two MIN statements are here because in a normal run, the cl and cmb
          arrays contain a single k value larger than their respective k_max.
          We are mimicking this behaviour. */
    }
  }

  /* For testing, can be useful to print the k list in a file:

     FILE * out=fopen("output/k","w");

     for (index_k=0; index_k < ppt->k_size[0]; index_k++) {

     fprintf(out,"%e\n",ppt->k[0][index_k],pba->K);

     }
     fclose(out);
  */

  /* finally, find the global k_min and k_max for the ensemble of all modes (scalars, vectors, tensors) */

  ppt->k_min = _HUGE_;
  ppt->k_max = 0.;
  if (ppt->has_scalars == _TRUE_) {
    ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_scalars][0]); /* first value, inferred from perturbations structure */
    ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_scalars][ppt->k_size[ppt->index_md_scalars]-1]); /* last value, inferred from perturbations structure */
  }
  if (ppt->has_vectors == _TRUE_) {
    ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_vectors][0]); /* first value, inferred from perturbations structure */
    ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_vectors][ppt->k_size[ppt->index_md_vectors]-1]); /* last value, inferred from perturbations structure */
  }
  if (ppt->has_tensors == _TRUE_) {
    ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_tensors][0]); /* first value, inferred from perturbations structure */
    ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_tensors][ppt->k_size[ppt->index_md_tensors]-1]); /* last value, inferred from perturbations structure */
  }

  free(k_max_cmb);
  free(k_max_cl);

  return _SUCCESS_;

}
/**
* Initialize a perturb_workspace structure. All fields are allocated
* here, with the exception of the perturb_vector '->pv' field, which
* is allocated separately in perturb_vector_init. We allocate one
* such perturb_workspace structure per thread and per mode
* (scalar/../tensor). Then, for each thread, all initial conditions
* and wavenumbers will use the same workspace.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
 * @param ppw        Input/Output: pointer to perturb_workspace structure whose fields are allocated or filled here
* @return the error status
*/
int perturb_workspace_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
int index_mt=0;
int index_ap;
int l;
/** Compute maximum l_max for any multipole */;
if (_scalars_) {
ppw->max_l_max = MAX(ppr->l_max_g, ppr->l_max_pol_g);
if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
if (pba->has_dr == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_dr);
}
if (_tensors_) {
ppw->max_l_max = MAX(ppr->l_max_g_ten, ppr->l_max_pol_g_ten);
if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
}
/** Allocate s_l[] array for freestreaming of multipoles (see arXiv:1305.3261) and initialise
to 1.0 which is the K=0 value. */
class_alloc(ppw->s_l, sizeof(double)*(ppw->max_l_max+1),ppt->error_message);
for (l=0; l<=ppw->max_l_max; l++){
ppw->s_l[l] = 1.0;
}
/** - define indices of metric perturbations obeying to constraint
equations (this can be done once and for all, because the
vector of metric perturbations is the same whatever the
approximation scheme, unlike the vector of quantities to
be integrated, which is allocated separately in
perturb_vector_init) */
if (_scalars_) {
/* newtonian gauge */
if (ppt->gauge == newtonian) {
class_define_index(ppw->index_mt_psi,_TRUE_,index_mt,1); /* psi */
class_define_index(ppw->index_mt_phi_prime,_TRUE_,index_mt,1); /* phi' */
}
/* synchronous gauge (note that eta is counted in the vector of
quantities to be integrated, while here we only consider
quantities obeying to constraint equations) */
if (ppt->gauge == synchronous) {
class_define_index(ppw->index_mt_h_prime,_TRUE_,index_mt,1); /* h' */
class_define_index(ppw->index_mt_h_prime_prime,_TRUE_,index_mt,1); /* h'' */
class_define_index(ppw->index_mt_eta_prime,_TRUE_,index_mt,1); /* eta' */
class_define_index(ppw->index_mt_alpha,_TRUE_,index_mt,1); /* alpha = (h' + 6 tau') / (2 k**2) */
class_define_index(ppw->index_mt_alpha_prime,_TRUE_,index_mt,1); /* alpha' */
class_define_index(ppw->index_mt_vx_smg,pba->has_smg,index_mt,1); /* vx_smg (can be dynamical or not) */
class_define_index(ppw->index_mt_vx_prime_smg,pba->has_smg,index_mt,1); /* vx_smg' (can be dynamical or not) */
class_define_index(ppw->index_mt_vx_prime_prime_smg,pba->has_smg,index_mt,1); /* vx_smg'' (passed to integrator) */
}
}
if (_vectors_) {
/* newtonian gauge */
if (ppt->gauge == newtonian) {
class_define_index(ppw->index_mt_V_prime,_TRUE_,index_mt,1);
}
if (ppt->gauge == synchronous) {
class_define_index(ppw->index_mt_hv_prime_prime,_TRUE_,index_mt,1);
}
}
if (_tensors_) {
class_define_index(ppw->index_mt_gw_prime_prime,_TRUE_,index_mt,1);
}
ppw->mt_size = index_mt;
/** - allocate some workspace in which we will store temporarily the
values of background, thermodynamics, metric and source
quantities at a given time */
class_alloc(ppw->pvecback,pba->bg_size_normal*sizeof(double),ppt->error_message);
class_alloc(ppw->pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
class_alloc(ppw->pvecmetric,ppw->mt_size*sizeof(double),ppt->error_message);
/** - count number of approximation, initialize their indices, and allocate their flags */
index_ap=0;
class_define_index(ppw->index_ap_tca,_TRUE_,index_ap,1);
class_define_index(ppw->index_ap_rsa,_TRUE_,index_ap,1);
if (_scalars_) {
class_define_index(ppw->index_ap_ufa,pba->has_ur,index_ap,1);
class_define_index(ppw->index_ap_ncdmfa,pba->has_ncdm,index_ap,1);
}
ppw->ap_size=index_ap;
if (ppw->ap_size > 0)
class_alloc(ppw->approx,ppw->ap_size*sizeof(int),ppt->error_message);
/** - For definitness, initialize approximation flags to arbitrary
values (correct values are overwritten in
pertub_find_approximation_switches) */
if (_scalars_) {
ppw->approx[ppw->index_ap_tca]=(int)tca_on;
ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
if (pba->has_ur == _TRUE_) {
ppw->approx[ppw->index_ap_ufa]=(int)ufa_off;
}
if (pba->has_ncdm == _TRUE_) {
ppw->approx[ppw->index_ap_ncdmfa]=(int)ncdmfa_off;
}
}
if (_tensors_) {
ppw->approx[ppw->index_ap_tca]=(int)tca_on;
ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
}
/** - allocate fields where some of the perturbations are stored */
if (_scalars_) {
if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
class_alloc(ppw->delta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppw->theta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppw->shear_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
}
}
return _SUCCESS_;
}
/**
* Free the perturb_workspace structure (with the exception of the
* perturb_vector '->pv' field, which is freed separately in
* perturb_vector_free).
*
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param ppw Input: pointer to perturb_workspace structure to be freed
* @return the error status
*/
int perturb_workspace_free (
                            struct perturbs * ppt,
                            int index_md,
                            struct perturb_workspace * ppw
                            ) {

  /* temporary value vectors and free-streaming coefficients */
  free(ppw->pvecmetric);
  free(ppw->pvecthermo);
  free(ppw->pvecback);
  free(ppw->s_l);

  /* approximation flags were only allocated when at least one
     approximation scheme exists for this mode */
  if (ppw->ap_size > 0)
    free(ppw->approx);

  /* per-species ncdm storage exists only for scalar modes when
     transfer functions or delta_m sources were requested */
  if (_scalars_) {
    if ((ppt->has_density_transfers == _TRUE_) ||
        (ppt->has_velocity_transfers == _TRUE_) ||
        (ppt->has_source_delta_m == _TRUE_)) {
      free(ppw->shear_ncdm);
      free(ppw->theta_ncdm);
      free(ppw->delta_ncdm);
    }
  }

  /* finally release the workspace itself */
  free(ppw);

  return _SUCCESS_;
}
/**
* Solve the perturbation evolution for a given mode, initial
* condition and wavenumber, and compute the corresponding source
* functions.
*
* For a given mode, initial condition and wavenumber, this function
 * finds the time ranges over which the perturbations can be described
 * within a given approximation. For each such range, it initializes
 * (or redistributes) perturbations using perturb_vector_init(), and
* integrates over time. Whenever a "source sampling time" is passed,
* the source terms are computed and stored in the source table using
* perturb_sources().
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input/Output: pointer to the perturbation structure (output source functions S(k,tau) written here)
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param index_k Input: index of wavenumber
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @return the error status
*/
int perturb_solve(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
int index_ic,
int index_k,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
/* contains all fixed parameters, indices and workspaces used by the perturb_derivs function */
struct perturb_parameters_and_workspace ppaw;
/* conformal time */
double tau,tau_lower,tau_upper,tau_mid;
/* multipole */
int l;
/* index running over time */
int index_tau;
/* number of values in the tau_sampling array that should be considered for a given mode */
int tau_actual_size;
/* running index over types (temperature, etc) */
int index_type;
/* fourier mode */
double k;
/* number of time intervals where the approximation scheme is uniform */
int interval_number;
/* index running over such time intervals */
int index_interval;
/* number of time intervals where each particular approximation is uniform */
int * interval_number_of;
/* edge of intervals where approximation scheme is uniform: tau_ini, tau_switch_1, ..., tau_end */
double * interval_limit;
/* array of approximation scheme within each interval: interval_approx[index_interval][index_ap] */
int ** interval_approx;
/* index running over approximations */
int index_ap;
/* approximation scheme within previous interval: previous_approx[index_ap] */
int * previous_approx;
/* ncdm species index; flag used in the bisection below */
int n_ncdm,is_early_enough;
/* function pointers to the ODE evolvers defined elsewhere; one of them
is selected below according to ppr->evolver */
extern int evolver_rk();
extern int evolver_ndf15();
int (*generic_evolver)();
/* Related to the perturbation output: set to perturb_print_variables
only if this wavenumber was requested via k_output_values, NULL otherwise */
int (*perhaps_print_variables)();
int index_ikout;
/** - initialize indices relevant for back/thermo tables search */
ppw->last_index_back=0;
ppw->last_index_thermo=0;
ppw->inter_mode = pba->inter_normal;
/** - get wavenumber value */
k = ppt->k[index_md][index_k];
class_test(k == 0.,
ppt->error_message,
"stop to avoid division by zero");
/** - if non-zero curvature, update array of free-streaming coefficients
ppw->s_l, with s_l = sqrt(MAX(1 - K(l^2-1)/k^2, 0)) for l up to max_l_max */
if (pba->has_curvature == _TRUE_){
for (l = 0; l<=ppw->max_l_max; l++){
ppw->s_l[l] = sqrt(MAX(1.0-pba->K*(l*l-1.0)/k/k,0.));
}
}
/** - maximum value of tau for which sources are calculated for this wavenumber */
/* by default, today */
tau_actual_size = ppt->tau_size;
/** - using bisection, compute minimum value of tau for which this
wavenumber is integrated */
/* will be at least the first time in the background table */
tau_lower = pba->tau_table[0];
class_call(background_at_tau(pba,
tau_lower,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
/* thermodynamics evaluated at redshift z = 1/a - 1 corresponding to tau_lower */
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1.,
pth->inter_normal,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
/* check that this initial time is indeed OK given imposed
conditions on kappa' and on k/aH: the earliest available background
time must already satisfy the tight-coupling start criterion ... */
class_test(ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa] >
ppr->start_small_k_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_small_k_at_tau_c_over_tau_h' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa]);
/* ... and the super-horizon start criterion k/(aH) for this wavenumber */
class_test(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
ppr->start_large_k_at_tau_h_over_tau_k,
ppt->error_message,
"your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_large_k_at_tau_h_over_tau_k' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
ppt->k[index_md][ppt->k_size[index_md]-1]/ppw->pvecback[pba->index_bg_a]/ ppw->pvecback[pba->index_bg_H]);
/* every non-cold relic must still be ultra-relativistic (w close to 1/3)
at the earliest background time, otherwise its initial conditions would be wrong */
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
class_test(fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.)>ppr->tol_ncdm_initial_w,
ppt->error_message,
"your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time at which the ncdm species number %d is not ultra-relativistic anymore, with w=%g, p=%g and rho=%g\n",
n_ncdm,
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm],
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm],
ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]);
}
}
/* is at most the time at which sources must be sampled */
tau_upper = ppt->tau_sampling[0];
/* start bisection: invariant is that tau_lower is always "early enough"
(all starting criteria hold there) while tau_upper is presumed not to be;
the loop converges to the latest time at which integration may start */
tau_mid = 0.5*(tau_lower + tau_upper);
while ((tau_upper - tau_lower)/tau_lower > ppr->tol_tau_approx) {
is_early_enough = _TRUE_;
class_call(background_at_tau(pba,
tau_mid,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
/* if there are non-cold relics, check that they are relativistic enough */
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
if (fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.) > ppr->tol_ncdm_initial_w)
is_early_enough = _FALSE_;
}
}
/* also check that the two conditions on (aH/kappa') and (aH/k) are fulfilled
(thermodynamics only queried if still potentially early enough, to save work) */
if (is_early_enough == _TRUE_) {
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
if ((ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa] >
ppr->start_small_k_at_tau_c_over_tau_h) ||
(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
ppr->start_large_k_at_tau_h_over_tau_k))
is_early_enough = _FALSE_;
}
if (is_early_enough == _TRUE_)
tau_lower = tau_mid;
else
tau_upper = tau_mid;
tau_mid = 0.5*(tau_lower + tau_upper);
}
/* initial integration time for this wavenumber */
tau = tau_mid;
/** - find the number of intervals over which approximation scheme is constant */
class_alloc(interval_number_of,ppw->ap_size*sizeof(int),ppt->error_message);
ppw->inter_mode = pba->inter_normal;
class_call(perturb_find_approximation_number(ppr,
pba,
pth,
ppt,
index_md,
k,
ppw,
tau,
ppt->tau_sampling[tau_actual_size-1],
&interval_number,
interval_number_of),
ppt->error_message,
ppt->error_message);
/* interval_limit holds interval_number+1 edges; interval_approx holds
one flag array (ap_size entries) per interval */
class_alloc(interval_limit,(interval_number+1)*sizeof(double),ppt->error_message);
class_alloc(interval_approx,interval_number*sizeof(int*),ppt->error_message);
for (index_interval=0; index_interval<interval_number; index_interval++)
class_alloc(interval_approx[index_interval],ppw->ap_size*sizeof(int),ppt->error_message);
class_call(perturb_find_approximation_switches(ppr,
pba,
pth,
ppt,
index_md,
k,
ppw,
tau,
ppt->tau_sampling[tau_actual_size-1],
ppr->tol_tau_approx,
interval_number,
interval_number_of,
interval_limit,
interval_approx),
ppt->error_message,
ppt->error_message);
free(interval_number_of);
/** - fill the structure containing all fixed parameters, indices
and workspaces needed by perturb_derivs */
ppaw.ppr = ppr;
ppaw.pba = pba;
ppaw.pth = pth;
ppaw.ppt = ppt;
ppaw.index_md = index_md;
ppaw.index_ic = index_ic;
ppaw.index_k = index_k;
ppaw.k = k;
ppaw.ppw = ppw;
/* during the time integration, consecutive evaluations are close in tau,
so switch to the "closeby" interpolation mode and reset the search indices */
ppaw.ppw->inter_mode = pba->inter_closeby;
ppaw.ppw->last_index_back = 0;
ppaw.ppw->last_index_thermo = 0;
/** - check whether we need to print perturbations to a file for this wavenumber */
perhaps_print_variables = NULL;
ppw->index_ikout = -1;
for (index_ikout=0; index_ikout<ppt->k_output_values_num; index_ikout++){
if (ppt->index_k_output_values[index_md*ppt->k_output_values_num+index_ikout] == index_k){
ppw->index_ikout = index_ikout;
perhaps_print_variables = perturb_print_variables;
/**class_call(perturb_prepare_output_file(
pba,ppt,ppw,index_ikout,index_md),
ppt->error_message,
ppt->error_message);
*/
}
}
/** - loop over intervals over which approximation scheme is uniform. For each interval: */
for (index_interval=0; index_interval<interval_number; index_interval++) {
/** (a) fix the approximation scheme */
for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
ppw->approx[index_ap]=interval_approx[index_interval][index_ap];
/** (b) get the previous approximation scheme. If the current
interval starts from the initial time tau_ini, the previous
approximation is set to be a NULL pointer, so that the
function perturb_vector_init() knows that perturbations must
be initialized */
if (index_interval==0) {
previous_approx=NULL;
}
else {
previous_approx=interval_approx[index_interval-1];
}
/** (c) define the vector of perturbations to be integrated
over. If the current interval starts from the initial time
tau_ini, fill the vector with initial conditions for each
mode. If it starts from an approximation switching point,
redistribute correctly the perturbations from the previous to
the new vector of perturbations. */
class_call(perturb_vector_init(ppr,
pba,
pth,
ppt,
index_md,
index_ic,
k,
interval_limit[index_interval],
ppw,
previous_approx),
ppt->error_message,
ppt->error_message);
/** (d) integrate the perturbations over the current interval,
with the evolver selected by the precision parameter ppr->evolver */
if(ppr->evolver == rk){
generic_evolver = evolver_rk;
}
else{
generic_evolver = evolver_ndf15;
}
class_call(generic_evolver(perturb_derivs,
interval_limit[index_interval],
interval_limit[index_interval+1],
ppw->pv->y,
ppw->pv->used_in_sources,
ppw->pv->pt_size,
&ppaw,
ppr->tol_perturb_integration,
ppr->smallest_allowed_variation,
perturb_timescale,
ppr->perturb_integration_stepsize,
ppt->tau_sampling,
tau_actual_size,
perturb_sources,
perhaps_print_variables,
ppt->error_message),
ppt->error_message,
ppt->error_message);
}
/** - if perturbations were printed in a file, close the file */
//if (perhaps_print_variables != NULL)
// fclose(ppw->perturb_output_file);
/** fill the source terms array with zeros for all times between
the last integrated time tau_max and tau_today. */
for (index_tau = tau_actual_size; index_tau < ppt->tau_size; index_tau++) {
for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + index_type]
[index_tau * ppt->k_size[index_md] + index_k] = 0.;
}
}
/** - free quantities allocated at the beginning of the routine */
class_call(perturb_vector_free(ppw->pv),
ppt->error_message,
ppt->error_message);
for (index_interval=0; index_interval<interval_number; index_interval++)
free(interval_approx[index_interval]);
free(interval_approx);
free(interval_limit);
return _SUCCESS_;
}
/**
 * Prepare the column titles of the perturbation output, for the case
 * where the user requested perturbations to be stored for some
 * wavenumbers (k_output_values_num > 0). Titles are appended
 * conditionally, depending on which species/transfers are switched on,
 * so that the title order matches the order in which values are
 * written elsewhere. Note: the order of the class_store_columntitle
 * calls below defines the column order and must not be changed.
 *
 * @param pba Input: pointer to background structure
 * @param ppt Input/Output: pointer to the perturbation structure (title strings and counts written here)
 * @return the error status
 */
int perturb_prepare_output(struct background * pba,
struct perturbs * ppt){
int n_ncdm;
/* scratch buffer for per-species ncdm column names */
char tmp[40];
/* start from empty title strings (vector titles are initialized but
not filled below: no vector section exists in this routine) */
ppt->scalar_titles[0]='\0';
ppt->vector_titles[0]='\0';
ppt->tensor_titles[0]='\0';
if (ppt->k_output_values_num > 0) {
/** Write titles for all perturbations that we would like to print/store. */
if (ppt->has_scalars == _TRUE_){
class_store_columntitle(ppt->scalar_titles,"tau [Mpc]",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"a",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"delta_g",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"theta_g",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"shear_g",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"pol0_g",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"pol1_g",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"pol2_g",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"delta_b",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"theta_b",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"psi",_TRUE_);
class_store_columntitle(ppt->scalar_titles,"phi",_TRUE_);
/* Perturbed recombination */
class_store_columntitle(ppt->scalar_titles,"delta_Tb",ppt->has_perturbed_recombination);
class_store_columntitle(ppt->scalar_titles,"delta_chi",ppt->has_perturbed_recombination);
/* Ultrarelativistic species */
class_store_columntitle(ppt->scalar_titles,"delta_ur",pba->has_ur);
class_store_columntitle(ppt->scalar_titles,"theta_ur",pba->has_ur);
class_store_columntitle(ppt->scalar_titles,"shear_ur",pba->has_ur);
/* Cold dark matter */
class_store_columntitle(ppt->scalar_titles,"delta_cdm",pba->has_cdm);
class_store_columntitle(ppt->scalar_titles,"theta_cdm",pba->has_cdm);
/* Non-cold dark matter: one (delta,theta,shear) triplet per species,
only when transfer functions or delta_m sources are requested */
if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
sprintf(tmp,"delta_ncdm[%d]",n_ncdm);
class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
sprintf(tmp,"theta_ncdm[%d]",n_ncdm);
class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
sprintf(tmp,"shear_ncdm[%d]",n_ncdm);
class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
}
}
/* Decaying cold dark matter */
class_store_columntitle(ppt->scalar_titles, "delta_dcdm", pba->has_dcdm);
class_store_columntitle(ppt->scalar_titles, "theta_dcdm", pba->has_dcdm);
/* Decay radiation */
class_store_columntitle(ppt->scalar_titles, "delta_dr", pba->has_dr);
class_store_columntitle(ppt->scalar_titles, "theta_dr", pba->has_dr);
class_store_columntitle(ppt->scalar_titles, "shear_dr", pba->has_dr);
/* Scalar field scf */
class_store_columntitle(ppt->scalar_titles, "delta_scf", pba->has_scf);
class_store_columntitle(ppt->scalar_titles, "theta_scf", pba->has_scf);
/* Scalar field smg (modified gravity) */
class_store_columntitle(ppt->scalar_titles, "V_x_smg", pba->has_smg);
class_store_columntitle(ppt->scalar_titles, "V_x_prime_smg", pba->has_smg);
ppt->number_of_scalar_titles =
get_number_of_titles(ppt->scalar_titles);
}
if (ppt->has_tensors == _TRUE_){
class_store_columntitle(ppt->tensor_titles,"tau [Mpc]",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"a",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"delta_g",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"shear_g",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"l4_g",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"pol0_g",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"pol2_g",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"pol4_g",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"H (gw)",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"Hdot (gwdot)",_TRUE_);
class_store_columntitle(ppt->tensor_titles,"delta_ur",ppt->evolve_tensor_ur);
class_store_columntitle(ppt->tensor_titles,"shear_ur",ppt->evolve_tensor_ur);
class_store_columntitle(ppt->tensor_titles,"l4_ur",ppt->evolve_tensor_ur);
/* ncdm multipoles evolved explicitly in the tensor sector */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
sprintf(tmp,"delta_ncdm[%d]",n_ncdm);
class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
sprintf(tmp,"theta_ncdm[%d]",n_ncdm);
class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
sprintf(tmp,"shear_ncdm[%d]",n_ncdm);
class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
}
}
ppt->number_of_tensor_titles =
get_number_of_titles(ppt->tensor_titles);
}
}
return _SUCCESS_;
}
/**
* For a given mode and wavenumber, find the number of intervals of
* time between tau_ini and tau_end such that the approximation
* scheme (and the number of perturbation equations) is uniform.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber value
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param interval_number Output: total number of intervals
* @param interval_number_of Output: number of intervals with respect to each particular approximation
* @return the error status
*/
int perturb_find_approximation_number(
                                      struct precision * ppr,
                                      struct background * pba,
                                      struct thermo * pth,
                                      struct perturbs * ppt,
                                      int index_md,
                                      double k,
                                      struct perturb_workspace * ppw,
                                      double tau_ini,
                                      double tau_end,
                                      int * interval_number,
                                      int * interval_number_of /* interval_number_of[index_ap] (already allocated) */
                                      ){

  /* index running over approximations */
  int index_ap;
  /* value of a given approximation at tau_ini and tau_end */
  int flag_ini,flag_end;

  /* Performance note: perturb_approximations() fills ALL entries of
     ppw->approx[] for a given time, independently of index_ap. The
     previous implementation re-evaluated it twice per approximation
     scheme inside the loop; here it is called exactly once per time. */

  /** - evaluate all approximation flags at tau_ini */
  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  /* stash the initial flags in the caller-allocated output array,
     used as scratch space (each entry is overwritten with its final
     value in the loop below) */
  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    interval_number_of[index_ap] = ppw->approx[index_ap];

  /** - evaluate all approximation flags at tau_end */
  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_end,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  /** - fix default number of intervals to one (if no approximation switch) */
  *interval_number=1;

  /** - loop over each approximation and add the number of approximation switching times */
  for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

    flag_ini = interval_number_of[index_ap];
    flag_end = ppw->approx[index_ap];

    /* flags must be non-decreasing in time: enum labels are required
       to be declared in chronological order */
    class_test(flag_end<flag_ini,
               ppt->error_message,
               "For each approximation scheme, the declaration of approximation labels in the enumeration must follow chronological order, e.g: enum approx_flags {flag1, flag2, flag3} with flag1 being the initial one and flag3 the final one");

    /* each unit step of the flag corresponds to one extra interval */
    *interval_number += flag_end-flag_ini;

    interval_number_of[index_ap] = flag_end-flag_ini+1;
  }

  return _SUCCESS_;
}
/**
* For a given mode and wavenumber, find the values of time at which
* the approximation changes.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber value
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param interval_number Input: total number of intervals
* @param interval_number_of Input: number of intervals with respect to each particular approximation
* @param interval_limit Output: value of time at the boundary of the intervals: tau_ini, tau_switch1, ..., tau_end
* @param interval_approx Output: value of approximations in each interval
* @return the error status
*/
int perturb_find_approximation_switches(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
double k,
struct perturb_workspace * ppw,
double tau_ini,
double tau_end,
double precision,
int interval_number,
int * interval_number_of,
double * interval_limit, /* interval_limit[index_interval] (already allocated) */
int ** interval_approx /* interval_approx[index_interval][index_ap] (already allocated) */
){
/* index running over approximation schemes */
int index_ap;
/* index running over switches of one given approximation */
int index_switch;
/* running total of switches found / sorted so far */
int index_switch_tot;
/* number of switches of the approximation currently considered */
int num_switch;
/* bisection bounds for one switching time */
double tau_min,lower_bound,upper_bound;
double mid=0;
/* switching times collected in arbitrary order before sorting */
double * unsorted_tau_switch;
double next_tau_switch;
/* flag value at tau_ini for the approximation currently considered */
int flag_ini;
int num_switching_at_given_time;
/** - write in output arrays the initial time and approximation */
interval_limit[0]=tau_ini;
class_call(perturb_approximations(ppr,
pba,
pth,
ppt,
index_md,
k,
tau_ini,
ppw),
ppt->error_message,
ppt->error_message);
for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
interval_approx[0][index_ap]=ppw->approx[index_ap];
/** - if there are no approximation switches, just write final time and return */
if (interval_number == 1) {
interval_limit[1]=tau_end;
}
/** - if there are switches, consider approximations one after each
other. Find switching time by bisection. Store all switches in
arbitrary order in array unsorted_tau_switch[] */
else {
class_alloc(unsorted_tau_switch,(interval_number-1)*sizeof(double),ppt->error_message);
index_switch_tot=0;
for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
if (interval_number_of[index_ap] > 1) {
num_switch = interval_number_of[index_ap]-1;
tau_min = tau_ini;
flag_ini = interval_approx[0][index_ap];
/* locate each switch in turn; relies on the flag being a
non-decreasing function of time, so each bisection brackets
the time at which the flag first exceeds flag_ini+index_switch */
for (index_switch=0; index_switch<num_switch; index_switch++) {
lower_bound=tau_min;
upper_bound=tau_end;
mid = 0.5*(lower_bound+upper_bound);
while (upper_bound - lower_bound > precision) {
class_call(perturb_approximations(ppr,
pba,
pth,
ppt,
index_md,
k,
mid,
ppw),
ppt->error_message,
ppt->error_message);
if (ppw->approx[index_ap] > flag_ini+index_switch) {
upper_bound=mid;
}
else {
lower_bound=mid;
}
mid = 0.5*(lower_bound+upper_bound);
}
unsorted_tau_switch[index_switch_tot]=mid;
index_switch_tot++;
/* next switch of this approximation must be later than this one */
tau_min=mid;
}
}
}
class_test(index_switch_tot != (interval_number-1),
ppt->error_message,
"bug in approximation switch search routine: should have %d = %d",
index_switch_tot,interval_number-1);
/** - now sort interval limits in correct order (selection of the
smallest remaining switching time, one per pass) */
index_switch_tot=1;
while (index_switch_tot < interval_number) {
next_tau_switch=tau_end;
for (index_switch=0; index_switch<interval_number-1; index_switch++) {
if ((unsorted_tau_switch[index_switch] > interval_limit[index_switch_tot-1]) &&
(unsorted_tau_switch[index_switch] < next_tau_switch)) {
next_tau_switch=unsorted_tau_switch[index_switch];
}
}
interval_limit[index_switch_tot]=next_tau_switch;
index_switch_tot++;
}
interval_limit[index_switch_tot]=tau_end;
/* if two switching times coincide, the selection above skips one and
the count comes out wrong: detect that case explicitly */
class_test(index_switch_tot != interval_number,
ppt->error_message,
"most probably two approximation switching time were found to be equal, which cannot be handled\n");
/** - store each approximation in chronological order (flags evaluated
at the midpoint of each interval) */
for (index_switch=1; index_switch<interval_number; index_switch++) {
class_call(perturb_approximations(ppr,
pba,
pth,
ppt,
index_md,
k,
0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]),
ppw),
ppt->error_message,
ppt->error_message);
for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
interval_approx[index_switch][index_ap]=ppw->approx[index_ap];
/* check here that approximation does not go backward (remember
that by definition the value of an approximation can only
increase) */
class_test(interval_approx[index_switch][index_ap] < interval_approx[index_switch-1][index_ap],
ppt->error_message,
"The approximation with label %d is not defined correctly: it goes backward (from %d to %d) for k=%e and between tau=%e and %e; this cannot be handled\n",
index_ap,
interval_approx[index_switch-1][index_ap],
interval_approx[index_switch][index_ap],
k,
0.5*(interval_limit[index_switch-1]+interval_limit[index_switch]),
0.5*(interval_limit[index_switch]+interval_limit[index_switch+1])
);
}
/* check here that more than one approximation is not switched on at a given time */
num_switching_at_given_time=0;
for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
if (interval_approx[index_switch][index_ap] != interval_approx[index_switch-1][index_ap])
num_switching_at_given_time++;
}
class_test(num_switching_at_given_time != 1,
ppt->error_message,
"for k=%e, at tau=%g, you switch %d approximations at the same time, this cannot be handled. Usually happens in two cases: triggers for different approximations coincide, or one approx is reversible\n",
k,
interval_limit[index_switch],
num_switching_at_given_time);
/* verbose reporting of which approximation switches at each boundary */
if (ppt->perturbations_verbose>2) {
if (_scalars_) {
if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
(interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
//fprintf(stderr,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]); //TBC
if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
(interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]);
if (pba->has_ur == _TRUE_) {
if ((interval_approx[index_switch-1][ppw->index_ap_ufa]==(int)ufa_off) &&
(interval_approx[index_switch][ppw->index_ap_ufa]==(int)ufa_on)) {
fprintf(stdout,"Mode k=%e: will switch on ur fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
}
}
if (pba->has_ncdm == _TRUE_) {
if ((interval_approx[index_switch-1][ppw->index_ap_ncdmfa]==(int)ncdmfa_off) &&
(interval_approx[index_switch][ppw->index_ap_ncdmfa]==(int)ncdmfa_on)) {
fprintf(stdout,"Mode k=%e: will switch on ncdm fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
}
}
}
if (_tensors_) {
if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
(interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);
if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
(interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);
}
}
}
free(unsorted_tau_switch);
/* leave ppw->approx (and interpolation indices) consistent with tau_end */
class_call(perturb_approximations(ppr,
pba,
pth,
ppt,
index_md,
k,
tau_end,
ppw),
ppt->error_message,
ppt->error_message);
}
return _SUCCESS_;
}
/**
* Initialize the field '->pv' of a perturb_workspace structure, which
* is a perturb_vector structure. This structure contains indices and
* values of all quantities which need to be integrated with respect
* to time (and only them: quantities fixed analytically or obeying
* constraint equations are NOT included in this vector). This routine
* distinguishes between two cases:
*
* -> the input pa_old is set to the NULL pointer:
*
* This happens when we start integrating over a new wavenumber and we
* want to set initial conditions for the perturbations. Then, it is
* assumed that ppw->pv is not yet allocated. This routine allocates
* it, defines all indices, and then fills the vector ppw->pv->y with
* the initial conditions defined in perturb_initial_conditions.
*
* -> the input pa_old is not set to the NULL pointer and describes
* some set of approximations:
*
* This happens when we need to change of approximation scheme while
* integrating over a given wavenumber. The new approximation
* described by ppw->pa is then different from pa_old. Then, this
* routine allocates a new vector with a new size and new index
* values; it fills this vector with initial conditions taken from the
* previous vector passed as an input in ppw->pv, and eventually with
* some analytic approximations for the new variables appearing at
* this time; then the new vector comes in replacement of the old one,
* which is freed.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
* @param pa_old Input: NULL if we need to set y to initial conditions for a new wavenumber; points towards a perturb_approximations structure if we want to switch approximation scheme.
* @return the error status
*/
int perturb_vector_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw, /* ppw->pv unallocated if pa_old = NULL, allocated and filled otherwise */
int * pa_old
) {
/** Summary: */
/** - define local variables */
struct perturb_vector * ppv;
int index_pt;
int l;
int n_ncdm,index_q,ncdm_l_size;
double rho_plus_p_ncdm,q,q2,epsilon,a,factor;
/** - allocate a new perturb_vector structure to which ppw->pv will point at the end of the routine */
class_alloc(ppv,sizeof(struct perturb_vector),ppt->error_message);
/** - initialize pointers to NULL (they will be allocated later if
needed), relevant for perturb_vector_free() */
ppv->l_max_ncdm = NULL;
ppv->q_size_ncdm = NULL;
/** - defines all indices in this new vector (depends on approximation scheme, described by the input structure ppw->pa) */
index_pt = 0;
if (_scalars_) {
/* reject inconsistent values of the number of mutipoles in photon temperature hierachy */
class_test(ppr->l_max_g < 4,
ppt->error_message,
"ppr->l_max_g should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third and fourth momentum");
/* reject inconsistent values of the number of mutipoles in photon polarization hierachy */
class_test(ppr->l_max_pol_g < 4,
ppt->error_message,
"ppr->l_max_pol_g should be at least 4");
/* reject inconsistent values of the number of mutipoles in decay radiation hierachy */
if (pba->has_dr == _TRUE_) {
class_test(ppr->l_max_dr < 4,
ppt->error_message,
"ppr->l_max_dr should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
/* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierachy */
if (pba->has_ur == _TRUE_) {
class_test(ppr->l_max_ur < 4,
ppt->error_message,
"ppr->l_max_ur should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
/* photons */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
/* temperature */
ppv->l_max_g = ppr->l_max_g;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* higher momenta */
/* polarization */
ppv->l_max_pol_g = ppr->l_max_pol_g;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2);
}
}
/* baryons */
class_define_index(ppv->index_pt_delta_b,_TRUE_,index_pt,1); /* baryon density */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* baryon velocity */
/* cdm */
class_define_index(ppv->index_pt_delta_cdm,pba->has_cdm,index_pt,1); /* cdm density */
class_define_index(ppv->index_pt_theta_cdm,pba->has_cdm && (ppt->gauge == newtonian),index_pt,1); /* cdm velocity */
/* dcdm */
class_define_index(ppv->index_pt_delta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm density */
class_define_index(ppv->index_pt_theta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm velocity */
/* ultra relativistic decay radiation */
if (pba->has_dr==_TRUE_){
ppv->l_max_dr = ppr->l_max_dr;
class_define_index(ppv->index_pt_F0_dr,_TRUE_,index_pt,ppv->l_max_dr+1); /* all momenta in Boltzmann hierarchy */
}
/* fluid */
class_define_index(ppv->index_pt_delta_fld,pba->has_fld,index_pt,1); /* fluid density */
class_define_index(ppv->index_pt_theta_fld,pba->has_fld,index_pt,1); /* fluid velocity */
/* scalar field */
class_define_index(ppv->index_pt_phi_scf,pba->has_scf,index_pt,1); /* scalar field density */
class_define_index(ppv->index_pt_phi_prime_scf,pba->has_scf,index_pt,1); /* scalar field velocity */
/* scalar field: integration indices
*/
class_define_index(ppv->index_pt_vx_smg,pba->has_smg,index_pt,1); /* dynamical scalar field perturbation */
class_define_index(ppv->index_pt_vx_prime_smg,pba->has_smg,index_pt,1); /* dynamical scalar field velocity */
/* perturbed recombination: the indices are defined once tca is off. */
if ( (ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
class_define_index(ppv->index_pt_perturbed_recombination_delta_temp,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_perturbed_recombination_delta_chi,_TRUE_,index_pt,1);
}
/* ultra relativistic neutrinos */
if (pba->has_ur && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
class_define_index(ppv->index_pt_delta_ur,_TRUE_,index_pt,1); /* density of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_theta_ur,_TRUE_,index_pt,1); /* velocity of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_shear_ur,_TRUE_,index_pt,1); /* shear of ultra-relativistic neutrinos/relics */
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,_TRUE_,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
}
}
/* non-cold dark matter */
if (pba->has_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt; /* density of ultra-relativistic neutrinos/relics */
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_off){
/* reject inconsistent values of the number of multipoles in ultra relativistic neutrino hierarchy */
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
}
else{
// In the fluid approximation, the hierarchy is cut at lmax = 2 and the q dependence is integrated out:
ppv->l_max_ncdm[n_ncdm] = 2;
ppv->q_size_ncdm[n_ncdm] = 1;
}
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/* metric (only quantities to be integrated, not those obeying constraint equations) */
/* metric perturbation eta of synchronous gauge */
class_define_index(ppv->index_pt_eta,ppt->gauge == synchronous,index_pt,1);
/* metric perturbation phi of newtonian gauge ( we could fix it
using Einstein equations as a constraint equation for phi, but
integration is numerically more stable if we actually evolve
phi) */
class_define_index(ppv->index_pt_phi,ppt->gauge == newtonian,index_pt,1);
}
if (_vectors_) {
/* Vector baryon velocity: v_b^{(1)}. */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1);
/* eventually reject inconsistent values of the number of multipoles in photon temperature hierarchy and polarisation */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/** (b) metric perturbations V or h_v depending on gauge */
if (ppt->gauge == synchronous){
class_define_index(ppv->index_pt_hv_prime,_TRUE_,index_pt,1);
}
if (ppt->gauge == newtonian){
class_define_index(ppv->index_pt_V,_TRUE_,index_pt,1);
}
}
if (_tensors_) {
/* reject inconsistent values of the number of multipoles in photon temperature hierarchy */
class_test(ppr->l_max_g_ten < 4,
ppt->error_message,
"ppr->l_max_g_ten should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third momentum");
/* reject inconsistent values of the number of multipoles in photon polarization hierarchy */
class_test(ppr->l_max_pol_g_ten < 4,
ppt->error_message,
"ppr->l_max_pol_g_ten should be at least 4");
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/* ultra relativistic neutrinos */
class_define_index(ppv->index_pt_delta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur density */
class_define_index(ppv->index_pt_theta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur velocity */
class_define_index(ppv->index_pt_shear_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur shear */
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,ppt->evolve_tensor_ur,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt;
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/** (b) metric perturbation h is a propagating degree of freedom, so h and hdot are included
in the vector of ordinary perturbations, not in that of metric perturbations */
class_define_index(ppv->index_pt_gw,_TRUE_,index_pt,1); /* tensor metric perturbation h (gravitational waves) */
class_define_index(ppv->index_pt_gwdot,_TRUE_,index_pt,1); /* its time-derivative */
}
ppv->pt_size = index_pt;
/** - allocate vectors for storing the values of all these
quantities and their time-derivatives at a given time */
class_calloc(ppv->y,ppv->pt_size,sizeof(double),ppt->error_message);
class_alloc(ppv->dy,ppv->pt_size*sizeof(double),ppt->error_message);
class_alloc(ppv->used_in_sources,ppv->pt_size*sizeof(int),ppt->error_message);
/** - specify which perturbations are needed in the evaluation of source terms */
/* take all of them by default */
for (index_pt=0; index_pt<ppv->pt_size; index_pt++)
ppv->used_in_sources[index_pt] = _TRUE_;
/* indicate which ones are not needed (this is just for saving time,
omitting perturbations in this list will not change the
results!) */
if (_scalars_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we don't need temperature multipoles above l=2 (but they are
defined only when rsa and tca are off) */
for (index_pt=ppv->index_pt_l3_g; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* for polarisation, we only need l=0,2 (but l =1,3, ... are
defined only when rsa and tca are off) */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol3_g; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/* we don't need ur multipoles above l=2 (but they are
defined only when rsa and ufa are off) */
for (index_pt=ppv->index_pt_l3_ur; index_pt <= ppv->index_pt_delta_ur+ppv->l_max_ur; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
}
if (pba->has_ncdm == _TRUE_) {
/* we don't need ncdm multipoles above l=2 (but they are
defined only when ncdmfa is off) */
index_pt = ppv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv-> N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
if (l>2) ppv->used_in_sources[index_pt]=_FALSE_;
index_pt++;
}
}
}
}
}
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we don't need temperature multipoles above except l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_theta_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_l3_g]=_FALSE_;
for (index_pt=ppv->index_pt_delta_g+5; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* same for polarisation, we only need l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_pol3_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol0_g+5; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
/* we need h' but not h */
ppv->used_in_sources[ppv->index_pt_gw]=_FALSE_;
}
/** - case of setting initial conditions for a new wavenumber */
if (pa_old == NULL) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: initializing vector at tau=%e\n",k,tau);
if (_scalars_) {
/** (a) check that current approximation scheme is consistent
with initial conditions */
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"scalar initial conditions assume radiation streaming approximation turned off");
if (pba->has_ur == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ufa] == (int)ufa_on,
ppt->error_message,
"scalar initial conditions assume ur fluid approximation turned off");
}
if (pba->has_ncdm == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on,
ppt->error_message,
"scalar initial conditions assume ncdm fluid approximation turned off");
}
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"scalar initial conditions assume tight-coupling approximation turned on");
}
if (_tensors_) {
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"tensor initial conditions assume tight-coupling approximation turned on");
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"tensor initial conditions assume radiation streaming approximation turned off");
}
/** (b) let ppw->pv point towards the perturb_vector structure
that we just created */
ppw->pv = ppv;
/** (c) fill the vector ppw->pv->y with appropriate initial conditions */
class_call(perturb_initial_conditions(ppr,
pba,
ppt,
index_md,
index_ic,
k,
tau,
ppw),
ppt->error_message,
ppt->error_message);
}
/** - case of switching approximation while a wavenumber is being integrated */
else {
/** (a) for the scalar mode: */
if (_scalars_) {
/** -- check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** -- some variables (b, cdm, fld, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
ppv->y[ppv->index_pt_delta_b] =
ppw->pv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
if (pba->has_cdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_cdm] =
ppw->pv->y[ppw->pv->index_pt_delta_cdm];
if (ppt->gauge == newtonian) {
ppv->y[ppv->index_pt_theta_cdm] =
ppw->pv->y[ppw->pv->index_pt_theta_cdm];
}
}
if (pba->has_dcdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
ppv->y[ppv->index_pt_theta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_theta_dcdm];
}
if (pba->has_dr == _TRUE_){
for (l=0; l <= ppv->l_max_dr; l++)
ppv->y[ppv->index_pt_F0_dr+l] =
ppw->pv->y[ppw->pv->index_pt_F0_dr+l];
}
if (pba->has_fld == _TRUE_) {
ppv->y[ppv->index_pt_delta_fld] =
ppw->pv->y[ppw->pv->index_pt_delta_fld];
ppv->y[ppv->index_pt_theta_fld] =
ppw->pv->y[ppw->pv->index_pt_theta_fld];
}
if (pba->has_scf == _TRUE_) {
ppv->y[ppv->index_pt_phi_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_scf];
ppv->y[ppv->index_pt_phi_prime_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf];
}
if (pba->has_smg == _TRUE_) {//pass the values only if the order is correct
ppv->y[ppv->index_pt_vx_smg] =
ppw->pv->y[ppw->pv->index_pt_vx_smg];
ppv->y[ppv->index_pt_vx_prime_smg] =
ppw->pvecmetric[ppw->index_mt_vx_prime_smg];
}
if (ppt->gauge == synchronous)
ppv->y[ppv->index_pt_eta] =
ppw->pv->y[ppw->pv->index_pt_eta];
if (ppt->gauge == newtonian)
ppv->y[ppv->index_pt_phi] =
ppw->pv->y[ppw->pv->index_pt_phi];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
/* tight-coupling approximation for shear_g (previously
computed in perturb_derivs: perturb_derivs is always
called at the end of generic_evolver, in order to update
all quantities in ppw to the time at which the
approximation is switched off) */
ppv->y[ppv->index_pt_shear_g] = ppw->tca_shear_g;
ppv->y[ppv->index_pt_l3_g] = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->s_l[3]*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for l=3 */
ppv->y[ppv->index_pt_pol0_g] = 2.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=0 */
ppv->y[ppv->index_pt_pol1_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*(5.-2.*ppw->s_l[2])/6.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=1 */
ppv->y[ppv->index_pt_pol2_g] = 0.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=2 */
ppv->y[ppv->index_pt_pol3_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*3.*ppw->s_l[3]/14.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=3 */
if (pba->has_ur == _TRUE_) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* perturbed recombination */
/* the initial conditions are set when tca is switched off (current block) */
if (ppt->has_perturbed_recombination == _TRUE_){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = 1./3.*ppv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =0.;
}
} // end of block tca ON -> tca OFF
/* perturbed recombination */
/* For any other transition in the approximation scheme, we should just copy the value of the perturbations, provided tca is already off (otherwise the indices are not yet allocated). For instance, we do not want to copy the values in the (k,tau) region where both UFA and TCA are engaged.*/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(pa_old[ppw->index_ap_tca]==(int)tca_off)){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
/* -- case of switching on ur fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ur == _TRUE_) {
if ((pa_old[ppw->index_ap_ufa] == (int)ufa_off) && (ppw->approx[ppw->index_ap_ufa] == (int)ufa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ur fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
/** This is correct even when ncdmfa == off, since ppv->l_max_ncdm and
ppv->q_size_ncdm is updated.*/
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
}
/* -- case of switching on ncdm fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ncdm == _TRUE_) {
if ((pa_old[ppw->index_ap_ncdmfa] == (int)ncdmfa_off) && (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ncdm fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
}
a = ppw->pvecback[pba->index_bg_a];
index_pt = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
// We are in the fluid approximation, so ncdm_l_size is always 3.
ncdm_l_size = ppv->l_max_ncdm[n_ncdm]+1;
rho_plus_p_ncdm = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
for(l=0; l<=2; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+l] = 0.0;
}
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for(index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++){
// Integrate over distributions:
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] +=
pba->w_ncdm[n_ncdm][index_q]*q2*epsilon*
ppw->pv->y[index_pt];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q*
ppw->pv->y[index_pt+1];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q2/epsilon*
ppw->pv->y[index_pt+2];
//Jump to next momentum bin in ppw->pv->y:
index_pt += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] *=factor/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] *=k*factor/rho_plus_p_ncdm;
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] *=2.0/3.0*factor/rho_plus_p_ncdm;
}
}
}
}
/** (b) for the vector mode */
if (_vectors_) {
/** -- check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** -- some variables (gw, gwdot, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
if (ppt->gauge == synchronous){
ppv->y[ppv->index_pt_hv_prime] =
ppw->pv->y[ppw->pv->index_pt_hv_prime];
}
if (ppt->gauge == newtonian){
ppv->y[ppv->index_pt_V] =
ppw->pv->y[ppw->pv->index_pt_V];
}
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = 0.0; //TBC
//-4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 0.0; //TBC
//1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** (c) for the tensor mode */
if (_tensors_) {
/** -- check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** -- some variables (gw, gwdot, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
ppv->y[ppv->index_pt_gw] =
ppw->pv->y[ppw->pv->index_pt_gw];
ppv->y[ppv->index_pt_gwdot] =
ppw->pv->y[ppw->pv->index_pt_gwdot];
if (ppt->evolve_tensor_ur == _TRUE_){
/** For now, neutrinos go here. */
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
if (ppt->evolve_tensor_ncdm == _TRUE_){
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** (c) free the previous vector of perturbations */
class_call(perturb_vector_free(ppw->pv),
ppt->error_message,
ppt->error_message);
/** (d) let ppw->pv point towards the perturb_vector structure
that we just created */
ppw->pv = ppv;
}
return _SUCCESS_;
}
/**
* Free the perturb_vector structure.
*
* @param pv Input: pointer to perturb_vector structure to be freed
* @return the error status
*/
int perturb_vector_free(
                        struct perturb_vector * pv
                        ) {

  /* main state arrays: the perturbations themselves, their time
     derivatives, and the flags marking which entries feed the
     source terms */
  free(pv->y);
  free(pv->dy);
  free(pv->used_in_sources);

  /* per-species ncdm bookkeeping arrays are only allocated when ncdm
     species are present, hence the guards */
  if (pv->l_max_ncdm != NULL)
    free(pv->l_max_ncdm);
  if (pv->q_size_ncdm != NULL)
    free(pv->q_size_ncdm);

  /* finally release the container itself */
  free(pv);

  return _SUCCESS_;
}
/**
* For each mode, wavenumber and initial condition, this function
* initializes all values in the vector of perturbed variables (in a
* given gauge). It is assumed here that all values have been set
* previously to zero, only non-zero values are set here.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
* @return the error status
*/
int perturb_initial_conditions(struct precision * ppr,
struct background * pba,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - declare local variables */
double a,a_prime_over_a;
double delta_ur=0.,theta_ur=0.,shear_ur=0.,l3_ur=0.,eta=0.,delta_cdm=0.,alpha, alpha_prime;
double delta_dr=0;
double q,epsilon,k2;
int index_q,n_ncdm,idx;
double rho_r,rho_m,rho_nu,rho_m_over_rho_r;
double fracnu,fracg,fracb,fraccdm,om;
double ktau_two,ktau_three;
double f_dr;
double delta_tot;
double velocity_tot;
double s2_squared;
/** - for scalars */
if (_scalars_) {
/** (a) compute relevant background quantities: compute rho_r,
rho_m, rho_nu (= all relativistic except photons), and their
ratio. */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
a = ppw->pvecback[pba->index_bg_a];
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
/* 8piG/3 rho_r(t_i) */
rho_r = ppw->pvecback[pba->index_bg_rho_g];
/* 8piG/3 rho_m(t_i) */
rho_m = ppw->pvecback[pba->index_bg_rho_b];
/* 8piG/3 rho_nu(t_i) (all neutrinos and collisionless relics being relativistic at that time) */
rho_nu = 0.;
if (pba->has_cdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
if (pba->has_dcdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
if (pba->has_dr == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_dr];
rho_nu += ppw->pvecback[pba->index_bg_rho_dr];
}
if (pba->has_ur == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_ur];
rho_nu += ppw->pvecback[pba->index_bg_rho_ur];
}
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm<pba->N_ncdm; n_ncdm++){
rho_r += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
rho_nu += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
}
}
class_test(rho_r == 0.,
ppt->error_message,
"stop to avoid division by zero");
/* f_nu = Omega_nu(t_i) / Omega_r(t_i) */
fracnu = rho_nu/rho_r;
/* f_g = Omega_g(t_i) / Omega_r(t_i) */
fracg = ppw->pvecback[pba->index_bg_rho_g]/rho_r;
/* f_b = Omega_b(t_i) / Omega_m(t_i) */
fracb = ppw->pvecback[pba->index_bg_rho_b]/rho_m;
/* f_cdm = Omega_cdm(t_i) / Omega_m(t_i) */
fraccdm = 1.-fracb;
/* Omega_m(t_i) / Omega_r(t_i) */
rho_m_over_rho_r = rho_m/rho_r;
/* omega = Omega_m(t_i) a(t_i) H(t_i) / sqrt(Omega_r(t_i))
= Omega_m(t_0) a(t_0) H(t_0) / sqrt(Omega_r(t_0)) assuming rho_m in a-3 and rho_r in a^-4
= (8piG/3 rho_m(t_i)) a(t_i) / sqrt(8piG/3 rho_r(t_i)) in Mpc-1
This (a priori strange) parameter is the relevant one for expressing a
as a function of tau during radiation and matter domination (but not DE domination).
Indeed the exact solution of Friedmann when there is only radiation and matter in
the universe is
a = [H(t_0)^2 Omega_m(t_0) a(t_0)^3 / 4] x [tau^2 + 4 tau / omega]
*/
om = a*rho_m/sqrt(rho_r);
/* (k tau)^2, (k tau)^3 */
ktau_two=k*k*tau*tau;
ktau_three=k*tau*ktau_two;
/* curvature-dependent factors */
s2_squared = 1.-3.*pba->K/k/k;
/** (b) starts by setting everything in synchronous gauge. If
another gauge is needed, we will perform a gauge
transformation below. */
/** (b.1) adiabatic */
if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
/* The following formulas are valid at leading order in
(k*tau) and (om*tau), and order zero in
tight-coupling. Identical to first order terms in CRS,
except for normalization (when ppr->curvature_ini=1, tau=1:
leads to factor 1/2 difference between CRS formulas with
beta1=0). Identical to CAMB when om set to zero in theta_g,
theta_ur, shear_ur, tau
In the non-flat case the relation R=eta is still valid
outside the horizon for adiabatic IC. Hence eta is still
set to ppr->curvature_ini at leading order. Factors s2
appear through the solution of Einstein equations and
equations of motion. */
/* photon density */
ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3. * (1.-om*tau/5.)
* ppr->curvature_ini * s2_squared;
/* photon velocity */
ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36. * (1.-3.*(1.+5.*fracb-fracnu)/20./(1.-fracnu)*om*tau)
* ppr->curvature_ini * s2_squared;
/* tighly-coupled baryons */
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* baryon density */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* baryon velocity */
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* cdm density */
/* cdm velocity velocity vanishes in the synchronous gauge */
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* dcdm density */
/* dcdm velocity velocity vanishes initially in the synchronous gauge */
}
/* fluid (assumes wa=0, if this is not the case the
fluid will catch anyway the attractor solution) */
if (pba->has_fld == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_fld] = - ktau_two/4.*(1.+pba->w0_fld+pba->wa_fld)*(4.-3.*pba->cs2_fld)/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC: curvature
ppw->pv->y[ppw->pv->index_pt_theta_fld] = - k*ktau_three/4.*pba->cs2_fld/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC:curvature
}
if (pba->has_scf == _TRUE_) {
/** Canonical field (solving for the perturbations):
initial perturbations set to zero, they should reach the attractor soon enough.
TODO: Incorporate the attractor IC from 1004.5509
delta_phi = -(a/k)^2/phi'(rho + p)theta
delta_phi_prime = a^2/phi' (delta_rho_phi + V'delta_phi)
and assume theta, delta_rho as for perfect fluid
with c_s^2 = 1 and w = 1/3 (ASSUMES radiation TRACKING)
*/
ppw->pv->y[ppw->pv->index_pt_phi_scf] = 0.;
/** a*a/k/k/ppw->pvecback[pba->index_bg_phi_prime_scf]*k*ktau_three/4.*1./(4.-6.*(1./3.)+3.*1.) * (ppw->pvecback[pba->index_bg_rho_scf] + ppw->pvecback[pba->index_bg_p_scf])* ppr->curvature_ini * s2_squared; */
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] = 0.;
/** delta_fld expression * rho_scf with the w = 1/3, c_s = 1
a*a/ppw->pvecback[pba->index_bg_phi_prime_scf]*( - ktau_two/4.*(1.+1./3.)*(4.-3.*1.)/(4.-6.*(1/3.)+3.*1.)*ppw->pvecback[pba->index_bg_rho_scf] - ppw->pvecback[pba->index_bg_dV_scf]*ppw->pv->y[ppw->pv->index_pt_phi_scf])* ppr->curvature_ini * s2_squared; */
}
/* Initial conditions for the scalar field
* Right now these are Adiabatic/Single clock
* v_X = delta_phi/phi_dot
* phi(t,x) = phi(tau+delta tau(x))
* This leads to very simple expressions:
* v_X = delta tau = delta_cdm/a_prime_over_a and v_X_prime = 0
*/
if (pba->has_smg == _TRUE_) {
if (pba->pert_initial_conditions_smg == single_clock){
// single_clock IC given with respect to photons (because there are always photons)
ppw->pv->y[ppw->pv->index_pt_vx_smg] = -1/(4.*ppw->pvecback[pba->index_bg_H])*ppw->pv->y[ppw->pv->index_pt_delta_g];
// Single clock IC => v_x^prime = 0
ppw->pv->y[ppw->pv->index_pt_vx_prime_smg] = 0. ;
if(ppt->perturbations_verbose > 5)
printf("Single clock IC for smg: ");
}
if (pba->pert_initial_conditions_smg == zero){
ppw->pv->y[ppw->pv->index_pt_vx_smg] = 0.;
ppw->pv->y[ppw->pv->index_pt_vx_prime_smg] = 0. ;
if(ppt->perturbations_verbose > 5)
printf("Zero IC for smg: ");
}
if(ppt->perturbations_verbose > 5)
printf("Vx = %e, Vx'= %e \n",ppw->pv->y[ppw->pv->index_pt_vx_smg],ppw->pv->y[ppw->pv->index_pt_vx_prime_smg]);
}
/* all relativistic relics: ur, early ncdm, dr */
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; /* density of ultra-relativistic neutrinos/relics */
theta_ur = - k*ktau_three/36./(4.*fracnu+15.) * (4.*fracnu+11.+12.*s2_squared-3.*(8.*fracnu*fracnu+50.*fracnu+275.)/20./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini * s2_squared; /* velocity of ultra-relativistic neutrinos/relics */ //TBC
shear_ur = ktau_two/(45.+12.*fracnu) * (3.*s2_squared-1.) * (1.+(4.*fracnu-5.)/4./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini;//TBC /s2_squared; /* shear of ultra-relativistic neutrinos/relics */ //TBC:0
l3_ur = ktau_three*2./7./(12.*fracnu+45.)* ppr->curvature_ini;//TBC
if (pba->has_dr == _TRUE_) delta_dr = delta_ur;
}
/* synchronous metric perturbation eta */
//eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)) / s2_squared;
//eta = ppr->curvature_ini * s2_squared * (1.-ktau_two/12./(15.+4.*fracnu)*(15.*s2_squared-10.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
}
/* isocurvature initial conditions taken from Bucher, Moodley,
Turok 99, with just a different normalization convention for
tau and the scale factor. [k tau] from BMT99 is left invariant
because it is the ratio [k/aH]. But [Omega_i,0 tau] from BMT99
must be replaced by [frac_i*om*tau/4]. Some doubts remain about
the niv formulas, that should be rechecked at some point. We
also checked that for bi,cdi,nid, everything coincides exactly
with the CAMB formulas. */
/** (b.2) Cold dark matter Isocurvature */
if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
class_test(pba->has_cdm == _FALSE_,
ppt->error_message,
"not consistent to ask for CDI in absence of CDM!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fraccdm*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fraccdm*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fraccdm*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fraccdm*om*tau*(1./6.-om*tau/16.);
}
/** (b.3) Baryon Isocurvature */
if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracb*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracb*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fracb*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fracb*om*tau*(1./6.-om*tau/16.);
}
/** (b.4) Neutrino density Isocurvature */
if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NID in absence of ur or ncdm species!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracnu/fracg*(-1.+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracnu/fracg*k*k*tau*(1./4.-fracb/fracg*3./16.*om*tau);
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini*fracnu/fracg/8.*ktau_two;
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*fracnu*fracb/fracg/80.*ktau_two*om*tau;
}
delta_ur = ppr->entropy_ini*(1.-ktau_two/6.);
theta_ur = ppr->entropy_ini*k*k*tau/4.;
shear_ur = ppr->entropy_ini*ktau_two/(4.*fracnu+15.)/2.;
eta = -ppr->entropy_ini*fracnu/(4.*fracnu+15.)/6.*ktau_two;
}
/** (b.5) Neutrino velocity Isocurvature */
if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NIV in absence of ur or ncdm species!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*k*tau*fracnu/fracg*
(1. - 3./16.*fracb*(2.+fracg)/fracg*om*tau); /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_g] = ppr->entropy_ini*fracnu/fracg*3./4.*k*
(-1.+3./4.*fracb/fracg*om*tau+3./16.*om*om*tau*tau*fracb/fracg/fracg*(fracg-3.*fracb)+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*9./64.*fracnu*fracb/fracg*k*tau*om*tau;
}
delta_ur = -ppr->entropy_ini*k*tau*(1.+3./16.*fracb*fracnu/fracg*om*tau); /* small diff wrt camb */
theta_ur = ppr->entropy_ini*3./4.*k*(1. - 1./6.*ktau_two*(4.*fracnu+9.)/(4.*fracnu+5.));
shear_ur = ppr->entropy_ini/(4.*fracnu+15.)*k*tau*(1. + 3.*om*tau*fracnu/(4.*fracnu+15.)); /* small diff wrt camb */
eta = ppr->entropy_ini*fracnu*k*tau*(-1./(4.*fracnu+5.) + (-3./64.*fracb/fracg+15./4./(4.*fracnu+15.)/(4.*fracnu+5.)*om*tau)); /* small diff wrt camb */
}
/** (c) If the needed gauge is really the synchronous gauge, we need to affect the previously computed value of eta to the actual variable eta */
if (ppt->gauge == synchronous) {
ppw->pv->y[ppw->pv->index_pt_eta] = eta;
}
/** (d) If the needed gauge is the newtonian gauge, we must compute alpha and then perform a gauge transformation for each variable */
if (ppt->gauge == newtonian) {
/* alpha is like in Ma & Bertschinger: (h'+6 eta')/(2k^2). We obtain it from the first two Einstein equations:
alpha = [eta + 3/2 (a'/a)^2 (delta_rho/rho_c) / k^2 /s_2^2 + 3/2 (a'/a)^3 3 ((rho+p)theta/rho_c) / k^4 / s_2^2] / (a'/a)
= [eta + 3/2 (a'/a)^2 / k^2 /s_2^2 {delta_tot + 3 (a'/a) /k^2 velocity_tot}] / (a'/a)
with
delta_tot = (delta_rho/rho_c)
= [rho_r delta_r + rho_m delta_m] / (rho_r + rho_m)
= [delta_r + (rho_m/rho_r) delta_m] / (1 + rho_m/rho_r)
= [(f_g delta_g + f_nu delta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm delta_cdm)] / (1 + rho_m/rho_r)
velocity_tot = ((rho+p)theta/rho_c)
= [(4/3) rho_r theta_r + rho_m theta_m] / (rho_r + rho_m)
= [(4/3) theta_r + (rho_m/rho_r) theta_m] / (1 + rho_m/rho_r)
= [(4/3) (f_g theta_g + f_nu theta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm 0)] / (1 + rho_m/rho_r)
*/
if (pba->has_cdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_cdm];
else if (pba->has_dcdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
else
delta_cdm=0.;
// note: if there are no neutrinos, fracnu, delta_ur and theta_ur below will consistently be zero.
delta_tot = (fracg*ppw->pv->y[ppw->pv->index_pt_delta_g]+fracnu*delta_ur+rho_m_over_rho_r*(fracb*ppw->pv->y[ppw->pv->index_pt_delta_b]+fraccdm*delta_cdm))/(1.+rho_m_over_rho_r);
velocity_tot = ((4./3.)*(fracg*ppw->pv->y[ppw->pv->index_pt_theta_g]+fracnu*theta_ur) + rho_m_over_rho_r*fracb*ppw->pv->y[ppw->pv->index_pt_theta_b])/(1.+rho_m_over_rho_r);
alpha = (eta + 3./2.*a_prime_over_a*a_prime_over_a/k/k/s2_squared*(delta_tot + 3.*a_prime_over_a/k/k*velocity_tot))/a_prime_over_a;
ppw->pv->y[ppw->pv->index_pt_phi] = eta - a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_g] -= 4.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_g] += k*k*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_b] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_b] += k*k*alpha;
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_cdm] = k*k*alpha;
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] += (-3.*a_prime_over_a - a*pba->Gamma_dcdm)*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_dcdm] = k*k*alpha;
}
/* fluid */
if (pba->has_fld == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_fld] += 3*(1.+pba->w0_fld+pba->wa_fld)*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_fld] += k*k*alpha;
}
/* scalar field: check */
if (pba->has_scf == _TRUE_) {
alpha_prime = 0.0;
/**- 2. * a_prime_over_a * alpha + eta
- 4.5 * (a2/k2) * ppw->rho_plus_p_shear; */
ppw->pv->y[ppw->pv->index_pt_phi_scf] += alpha*ppw->pvecback[pba->index_bg_phi_prime_scf];
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] +=
(-2.*a_prime_over_a*alpha*ppw->pvecback[pba->index_bg_phi_prime_scf]
-a*a* dV_scf(pba,ppw->pvecback[pba->index_bg_phi_scf])*alpha
+ppw->pvecback[pba->index_bg_phi_prime_scf]*alpha_prime);
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) {
delta_ur -= 4.*a_prime_over_a*alpha;
theta_ur += k*k*alpha;
/* shear and l3 are gauge invariant */
if (pba->has_dr == _TRUE_)
delta_dr += (-4.*a_prime_over_a + a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm]/ppw->pvecback[pba->index_bg_rho_dr])*alpha;
}
} /* end of gauge transformation to newtonian gauge */
/** (e) In any gauge, we should now implement the relativistic initial conditions in ur and ncdm variables */
if (pba->has_ur == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_ur] = delta_ur;
ppw->pv->y[ppw->pv->index_pt_theta_ur] = theta_ur;
ppw->pv->y[ppw->pv->index_pt_shear_ur] = shear_ur;
ppw->pv->y[ppw->pv->index_pt_l3_ur] = l3_ur;
}
if (pba->has_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++) {
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppw->pv->y[idx] = -0.25 * delta_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+1] = -epsilon/3./q/k*theta_ur* pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+2] = -0.5 * shear_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+3] = -0.25 * l3_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
//Jump to next momentum bin:
idx += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
}
}
if (pba->has_dr == _TRUE_) {
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*ppw->pvecback[pba->index_bg_rho_dr];
ppw->pv->y[ppw->pv->index_pt_F0_dr] = delta_dr*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+1] = 4./(3.*k)*theta_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+2] = 2.*shear_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+3] = l3_ur*f_dr;
}
}
/** - for tensors */
if (_tensors_) {
/* tensor initial conditions take into account the fact that
scalar (resp. tensor) Cl's are related to the real space
power spectrum of curvature (resp. of the tensor part of
metric perturbations)
<R(x) R(x)>, sum_ij<h_ij(x) h^ij(x)>
In momentum space it is conventional to use the modes R(k)
and h(k) where the quantity h obeying to the equation of
propagation
h'' + 2a'/a h + [k2+2K] h = 12piGa2 (rho+p) sigma = 8piGa2 p pi
and the power spectra in real space and momentum space are related through
<R(x) R(x)> = \int dk/k [k^3/2pi^2 <R(k)R(k)*>] = \int dk/k calPR(k)
sum_ij<h_ij(x) h^ij(x)> = \int dk/k [k^3/2pi^2 F(k^2/K) <h(k)h(k)*>] = \int dk/k F(k^2/K) calPh(k)
where calPR and calPh are the dimensionless spectrum of
curvature R, and F is a function of k2/K, where K is the curvature
parameter. F is equal to one in flat space (K=0), and coming
from the contraction of the laplacian eigentensor Q_ij with
itself. We will give F explicitly below.
Similarly the scalar (S) and tensor (T) C_ls are given by
C_l^S = 4pi \int dk/k [Delta_l^S(q)]^2 calPR(k)
C_l^T = 4pi \int dk/k [Delta_l^T(q)]^2 F(k^2/K) calPh(k)
The usual convention for the tensor-to-scalar ratio
r = A_t / A_s at pivot scale
= 16 epsilon in single-field inflation
is such that for constant calPR(k) and calPh(k),
r = 6 calPh(k) / calPR(k)
so calPh(k) = calPR(k) r / 6 = A_s r / 6 = A_t / 6
A priori it would make sense to say that for a power-law
primordial spectrum there is an extra factor (k/k_pivot)^n_t
(and eventually running and so on and so forth...)
However it has been shown that the minimal models of
inflation in a negatively curved bubble lead to
calP_h(k)=tanh(pi*nu/2). In open models it is customary to
define the tensor tilt in a non-flat universe as a deviation
from this behavior rather than from true scale-invariance in
the above sense.
Hence we should have
calPh(k) = (A_t/6) {tanh(pi*nu/2)} (k/k_pivot)^[n_t+...]
where the brackets mean "if K<0"
Then
C_l^T = 4pi \int dk/k [Delta_l^T(q)]^2 F(k^2/K) (A_t/6) {tanh(pi*nu/2)} k/k_pivot)^[n_t+...]
In the code, it is then a matter of choice to write:
* In the primordial module : calP_h(k) = (A_t/6) {tanh(pi*nu/2)} (k/k*)^n_T
* In the perturbation initial conditions: h = 1
* In the spectra module : C_l^T = 4/pi \int dk/k [Delta_l^T(q)]^2 F(k^2/K) calPh(k)
or:
* In the primordial module : calP_h(k) = A_t (k/k*)^n_T
* In the perturbation initial conditions: h = sqrt[F(k^2/K) / 6 {tanh(pi*nu/2)}]
* In the spectra module : C_l^T = 4/pi \int dk/k [Delta_l^T(q)]^2 calPh(k)
We choose this last option, such that the primordial and
spectra module differ minimally in flat and non-flat space. Then we must impose
h = sqrt[(F/6) tanh(pi*nu/2)]
The factor F is found to be given by:
sum_ij<h_ij(x) h^ij(x)> = \int dk/k [k2(k2-K)]/[(k2+3K)(k2+2K)] calP_h(k)
Introducing as usual q2 = k2 - 3K and using qdq = kdk this gives
sum_ij<h_ij(x) h^ij(x)> = \int dk/k [(q2-3K)(q2-4K)]/[q2(q2-K)] calP_h(k)
Using qdq = kdk this is equivalent to
sum_ij<h_ij(x) h^ij(x)> = \int dq/q [q2-4K]/[q2-K] calP_h(k(q))
Finally, introducing nu=q/sqrt(|K|) and sgnK=SIGN(k)=+-1, this could also be written
sum_ij<h_ij(x) h^ij(x)> = \int dnu/nu (nu2-4sgnK)/(nu2-sgnK) calP_h(k(nu))
Equation (43,44) of Hu, Seljak, White, Zaldarriaga is
equivalent to absorbing the above factor
(nu2-4sgnK)/(nu2-sgnK) in the definition of the primordial
spectrum. Since the initial condition should be written in terms of k rather than nu, they should read
h = sqrt[ [k2(k2-K)]/[(k2+3K)(k2+2K)] / 6 * tanh(pi*nu/2) ]
We leave the freedom to multiply by an arbitrary number
ppr->gw_ini. The standard convention corresponding to
standard definitions of r, A_T, n_T is however ppr->gw_ini=1.
*/
if (index_ic == ppt->index_ic_ten) {
ppw->pv->y[ppw->pv->index_pt_gw] = ppr->gw_ini/_SQRT6_;
}
k2 = k*k;
if (pba->sgnK != 0) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(k2*(k2-pba->K)/(k2+3.*pba->K)/(k2+2.*pba->K));
}
if (pba->sgnK == -1) {
if (k*k+3*pba->K >= 0.) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(tanh(_PI_/2.*sqrt(k2+3*pba->K)/sqrt(-pba->K)));
}
else {
ppw->pv->y[ppw->pv->index_pt_gw] = 0.;
}
}
}
return _SUCCESS_;
}
/**
* Evaluate background/thermodynamics at \f$ \tau \f$, infer useful flags / time scales for integrating perturbations.
*
* Evaluate background quantities at \f$ \tau \f$, as well as thermodynamics for scalar mode; infer useful flags and time scales for integrating the perturbations:
* - check whether tight-coupling approximation is needed.
* - check whether radiation (photons, massless neutrinos...) perturbations are needed.
* - choose step of integration: step = ppr->perturb_integration_stepsize * min_time_scale, where min_time_scale = smallest time scale involved in the equations. There are three time scales to compare:
* -# that of recombination, \f$ \tau_c = 1/\kappa' \f$
* -# Hubble time scale, \f$ \tau_h = a/a' \f$
* -# Fourier mode, \f$ \tau_k = 1/k \f$
*
* So, in general, min_time_scale = \f$ \min(\tau_c, \tau_b, \tau_h, \tau_k) \f$.
*
* However, if \f$ \tau_c \ll \tau_h \f$ and \f$ \tau_c
* \ll \tau_k \f$, we can use the tight-coupling regime for photons
* and write equations in such way that the time scale \f$
* \tau_c \f$ becomes irrelevant (no effective mass term in \f$
* 1/\tau_c \f$). Then, the smallest
* scale in the equations is only \f$ \min(\tau_h, \tau_k) \f$.
* In practice, it is sufficient to use only the condition \f$ \tau_c \ll \tau_h \f$.
*
* Also, if \f$ \rho_{matter} \gg \rho_{radiation} \f$ and \f$ k \gg
* aH \f$, we can switch off radiation perturbations (i.e. switch on
* the free-streaming approximation) and then the smallest scale is
* simply \f$ \tau_h \f$.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: in output contains the approximation to be used at this time
* @return the error status
*/
int perturb_approximations(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
double k,
double tau,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
/* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
double tau_k;
/* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
double tau_h;
/* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
double tau_c;
/** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */
class_test(k == 0.,
ppt->error_message,
"stop to avoid division by zero");
tau_k = 1./k;
/** - evaluate background quantities with background_at_tau() and
Hubble time scale \f$ \tau_h = a/a' \f$ */
/* note: this call fills ppw->pvecback, which the thermodynamics_at_z()
calls further down rely on (order of these calls matters) */
class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), ppw->pvecback),
pba->error_message,
ppt->error_message);
class_test(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a] == 0.,
ppt->error_message,
"aH=0, stop to avoid division by zero");
tau_h = 1./(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a]);
/** - for scalars modes: */
if (_scalars_) {
/** (a) evaluate thermodynamical quantities with thermodynamics_at_z() */
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
ppw->inter_mode,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
/** (b.1) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */
if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
ppw->approx[ppw->index_ap_tca] = (int)tca_off;
}
/** (b.2) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */
else {
/** (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */
tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];
class_test(tau_c < 0.,
ppt->error_message,
"tau_c = 1/kappa' should always be positive unless there is something wrong in the thermodynamics module. However you have here tau_c=%e at z=%e, conformal time=%e x_e=%e. (This could come from the interpolation of a too poorly sampled reionisation history?).\n",
tau_c,
1./ppw->pvecback[pba->index_bg_a]-1.,
tau,
ppw->pvecthermo[pth->index_th_xe]);
/** (b.2.b) check whether tight-coupling approximation should be on:
tca is on when tau_c is much smaller than BOTH the Hubble time
and the mode's oscillation time (thresholds set in precision struct) */
if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
(tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
ppw->approx[ppw->index_ap_tca] = (int)tca_on;
}
else {
ppw->approx[ppw->index_ap_tca] = (int)tca_off;
}
}
/* (c) free-streaming approximations: radiation streaming switched on
only well inside the Hubble radius (tau >> tau_k), after the
free-streaming time, and only if the user did not disable it */
if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
(tau > pth->tau_free_streaming) &&
(ppr->radiation_streaming_approximation != rsa_none)) {
ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
}
else {
ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
}
/* ultra-relativistic species: switch to fluid approximation inside the horizon */
if (pba->has_ur == _TRUE_) {
if ((tau/tau_k > ppr->ur_fluid_trigger_tau_over_tau_k) &&
(ppr->ur_fluid_approximation != ufa_none)) {
ppw->approx[ppw->index_ap_ufa] = (int)ufa_on;
}
else {
ppw->approx[ppw->index_ap_ufa] = (int)ufa_off;
}
}
/* non-cold dark matter: switch to fluid approximation inside the horizon */
if (pba->has_ncdm == _TRUE_) {
if ((tau/tau_k > ppr->ncdm_fluid_trigger_tau_over_tau_k) &&
(ppr->ncdm_fluid_approximation != ncdmfa_none)) {
ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_on;
}
else {
ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_off;
}
}
}
/** - for tensor modes: */
if (_tensors_) {
/** (a) evaluate thermodynamical quantities with thermodynamics_at_z() */
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
ppw->inter_mode,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
/** (b.1) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */
if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
ppw->approx[ppw->index_ap_tca] = (int)tca_off;
}
/** (b.2) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */
else {
/** (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */
/* NOTE(review): unlike the scalar branch, tau_c is not sign-checked
here before use — presumably because the scalar branch already
aborts on a bad thermodynamics table; confirm this is intentional */
tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];
/** (b.2.b) check whether tight-coupling approximation should be on */
if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
(tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
ppw->approx[ppw->index_ap_tca] = (int)tca_on;
}
else {
ppw->approx[ppw->index_ap_tca] = (int)tca_off;
}
}
/* radiation streaming approximation, same triggers as for scalars */
if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
(tau > pth->tau_free_streaming) &&
(ppr->radiation_streaming_approximation != rsa_none)) {
ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
}
else {
ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
}
}
return _SUCCESS_;
}
/**
* Compute typical timescale over which the perturbation equations
* vary. Some integrators (e.g. Runge-Kutta) benefit from calling this
* routine at each step in order to adapt the next step.
*
* This is one of the few functions in the code which are passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
* is a bit special:
* - fixed parameters and workspaces are passed through a generic pointer.
* generic_integrator() doesn't know the content of this pointer.
* - the error management is a bit special: errors are not written as usual to pth->error_message, but to a generic
* error_message passed in the list of arguments.
*
* @param tau Input : conformal time
* @param parameters_and_workspace Input : fixed parameters (e.g. indices), workspace, approximation used, etc.
* @param timescale Output: perturbation variation timescale (given the approximation used)
* @param error_message Output: error message
*/
int perturb_timescale(
double tau,
void * parameters_and_workspace,
double * timescale,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
/* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
double tau_k;
/* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
double tau_h;
/* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
double tau_c;
/* various pointers allowing to extract the fields of the
parameter_and_workspace input structure */
struct perturb_parameters_and_workspace * pppaw;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
/** - extract the fields of the parameter_and_workspace input structure */
/* (this routine is a generic_integrator() callback, hence the opaque
void* argument and the error_message passed explicitly instead of
the usual module error buffers) */
pppaw = parameters_and_workspace;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
/** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */
class_test(pppaw->k == 0.,
ppt->error_message,
"stop to avoid division by zero");
tau_k = 1./pppaw->k;
/** - evaluate background quantities with background_at_tau() and
Hubble time scale \f$ \tau_h = a/a' \f$ */
/* note: fills pvecback, which is needed by the thermodynamics_at_z()
calls below (order matters) */
class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), pvecback),
pba->error_message,
error_message);
class_test(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] == 0.,
error_message,
"aH=0, stop to avoid division by zero");
tau_h = 1./(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]);
/** - for scalars modes: */
if ((ppt->has_scalars == _TRUE_) && (pppaw->index_md == ppt->index_md_scalars)) {
*timescale = tau_h;
/* the oscillation time 1/k is only relevant while radiation
perturbations are followed (rsa off) or while ncdm multipoles
are evolved */
if ((ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) || (pba->has_ncdm == _TRUE_))
*timescale = MIN(tau_k,*timescale);
/* when tight coupling is off, the (short) Thomson scattering time
also limits the step */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
ppw->inter_mode,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
if (pvecthermo[pth->index_th_dkappa] != 0.) {
/** (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */
tau_c = 1./pvecthermo[pth->index_th_dkappa];
*timescale = MIN(tau_c,*timescale);
}
}
}
/** - for vector modes: */
if ((ppt->has_vectors == _TRUE_) && (pppaw->index_md == ppt->index_md_vectors)) {
*timescale = MIN(tau_h,tau_k);
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
ppw->inter_mode,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
if (pvecthermo[pth->index_th_dkappa] != 0.) {
/** compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */
tau_c = 1./pvecthermo[pth->index_th_dkappa];
*timescale = MIN(tau_c,*timescale);
}
}
}
/** - for tensor modes: */
if ((ppt->has_tensors == _TRUE_) && (pppaw->index_md == ppt->index_md_tensors)) {
*timescale = MIN(tau_h,tau_k);
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
ppw->inter_mode,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
if (pvecthermo[pth->index_th_dkappa] != 0.) {
/** compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */
tau_c = 1./pvecthermo[pth->index_th_dkappa];
*timescale = MIN(tau_c,*timescale);
}
}
}
return _SUCCESS_;
}
/**
* Compute metric perturbations (those not integrated over time) using Einstein equations
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param y Input: vector of perturbations (those integrated over time) (already allocated)
* @param ppw Input/Output: in output contains the updated metric perturbations
* @return the error status
*/
int perturb_einstein(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
double k,
double tau,
double * y,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
double k2,a,a2,a_prime_over_a;
double s2_squared;
double shear_g = 0.;
double D=0, cs2num=0;
double l1=0, l2=0, l3=0, l4=0, l5=0, l6=0, l7=0, l8=0;
double M2=0, kin=0, bra=0, run=0, ten=0;
double rho_tot=0, p_tot=0, rho_smg=0, p_smg=0, H=0;
/** - wavenumber and scale factor related quantities */
k2 = k*k;
a = ppw->pvecback[pba->index_bg_a];
a2 = a * a;
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
s2_squared = 1.-3.*pba->K/k2;
/* sum up perturbations from all species */
class_call(perturb_total_stress_energy(ppr,pba,pth,ppt,index_md,k,y,ppw),
ppt->error_message,
ppt->error_message);
/** - for scalar modes: */
if (_scalars_) {
/** (c) infer metric perturbations from Einstein equations */
/* newtonian gauge */
if (ppt->gauge == newtonian) {
/* in principle we could get phi from the constrain equation:
ppw->pvecmetric[ppw->index_mt_phi] = -1.5 * (a2/k2/k2/s2/s2) * (k2 * delta_rho + 3.*a_prime_over_a * rho_plus_p_theta);
with s2_squared = sqrt(1-3K/k2) = ppw->s_l[2]*ppw->s_l[2]
This was the case in class v1.3. However the integration is
more stable is we treat phi as a dynamical variable
y[ppw->pv->index_pt_phi], which derivative is given by the
second equation below (credits to Guido Walter Pettinari). */
/* equation for psi */
ppw->pvecmetric[ppw->index_mt_psi] = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;
/* equation for phi' */
ppw->pvecmetric[ppw->index_mt_phi_prime] = -a_prime_over_a * ppw->pvecmetric[ppw->index_mt_psi] + 1.5 * (a2/k2) * ppw->rho_plus_p_theta;
/* eventually, infer radiation streaming approximation for
gamma and ur (this is exactly the right place to do it
because the result depends on h_prime) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
ppt->error_message,
ppt->error_message);
}
}
/* synchronous gauge */
if (ppt->gauge == synchronous) {
if (pba->has_smg == _TRUE_) {
M2 = ppw->pvecback[pba->index_bg_M2_smg];
kin = ppw->pvecback[pba->index_bg_kineticity_smg];
bra = ppw->pvecback[pba->index_bg_braiding_smg];
run = ppw->pvecback[pba->index_bg_mpl_running_smg];
ten = ppw->pvecback[pba->index_bg_tensor_excess_smg];
rho_tot = ppw->pvecback[pba->index_bg_rho_tot_wo_smg];
p_tot = ppw->pvecback[pba->index_bg_p_tot_wo_smg];
rho_smg = ppw->pvecback[pba->index_bg_rho_smg];
p_smg = ppw->pvecback[pba->index_bg_p_smg];
H = ppw->pvecback[pba->index_bg_H];
l1 = ppw->pvecback[pba->index_bg_lambda_1_smg];
l2 = ppw->pvecback[pba->index_bg_lambda_2_smg];
l3 = ppw->pvecback[pba->index_bg_lambda_3_smg];
l4 = ppw->pvecback[pba->index_bg_lambda_4_smg];
l5 = ppw->pvecback[pba->index_bg_lambda_5_smg];
l6 = ppw->pvecback[pba->index_bg_lambda_6_smg];
l7 = ppw->pvecback[pba->index_bg_lambda_7_smg];
l8 = ppw->pvecback[pba->index_bg_lambda_8_smg];
cs2num = ppw->pvecback[pba->index_bg_cs2num_smg];
D = ppw->pvecback[pba->index_bg_kinetic_D_smg];
/* write here the values, as taken from the integration */
ppw->pvecmetric[ppw->index_mt_vx_smg] = y[ppw->pv->index_pt_vx_smg];
ppw->pvecmetric[ppw->index_mt_vx_prime_smg] = y[ppw->pv->index_pt_vx_prime_smg];
/* scalar field equation */
ppw->pvecmetric[ppw->index_mt_vx_prime_prime_smg] = (-2.)*pow((-2.) + bra,-1)*cs2num*pow(H,-1)*pow(D,-1)*pow(k,2)*y[ppw->pv->index_pt_eta]*pow(a,-1) + (-3.)*pow((-2.) + bra,-1)*pow(H,-1)*pow(D,-1)*l2*pow(M2,-1)*ppw->delta_rho*a + (-9.)/2.*bra*pow(H,-1)*pow(D,-1)*pow(M2,-1)*ppw->delta_p*a + 8.*pow((-2.) + bra,-1)*H*pow(D,-1)*l7*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + (cs2num*pow(k,2) + (-4.)*pow(H,2)*l8*pow(a,2))*2.*pow((-2.) + bra,-1)*pow(D,-1)*ppw->pvecmetric[ppw->index_mt_vx_smg];
class_test(isnan(ppw->pvecmetric[ppw->index_mt_vx_prime_prime_smg]),
ppt->error_message,
" Isnan v_X'' at a =%e !",a);
/* first equation involving total density fluctuation */
ppw->pvecmetric[ppw->index_mt_h_prime] = (-4.)*pow((-2.) + bra,-1)*pow(H,-1)*pow(k,2)*y[ppw->pv->index_pt_eta]*pow(a,-1) + (-6.)*pow((-2.) + bra,-1)*pow(H,-1)*pow(M2,-1)*ppw->delta_rho*a + (3.*bra + kin)*2.*pow((-2.) + bra,-1)*H*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + (2.*pow((-2.) + bra,-1)*bra*pow(k,2) + ((-18.) + 15.*bra + 2.*kin)*pow((-2.) + bra,-1)*rho_smg*pow(a,2) + (18. + (-18.)*M2 + 15.*bra*M2 + 2.*kin*M2)*pow((-2.) + bra,-1)*rho_tot*pow(M2,-1)*pow(a,2) + (2. + (-2.)*M2 + bra*M2)*9.*pow((-2.) + bra,-1)*pow(M2,-1)*p_tot*pow(a,2) + 9.*p_smg*pow(a,2))*ppw->pvecmetric[ppw->index_mt_vx_smg];
/* eventually, infer radiation streaming approximation for gamma and ur (this is exactly the right place to do it because the result depends on h_prime) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
ppt->error_message,
ppt->error_message);
/* update total theta given rsa approximation results */
ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g;
if (pba->has_ur == _TRUE_) {
ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur;
}
}
/* second equation involving total velocity */
ppw->pvecmetric[ppw->index_mt_eta_prime] = 1./2.*bra*H*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + 3./2.*pow(k,-2)*pow(M2,-1)*ppw->rho_plus_p_theta*pow(a,2) + (((-3.) + bra)*1./2.*rho_smg*pow(a,2) + (3. + (-3.)*M2 + bra*M2)*1./2.*rho_tot*pow(M2,-1)*pow(a,2) + ((-1.) + M2)*(-3.)/2.*pow(M2,-1)*p_tot*pow(a,2) + (-3.)/2.*p_smg*pow(a,2))*ppw->pvecmetric[ppw->index_mt_vx_smg];
/* second equation involving total velocity */
ppw->pvecmetric[ppw->index_mt_eta_prime] = 1./2.*bra*H*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + 3./2.*pow(k,-2)*pow(M2,-1)*ppw->rho_plus_p_theta*pow(a,2) + (((-3.) + bra)*1./2.*rho_smg*pow(a,2) + (3. + (-3.)*M2 + bra*M2)*1./2.*rho_tot*pow(M2,-1)*pow(a,2) + ((-1.) + M2)*(-3.)/2.*pow(M2,-1)*p_tot*pow(a,2) + (-3.)/2.*p_smg*pow(a,2))*ppw->pvecmetric[ppw->index_mt_vx_smg]; /* eta' */
/* third equation involving total pressure */
ppw->pvecmetric[ppw->index_mt_h_prime_prime] = 2.*pow(D,-1)*pow(k,2)*l1*y[ppw->pv->index_pt_eta] + 2.*H*pow(D,-1)*l3*ppw->pvecmetric[ppw->index_mt_h_prime]*a + (-9.)*kin*pow(D,-1)*pow(M2,-1)*ppw->delta_p*pow(a,2) + 3.*pow(H,2)*pow(D,-1)*l4*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*pow(a,2) + (2.*H*pow(D,-1)*pow(k,2)*l5*a + 6.*pow(H,3)*pow(D,-1)*l6*pow(a,3))*ppw->pvecmetric[ppw->index_mt_vx_smg];
/* alpha = (h'+6eta')/2k^2 */
ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2;
/* eventually, infer first-order tight-coupling approximation for photon
shear, then correct the total shear */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) {
shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]);
ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;
}
/* fourth equation involving total shear */
/* fourth equation involving total shear */
ppw->pvecmetric[ppw->index_mt_alpha_prime] = (1. + ten)*y[ppw->pv->index_pt_eta] + (2. + run)*(-1.)*H*ppw->pvecmetric[ppw->index_mt_alpha]*a + (run + (-1.)*ten)*H*ppw->pvecmetric[ppw->index_mt_vx_smg]*a + (-9.)/2.*pow(k,-2)*pow(M2,-1)*ppw->rho_plus_p_shear*pow(a,2);
}//end if has_smg
// Standard equations
else {
/* first equation involving total density fluctuation */
ppw->pvecmetric[ppw->index_mt_h_prime] =
( k2 * s2_squared * y[ppw->pv->index_pt_eta] + 1.5 * a2 * ppw->delta_rho)/(0.5*a_prime_over_a); /* h' */
/* eventually, infer radiation streaming approximation for
gamma and ur (this is exactly the right place to do it
because the result depends on h_prime) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
ppt->error_message,
ppt->error_message);
/* update total theta given rsa approximation results */
ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g;
if (pba->has_ur == _TRUE_) {
ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur;
}
}
/* second equation involving total velocity */
ppw->pvecmetric[ppw->index_mt_eta_prime] = (1.5 * a2 * ppw->rho_plus_p_theta + 0.5 * pba->K * ppw->pvecmetric[ppw->index_mt_h_prime])/k2/s2_squared; /* eta' */
/* third equation involving total pressure */
ppw->pvecmetric[ppw->index_mt_h_prime_prime] =
- 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_h_prime]
+ 2. * k2 * s2_squared * y[ppw->pv->index_pt_eta]
- 9. * a2 * ppw->delta_p;
/* alpha = (h'+6eta')/2k^2 */
ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2;
/* eventually, infer first-order tight-coupling approximation for photon
shear, then correct the total shear */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) {
shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]);
ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;
}
/* fourth equation involving total shear */
ppw->pvecmetric[ppw->index_mt_alpha_prime] = //TBC
- 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_alpha]
+ y[ppw->pv->index_pt_eta]
- 4.5 * (a2/k2) * ppw->rho_plus_p_shear;
}// end of else (if no smg)
} //end of synchronous
/* transform (delta_m, theta_m) of the current gauge into
gauge-independent variables (you could comment this out if you
really want gauge-dependent results) */
if (ppt->has_source_delta_m == _TRUE_) {
ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;
// note: until 2.4.3 there was a typo, the factor was (-2 H'/H) instead
// of (3 aH). There is the same typo in the CLASSgal paper
// 1307.1459v1,v2,v3. It came from a confusion between (1+w_total)
// and (1+w_matter)=1 [the latter is the relevant one here].
//
// note2: at this point this gauge-invariant variable is only
// valid if all matter components are pressureless and
// stable. This relation will be generalised soon to the case
// of decaying dark matter.
}
if (ppt->has_source_theta_m == _TRUE_) {
if (ppt->gauge == synchronous) {
ppw->theta_m += ppw->pvecmetric[ppw->index_mt_alpha]*k2;
}
}
}
if (_vectors_) {
if (ppt->gauge == newtonian) {
ppw->pvecmetric[ppw->index_mt_V_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_V] - 3.*ppw->vector_source_pi/k;
}
if (ppt->gauge == synchronous) {
// assuming vector_source_pi = p_class a^2 pi_T^{(1)} and vector_source_v = (rho_class+p_class)a^2 v^{(1)}
// from Hu and White:
ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi/k2;
// what we suspect:
//ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi;
// if we use the other equation:
//ppw->pvecmetric[ppw->index_mt_hv_prime] = -2./k/ (1.-2.*pba->K/k2) * 3. * ppw->vector_source_v;
}
}
if (_tensors_) {
/* single einstein equation for tensor perturbations */
if (pba->has_smg == _FALSE_) {
ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-(k2+2.*pba->K)*y[ppw->pv->index_pt_gw]+ppw->gw_source;
}
/* modified version if gravity is non-standard. Note that no curvature is allowed in this case */
else{
double M2 = ppw->pvecback[pba->index_bg_M2_smg];
double run = ppw->pvecback[pba->index_bg_mpl_running_smg];
double c_t2 = (1. + ppw->pvecback[pba->index_bg_tensor_excess_smg]);
ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -(2. + run)*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-k2*c_t2*y[ppw->pv->index_pt_gw]+ppw->gw_source/M2;
}
}
return _SUCCESS_;
}
/**
 * Compute the total stress-energy perturbations summed over all species,
 * i.e. the matter source terms of the Einstein equations:
 * delta_rho (total density perturbation), rho_plus_p_theta (total
 * velocity), rho_plus_p_shear (total anisotropic stress) and delta_p
 * (total pressure perturbation), all stored in the workspace ppw.
 * Also stores the matter-only sums delta_m / theta_m (when those sources
 * are requested) and the vector/tensor sources when those modes are evolved.
 *
 * @param ppr      Input: pointer to precision structure
 * @param pba      Input: pointer to background structure
 * @param pth      Input: pointer to thermodynamics structure
 * @param ppt      Input: pointer to perturbation structure
 * @param index_md Input: index of the mode (scalar/vector/tensor)
 * @param k        Input: wavenumber
 * @param y        Input: vector of perturbations at the current time
 * @param ppw      Input/Output: workspace where the totals are stored
 * @return the error status
 */
int perturb_total_stress_energy(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt,
                                int index_md,
                                double k,
                                double * y,
                                struct perturb_workspace * ppw
                                ) {
  /** Summary: */
  /** - define local variables */
  double a,a2;
  /* species perturbations default to zero so that approximation branches
     which do not set them contribute nothing to the totals */
  double delta_g=0.;
  double theta_g=0.;
  double shear_g=0.;
  double delta_ur=0.;
  double theta_ur=0.;
  double shear_ur=0.;
  double rho_delta_ncdm=0.;
  double rho_plus_p_theta_ncdm=0.;
  double rho_plus_p_shear_ncdm=0.;
  double delta_p_ncdm=0.;
  double factor;
  double rho_plus_p_ncdm;
  int index_q,n_ncdm,idx;
  double epsilon,q,q2,cg2_ncdm,w_ncdm,rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm;
  double rho_m,delta_rho_m,rho_plus_p_m,rho_plus_p_theta_m;
  double w;
  double gwncdm;
  double rho_relativistic;
  double rho_dr_over_f;
  double delta_rho_scf, delta_p_scf, psi;
  /** - wavenumber and scale factor related quantities */
  a = ppw->pvecback[pba->index_bg_a];
  a2 = a * a;
  if (_scalars_) {
    /** (a) deal with approximation schemes */
    /** (a.1) photons */
    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
        /** (a.1.1) no approximation */
        delta_g = y[ppw->pv->index_pt_delta_g];
        theta_g = y[ppw->pv->index_pt_theta_g];
        shear_g = y[ppw->pv->index_pt_shear_g];
      }
      else {
        /** (a.1.2) radiation streaming approximation */
        delta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
        theta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
        shear_g = 0.; /* shear always neglected in radiation streaming approximation */
      }
    }
    else {
      /** (a.1.3) tight coupling approximation */
      delta_g = y[ppw->pv->index_pt_delta_g];
      theta_g = y[ppw->pv->index_pt_theta_g];
      /* first-order tight-coupling approximation for photon shear */
      if (ppt->gauge == newtonian) {
        shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_g];
      }
      else {
        shear_g = 0.; /* in the synchronous gauge, the expression of
                         shear_g (at first-order in a tight-coupling
                         expansion) is a function of h' and eta'; but h'
                         and eta' are calculated in perturb_einstein()
                         as a function of delta_g and theta_g. Hence,
                         we set shear_g temporarily to zero, and set it
                         to the right first-order value in
                         perturb_einstein(), just before using the
                         Einstein equation for the shear. */
      }
    }
    /** (a.2) ur */
    if (pba->has_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
        delta_ur = y[ppw->pv->index_pt_delta_ur];
        theta_ur = y[ppw->pv->index_pt_theta_ur];
        shear_ur = y[ppw->pv->index_pt_shear_ur];
      }
      else {
        delta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
        theta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
        shear_ur = 0.; /* shear always neglected in free streaming approximation */
      }
    }
    /** (b) compute the total density, velocity and shear perturbations */
    /* photon and baryon contribution */
    ppw->delta_rho = ppw->pvecback[pba->index_bg_rho_g]*delta_g
      + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
    ppw->rho_plus_p_theta = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*theta_g
      + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
    ppw->rho_plus_p_shear = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;
    /* photon pressure perturbation is delta_rho_g/3; baryon one uses the
       baryon sound speed squared cb2 from the thermodynamics module */
    ppw->delta_p = 1./3.*ppw->pvecback[pba->index_bg_rho_g]*delta_g
      + ppw->pvecthermo[pth->index_th_cb2]*ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
    /* cdm contribution */
    if (pba->has_cdm == _TRUE_) {
      ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
      /* in the synchronous gauge cdm defines the frame, so theta_cdm=0 and
         is not evolved: only add it in the newtonian gauge */
      if (ppt->gauge == newtonian)
        ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
    }
    /* dcdm contribution */
    if (pba->has_dcdm == _TRUE_) {
      ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
      ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
    }
    /* fluid contribution */
    if (pba->has_fld == _TRUE_) {
      /* CPL equation of state w(a) = w0 + wa*(1-a/a_today) */
      w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today);
      ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld];
      ppw->rho_plus_p_theta += (1.+w)*ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_theta_fld];
      ppw->delta_p = ppw->delta_p + pba->cs2_fld * ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld];
    }
    /* ultra-relativistic decay radiation */
    if (pba->has_dr == _TRUE_) {
      /* We have delta_rho_dr = rho_dr * F0_dr / f, where F follows the
         convention in astro-ph/9907388 and f is defined as
         f = rho_dr*a^4/rho_crit_today. In CLASS density units
         rho_crit_today = H0^2.
      */
      rho_dr_over_f = pow(pba->H0/a2,2);
      ppw->delta_rho += rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
      ppw->rho_plus_p_theta += 4./3.*3./4*k*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+1];
      ppw->rho_plus_p_shear += 2./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+2];
      ppw->delta_p += 1./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
    }
    /* ultra-relativistic neutrino/relics contribution */
    if (pba->has_ur == _TRUE_) {
      ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
      ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*theta_ur;
      ppw->rho_plus_p_shear = ppw->rho_plus_p_shear + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*shear_ur;
      ppw->delta_p += 1./3.*ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
    }
    /* non-cold dark matter contribution */
    if (pba->has_ncdm == _TRUE_) {
      idx = ppw->pv->index_pt_psi0_ncdm1;
      if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
        /* fluid approximation on: the perturbations are evolved already
           integrated over momentum (delta, theta, shear per species) */
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_ncdm_bg = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
          p_ncdm_bg = ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
          pseudo_p_ncdm = ppw->pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];
          rho_plus_p_ncdm = rho_ncdm_bg + p_ncdm_bg;
          w_ncdm = p_ncdm_bg/rho_ncdm_bg;
          /* effective (adiabatic) sound speed of the ncdm fluid */
          cg2_ncdm = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
          if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
            ppw->delta_ncdm[n_ncdm] = y[idx];
            ppw->theta_ncdm[n_ncdm] = y[idx+1];
            ppw->shear_ncdm[n_ncdm] = y[idx+2];
          }
          ppw->delta_rho += rho_ncdm_bg*y[idx];
          ppw->rho_plus_p_theta += rho_plus_p_ncdm*y[idx+1];
          ppw->rho_plus_p_shear += rho_plus_p_ncdm*y[idx+2];
          ppw->delta_p += cg2_ncdm*rho_ncdm_bg*y[idx];
          idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
        }
      }
      else{
        /* fluid approximation off: we must integrate the distribution
           function perturbation over momentum bins to find the fluid
           quantities */
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_delta_ncdm = 0.0;
          rho_plus_p_theta_ncdm = 0.0;
          rho_plus_p_shear_ncdm = 0.0;
          delta_p_ncdm = 0.0;
          factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
          for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
            q = pba->q_ncdm[n_ncdm][index_q];
            q2 = q*q;
            epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
            rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
            rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
            rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
            delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
            //Jump to next momentum bin:
            idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
          }
          rho_delta_ncdm *= factor;
          rho_plus_p_theta_ncdm *= k*factor;
          rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
          delta_p_ncdm *= factor/3.;
          if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
            ppw->delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
            ppw->theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
              (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
            ppw->shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
              (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
          }
          ppw->delta_rho += rho_delta_ncdm;
          ppw->rho_plus_p_theta += rho_plus_p_theta_ncdm;
          ppw->rho_plus_p_shear += rho_plus_p_shear_ncdm;
          ppw->delta_p += delta_p_ncdm;
        }
      }
    }
    /* scalar field contribution.
       In Newtonian gauge, delta_scf depends on the metric perturbation psi which is inferred
       from rho_plus_p_shear. So the contribution from the scalar field must be below all
       species with non-zero shear.
    */
    if (pba->has_scf == _TRUE_) {
      if (ppt->gauge == synchronous){
        delta_rho_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
        delta_p_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           - ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
      }
      else{
        /* equation for psi */
        psi = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k/k) * ppw->rho_plus_p_shear;
        delta_rho_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
        delta_p_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           - ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
      }
      ppw->delta_rho += delta_rho_scf;
      ppw->rho_plus_p_theta += 1./3.*
        k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
      ppw->delta_p += delta_p_scf;
    }
    /* store delta_m in the current gauge. In perturb_einstein, this
       will be transformed later on into the gauge-independent variable D
       = delta_m - 2H'/H \theta_m/k^2 . */
    if (ppt->has_source_delta_m == _TRUE_) {
      /* include baryons and cold dark matter */
      delta_rho_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
      rho_m = ppw->pvecback[pba->index_bg_rho_b];
      if (pba->has_cdm == _TRUE_) {
        delta_rho_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
        rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
      }
      /* include decaying cold dark matter */
      if (pba->has_dcdm == _TRUE_) {
        delta_rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
        rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
      }
      /* include any other species non-relativistic today (like ncdm species) */
      if (pba->has_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          delta_rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]*ppw->delta_ncdm[n_ncdm];
          rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
        }
      }
      /* infer delta_m */
      ppw->delta_m = delta_rho_m/rho_m;
    }
    /* store theta_m in the current gauge. In perturb_einstein, this
       will be transformed later on into the gauge-independent variable
       Theta . Note that computing theta_m is necessary also if we want
       the delta_m source only, because the gauge-invariant delta_m
       involves theta_m in the current gauge. */
    if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
      /* include baryons and cold dark matter */
      rho_plus_p_theta_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
      rho_plus_p_m = ppw->pvecback[pba->index_bg_rho_b];
      if (pba->has_cdm == _TRUE_) {
        /* theta_cdm is only evolved in the newtonian gauge (it vanishes
           in the synchronous gauge by choice of frame) */
        if (ppt->gauge == newtonian)
          rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
        rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_cdm];
      }
      if (pba->has_dcdm == _TRUE_) {
        rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
        rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_dcdm];
      }
      /* include any other species non-relativistic today (like ncdm species) */
      if (pba->has_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_plus_p_theta_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm])*ppw->theta_ncdm[n_ncdm];
          rho_plus_p_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
        }
      }
      /* infer theta_m */
      ppw->theta_m = rho_plus_p_theta_m/rho_plus_p_m;
    }
  }
  if (_vectors_) {
    ppw->vector_source_pi = 0.;
    ppw->vector_source_v = 0.;
    /** photon contribution to vector sources: */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
        /* NOTE(review): delta_g enters this sum twice
           (y[...delta_g] + 2.*y[...delta_g]); presumably one of the two
           terms was meant to be a different multipole — confirm against
           the vector-mode derivation before relying on vector sources. */
        ppw->vector_source_v += 4./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
          * (-1./4.*_SQRT2_)
          * (y[ppw->pv->index_pt_delta_g]+2.*y[ppw->pv->index_pt_delta_g]+y[ppw->pv->index_pt_shear_g]);
        ppw->vector_source_pi += 1./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
          * (6.*_SQRT2_/5./sqrt(1.-2.*pba->K/k/k))
          * (4./3./k*y[ppw->pv->index_pt_theta_g]+y[ppw->pv->index_pt_l3_g]);
      }
    }
    /* baryons: no vector-mode contribution implemented here yet */
  }
  if (_tensors_) {
    ppw->gw_source = 0.0;
    /** photon contribution to gravitational wave source: */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
        ppw->gw_source += (-_SQRT6_*4*a2*ppw->pvecback[pba->index_bg_rho_g]*
                           (1./15.*y[ppw->pv->index_pt_delta_g]+
                            4./21.*y[ppw->pv->index_pt_shear_g]+
                            1./35.*y[ppw->pv->index_pt_l3_g+1]));
      }
    }
    /** ur contribution to gravitational wave source: */
    if (ppt->evolve_tensor_ur == _TRUE_){
      rho_relativistic = 0.;
      if (ppt->tensor_method == tm_exact)
        rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
      if (ppt->tensor_method == tm_massless_approximation) {
        if (pba->has_ur == _TRUE_)
          rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
        if (pba->has_ncdm == _TRUE_) {
          for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) {
            /* (3 p_ncdm1) is the "relativistic" contribution to rho_ncdm1 */
            rho_relativistic += 3.*ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
          }
        }
      }
      ppw->gw_source += (-_SQRT6_*4*a2*rho_relativistic*
                         (1./15.*y[ppw->pv->index_pt_delta_ur]+
                          4./21.*y[ppw->pv->index_pt_shear_ur]+
                          1./35.*y[ppw->pv->index_pt_l3_ur+1]));
    }
    /** ncdm contribution to gravitational wave source: */
    if (ppt->evolve_tensor_ncdm == _TRUE_){
      idx = ppw->pv->index_pt_psi0_ncdm1;
      // We must integrate over momentum to find the perturbations:
      for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
        gwncdm = 0.;
        factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
        for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
          q = pba->q_ncdm[n_ncdm][index_q];
          q2 = q*q;
          epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
          gwncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*(1./15.*y[idx]+2./21.*y[idx+2]+1./35.*y[idx+4]);
          //Jump to next momentum bin:
          idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
        }
        gwncdm *= -_SQRT6_*4*a2*factor;
        ppw->gw_source += gwncdm;
      }
    }
  }
  return _SUCCESS_;
}
/**
* Compute the source functions (three terms for temperature, one for
* E or B modes, etc.)
*
* This is one of the few functions in the code which are passed to
* the generic_integrator() routine. Since generic_integrator()
* should work with functions passed from various modules, the format
* of the arguments is a bit special:
*
* - fixed parameters and workspaces are passed through a generic
* pointer. generic_integrator() doesn't know the content of this
* pointer.
*
* - the error management is a bit special: errors are not written as
* usual to pth->error_message, but to a generic error_message passed
* in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of time derivative of perturbations
* @param index_tau Input: index in the array tau_sampling
* @param parameters_and_workspace Input/Output: in input, all parameters needed by perturb_derivs, in output, source terms
* @param error_message Output: error message
* @return the error status
*/
int perturb_sources(
                    double tau,
                    double * y,
                    double * dy,
                    int index_tau,
                    void * parameters_and_workspace,
                    ErrorMsg error_message
                    ) {
  /** Summary: */
  /** - define local variables */
  double P;               /* polarization source combination (Pi in the literature) */
  int index_type;
  struct perturb_parameters_and_workspace * pppaw;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  int index_md;
  int index_ic;
  int index_k;
  double k;
  double z;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;
  double delta_g, delta_rho_scf, rho_plus_p_theta_scf;
  double a_prime_over_a=0.;  /* (a'/a) */
  double a_prime_over_a_prime=0.;  /* (a'/a)' */
  int switch_isw = 1;     /* set to 0 below if the early/late ISW term is disabled by the user */
  double a_rel, a2_rel, f_dr;
  /** - rename structure fields (just to avoid heavy notations) */
  pppaw = parameters_and_workspace;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  index_md = pppaw->index_md;
  index_ic = pppaw->index_ic;
  index_k = pppaw->index_k;
  k = pppaw->k;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;
  /** - get background/thermo quantities in this point */
  class_call(background_at_tau(pba,
                               tau,
                               pba->normal_info,
                               pba->inter_closeby,
                               &(ppw->last_index_back),
                               pvecback),
             pba->error_message,
             error_message);
  /* redshift z = a_today/a - 1 at this conformal time */
  z = pba->a_today/pvecback[pba->index_bg_a]-1.;
  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 z,  /* redshift z=1/a-1 */
                                 pth->inter_closeby,
                                 &(ppw->last_index_thermo),
                                 pvecback,
                                 pvecthermo),
             pth->error_message,
             error_message);
  /* scale factor normalized to today */
  a_rel = ppw->pvecback[pba->index_bg_a]/pba->a_today;
  a2_rel = a_rel * a_rel;
  /* derived background quantities, useful only in synchronous gauge */
  if (ppt->gauge == synchronous) {
    a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H];  /* (a'/a)=aH */
    a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2);  /* (a'/a)' = aH'+(aH)^2 */
  }
  /* scalars */
  if (_scalars_) {
    /** - compute metric perturbations */
    class_call(perturb_einstein(ppr,
                                pba,
                                pth,
                                ppt,
                                index_md,
                                k,
                                tau,
                                y,
                                ppw),
               ppt->error_message,
               error_message);
    /** - compute quantities depending on approximation schemes */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
      delta_g = ppw->rsa_delta_g;
      P = 0.;
    }
    else {
      delta_g = y[ppw->pv->index_pt_delta_g];
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on)
        P = 5.* ppw->s_l[2] * ppw->tca_shear_g/8.;  /* (2.5+0.5+2)shear_g/8 */
      else
        P = (y[ppw->pv->index_pt_pol0_g] + y[ppw->pv->index_pt_pol2_g] + 2.* ppw->s_l[2] *y[ppw->pv->index_pt_shear_g])/8.;
    }
    /** for each type, compute source terms */
    /* scalar temperature */
    if (ppt->has_source_t == _TRUE_) {
      /* check whether integrated Sachs-Wolfe term should be included:
         the user may switch off the early and/or late ISW contribution,
         with the split between the two set by eisw_lisw_split_z */
      if ((ppt->switch_eisw == 0) && (z >= ppt->eisw_lisw_split_z)){
        switch_isw = 0;
      }
      if ((ppt->switch_lisw == 0) && (z < ppt->eisw_lisw_split_z)) {
        switch_isw=0;
      }
      /* newtonian gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == newtonian) {
        _set_source_(ppt->index_tp_t0) = pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_phi_prime] + pvecthermo[pth->index_th_g] * delta_g / 4.;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_exp_m_kappa] * k* pvecmetric[ppw->index_mt_psi] + pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b]/k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_g] * P;
        }
      */
      /* newtonian gauge: slightly more complicated form, but more efficient numerically */
      if (ppt->gauge == newtonian) {
        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g / 4. + pvecmetric[ppw->index_mt_psi])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_phi]-pvecmetric[ppw->index_mt_psi])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * pvecmetric[ppw->index_mt_phi_prime])
          + ppt->switch_dop /k/k * (pvecthermo[pth->index_th_g] * dy[ppw->pv->index_pt_theta_b]
                                    + pvecthermo[pth->index_th_dg] * y[ppw->pv->index_pt_theta_b]);
        _set_source_(ppt->index_tp_t1) = switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k* (pvecmetric[ppw->index_mt_psi]-y[ppw->pv->index_pt_phi]);
        _set_source_(ppt->index_tp_t2) = ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }
      /* synchronous gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == synchronous) {
        _set_source_(ppt->index_tp_t0) = - pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_h_prime] / 6. + pvecthermo[pth->index_th_g] / 4. * delta_g;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b] / k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_exp_m_kappa] * k*k* 2./3. * ppw->s_l[2] * pvecmetric[ppw->index_mt_alpha] + pvecthermo[pth->index_th_g] * P;
        }
      */
      /* synchronous gauge: slightly more complicated form, but more efficient numerically */
      if (ppt->gauge == synchronous) {
        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_eta]
                                                         - pvecmetric[ppw->index_mt_alpha_prime]
                                                         - 2 * a_prime_over_a * pvecmetric[ppw->index_mt_alpha])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * (pvecmetric[ppw->index_mt_eta_prime]
                                                                          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
                                                                          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime]))
          + ppt->switch_dop * (pvecthermo[pth->index_th_g] * (dy[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha_prime])
                               +pvecthermo[pth->index_th_dg] * (y[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha]));
        _set_source_(ppt->index_tp_t1) =
          switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k * (pvecmetric[ppw->index_mt_alpha_prime]
                                                                    + 2. * a_prime_over_a * pvecmetric[ppw->index_mt_alpha]
                                                                    - y[ppw->pv->index_pt_eta]);
        _set_source_(ppt->index_tp_t2) =
          ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }
    }
    /* scalar polarization */
    if (ppt->has_source_p == _TRUE_) {
      /* all gauges. Note that the correct formula for the E source
         should have a minus sign, as shown in Hu & White. We put a
         plus sign to comply with the 'historical convention'
         established in CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }
    /* now, non-CMB sources */
    /* Bardeen potential -PHI_H = phi in Newtonian gauge */
    if (ppt->has_source_phi == _TRUE_) {
      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_phi];
      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_eta] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha];
    }
    /* its derivative phi' */
    if (ppt->has_source_phi_prime == _TRUE_) {
      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_phi];
      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_eta]
          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime];
    }
    /* diff of Bardeen potentials PHI_A-PHI_H = psi + phi in newtonian gauge */
    if (ppt->has_source_phi_plus_psi == _TRUE_) {
      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_plus_psi) =
          y[ppw->pv->index_pt_phi] + pvecmetric[ppw->index_mt_psi];
      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_plus_psi) =
          y[ppw->pv->index_pt_eta] + pvecmetric[ppw->index_mt_alpha_prime];
    }
    /* Bardeen potential PHI_A = psi in newtonian gauge */
    if (ppt->has_source_psi == _TRUE_) {
      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_psi) =
          pvecmetric[ppw->index_mt_psi];
      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_psi) =
          a_prime_over_a * pvecmetric[ppw->index_mt_alpha] + pvecmetric[ppw->index_mt_alpha_prime];
    }
    /* total matter overdensity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_delta_m == _TRUE_) {
      _set_source_(ppt->index_tp_delta_m) = ppw->delta_m;
    }
    /* delta_g */
    if (ppt->has_source_delta_g == _TRUE_) {
      _set_source_(ppt->index_tp_delta_g) = delta_g;
    }
    /* delta_baryon */
    if (ppt->has_source_delta_b == _TRUE_) {
      _set_source_(ppt->index_tp_delta_b) = y[ppw->pv->index_pt_delta_b];
    }
    /* delta_cdm */
    if (ppt->has_source_delta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_cdm) = y[ppw->pv->index_pt_delta_cdm];
    }
    /* delta_dcdm */
    if (ppt->has_source_delta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_dcdm) = y[ppw->pv->index_pt_delta_dcdm];
    }
    /* delta_fld */
    if (ppt->has_source_delta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_delta_fld) = y[ppw->pv->index_pt_delta_fld];
    }
    /* delta_scf: recompute delta_rho_scf here (not reused from
       perturb_total_stress_energy) because in newtonian gauge it needs the
       metric perturbation psi obtained in perturb_einstein above */
    if (ppt->has_source_delta_scf == _TRUE_) {
      if (ppt->gauge == synchronous){
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
      }
      else{
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2_rel*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
      }
      _set_source_(ppt->index_tp_delta_scf) = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
    }
    /* phi_smg TODO: either change the name of the source or write delta_phi_dot */
    if (ppt->has_source_phi_smg == _TRUE_) {
      _set_source_(ppt->index_tp_phi_smg) = pvecmetric[ppw->index_mt_vx_smg];
    }
    /* delta_dr */
    if (ppt->has_source_delta_dr == _TRUE_) {
      /* f = rho_dr*a^4/rho_crit_today converts F0_dr into delta_dr */
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_delta_dr) = y[ppw->pv->index_pt_F0_dr]/f_dr;
    }
    /* delta_ur */
    if (ppt->has_source_delta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_delta_ur) = y[ppw->pv->index_pt_delta_ur];
      else
        _set_source_(ppt->index_tp_delta_ur) = ppw->rsa_delta_ur;
    }
    /* delta_ncdm1 */
    if (ppt->has_source_delta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_delta_ncdm1; index_type < ppt->index_tp_delta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->delta_ncdm[index_type - ppt->index_tp_delta_ncdm1];
      }
    }
    /* total velocity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_theta_m == _TRUE_) {
      _set_source_(ppt->index_tp_theta_m) = ppw->theta_m;
    }
    /* theta_g */
    if (ppt->has_source_theta_g == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_g) = y[ppw->pv->index_pt_theta_g];
      else
        _set_source_(ppt->index_tp_theta_g) = ppw->rsa_theta_g;
    }
    /* theta_baryon */
    if (ppt->has_source_theta_b == _TRUE_) {
      _set_source_(ppt->index_tp_theta_b) = y[ppw->pv->index_pt_theta_b];
    }
    /* theta_cdm */
    if (ppt->has_source_theta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_cdm) = y[ppw->pv->index_pt_theta_cdm];
    }
    /* theta_dcdm */
    if (ppt->has_source_theta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_dcdm) = y[ppw->pv->index_pt_theta_dcdm];
    }
    /* theta_fld */
    if (ppt->has_source_theta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_theta_fld) = y[ppw->pv->index_pt_theta_fld];
    }
    /* theta_scf */
    if (ppt->has_source_theta_scf == _TRUE_) {
      rho_plus_p_theta_scf = 1./3.*
        k*k/a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
      _set_source_(ppt->index_tp_theta_scf) = rho_plus_p_theta_scf/
        (pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
    }
    /* theta_dr */
    if (ppt->has_source_theta_dr == _TRUE_) {
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_theta_dr) = 3./4.*k*y[ppw->pv->index_pt_F0_dr+1]/f_dr;
    }
    /* theta_ur */
    if (ppt->has_source_theta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_ur) = y[ppw->pv->index_pt_theta_ur];
      else
        _set_source_(ppt->index_tp_theta_ur) = ppw->rsa_theta_ur;
    }
    /* theta_ncdm1 */
    if (ppt->has_source_theta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_theta_ncdm1; index_type < ppt->index_tp_theta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->theta_ncdm[index_type - ppt->index_tp_theta_ncdm1];
      }
    }
  }
  /* tensors */
  if (_tensors_) {
    /* compute quantities depending on approximation schemes */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
        P = -(1./10.*y[ppw->pv->index_pt_delta_g]
              +2./7.*y[ppw->pv->index_pt_shear_g]
              +3./70.*y[ppw->pv->index_pt_delta_g+4]
              -3./5.*y[ppw->pv->index_pt_pol0_g]
              +6./7.*y[ppw->pv->index_pt_pol2_g]
              -3./70.*y[ppw->pv->index_pt_pol0_g+4])
          /sqrt(6.);
      }
      else {
        P = 2./5.*_SQRT6_*y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];  //TBC
      }
    }
    else {
      P = 0.;
    }
    /* tensor temperature */
    if (ppt->has_source_t == _TRUE_) {
      _set_source_(ppt->index_tp_t2) = - y[ppw->pv->index_pt_gwdot] * pvecthermo[pth->index_th_exp_m_kappa] + pvecthermo[pth->index_th_g] * P;
    }
    /* tensor polarization */
    if (ppt->has_source_p == _TRUE_) {
      /* Note that the correct formula for the polarisation source
         should have a minus sign, as shown in Hu & White. We put a
         plus sign to comply with the 'historical convention'
         established in CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }
  }
  return _SUCCESS_;
}
/**
* When testing the code or a cosmological model, it can be useful to
* output perturbations at each step of integration (and not just the
* delta's at each source sampling point, which is achieved simply by
* asking for matter transfer functions). Then this function can be
* passed to the generic_evolver routine.
*
* By default, instead of passing this function to generic_evolver,
* one passes a null pointer. Then this function is just not used.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input: fixed parameters (e.g. indices)
* @param error_message Output : error message
*
*/
int perturb_print_variables(double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: append one row of perturbation values at conformal time tau
to the in-memory output buffers ppt->scalar_perturbations_data /
ppt->tensor_perturbations_data (one double per "title" column).
Called by the evolver at every integration step when perturbation
output was requested; dy is part of the generic callback signature
but is not used here. */
struct perturb_parameters_and_workspace * pppaw;
double k;
int index_md;
//struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecmetric;
double delta_g,theta_g,shear_g,l4_g,pol0_g,pol1_g,pol2_g,pol4_g;
double delta_b,theta_b;
double delta_cdm=0.,theta_cdm=0.;
double delta_dcdm=0.,theta_dcdm=0.;
double delta_dr=0.,theta_dr=0.,shear_dr=0., f_dr=1.0;
double delta_ur=0.,theta_ur=0.,shear_ur=0.,l4_ur=0.;
double delta_rho_scf=0., rho_plus_p_theta_scf=0.;
double delta_scf=0., theta_scf=0.;
double V_x_smg=0., V_x_prime_smg=0.;
int n_ncdm;
double delta_ncdm,theta_ncdm,shear_ncdm;
double phi=0.,psi=0.,alpha=0.;
double delta_temp=0., delta_chi=0.;
double rho_delta_ncdm = 0.0;
double rho_plus_p_theta_ncdm = 0.0;
double rho_plus_p_shear_ncdm = 0.0;
double delta_p_ncdm = 0.0;
double factor = 0.0;
double q,q2,epsilon;
double a,a2,H;
int idx,index_q, storeidx;
double *dataptr;
/* temporary pointer so a failing realloc() below cannot clobber (and
   hence leak) the original data buffer */
double *tmp_data;
/** - rename structure fields (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
index_md = pppaw->index_md;
//ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
pvecback = ppw->pvecback;
pvecmetric = ppw->pvecmetric;
a = pvecback[pba->index_bg_a];
a2 = a*a;
H = pvecback[pba->index_bg_H];
/** perturbed recombination **/
/* delta_temp/delta_chi are evolved variables only when the
   tight-coupling approximation is off */
if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
delta_temp = y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
delta_chi =y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
}
if (_scalars_) {
/* photon density/velocity: read from y when the radiation streaming
   approximation (rsa) is off, otherwise from the rsa workspace values */
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
}
else {
delta_g = ppw->rsa_delta_g;
theta_g = ppw->rsa_theta_g;
}
/* photon shear and polarisation: depend on both rsa and tca schemes */
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_on) {
shear_g = ppw->tca_shear_g;
//l3_g = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
pol0_g = 2.5*ppw->tca_shear_g;
pol1_g = 7./12.*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
pol2_g = 0.5*ppw->tca_shear_g;
//pol3_g = 0.25*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
}
else {
shear_g = y[ppw->pv->index_pt_shear_g];
//l3_g = y[ppw->pv->index_pt_l3_g];
pol0_g = y[ppw->pv->index_pt_pol0_g];
pol1_g = y[ppw->pv->index_pt_pol1_g];
pol2_g = y[ppw->pv->index_pt_pol2_g];
//pol3_g = y[ppw->pv->index_pt_pol3_g];
}
}
else {
shear_g = 0;
//l3_g = 0;
pol0_g = 0;
pol1_g = 0;
pol2_g = 0;
//pol3_g = 0.;
}
/* ultra-relativistic species (massless neutrinos) */
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
delta_ur = y[ppw->pv->index_pt_delta_ur];
theta_ur = y[ppw->pv->index_pt_theta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
}
else {
delta_ur = ppw->rsa_delta_ur;
theta_ur = ppw->rsa_theta_ur;
shear_ur = 0.;
}
}
delta_b = y[ppw->pv->index_pt_delta_b];
theta_b = y[ppw->pv->index_pt_theta_b];
if (pba->has_cdm == _TRUE_) {
delta_cdm = y[ppw->pv->index_pt_delta_cdm];
/* in synchronous gauge the cdm velocity vanishes by gauge choice */
if (ppt->gauge == synchronous) {
theta_cdm = 0.;
}
else {
theta_cdm = y[ppw->pv->index_pt_theta_cdm];
}
}
/* gravitational potentials */
if (ppt->gauge == synchronous) {
alpha = pvecmetric[ppw->index_mt_alpha];
psi = pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] * alpha + pvecmetric[ppw->index_mt_alpha_prime];
phi = y[ppw->pv->index_pt_eta] - pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
}
else if (ppt->gauge == newtonian){
psi = pvecmetric[ppw->index_mt_psi];
phi = y[ppw->pv->index_pt_phi];
}
else{
psi = 0.0;
phi = 0.0;
}
/* decaying cold dark matter */
if (pba->has_dcdm == _TRUE_) {
delta_dcdm = y[ppw->pv->index_pt_delta_dcdm];
theta_dcdm = y[ppw->pv->index_pt_theta_dcdm];
}
/* decay radiation: convert the evolved F_l moments into
   delta/theta/shear using f_dr = rho_dr*a^4/rho_crit_today */
if (pba->has_dr == _TRUE_) {
f_dr = pow(pvecback[pba->index_bg_a]*pvecback[pba->index_bg_a]/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
delta_dr = y[ppw->pv->index_pt_F0_dr]/f_dr;
theta_dr = y[ppw->pv->index_pt_F0_dr+1]*3./4.*k/f_dr;
shear_dr = y[ppw->pv->index_pt_F0_dr+2]*0.5/f_dr;
}
/* quintessence scalar field: build delta_rho and (rho+p)*theta from
   the field perturbation and its derivative (gauge-dependent) */
if (pba->has_scf == _TRUE_){
if (ppt->gauge == synchronous){
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
}
else{
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
}
rho_plus_p_theta_scf = 1./3.*
k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
delta_scf = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
theta_scf = rho_plus_p_theta_scf/(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
}
if (pba->has_smg == _TRUE_){
//TODO: write here the perturbation variables
V_x_smg = ppw->pvecmetric[ppw->index_mt_vx_smg];
V_x_prime_smg = ppw->pvecmetric[ppw->index_mt_vx_prime_smg];
}
/* converting synchronous variables to newtonian ones */
if (ppt->gauge == synchronous) {
/* density and velocity perturbations (comment out if you wish to keep synchronous variables) */
delta_g -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_g += k*k*alpha;
delta_b -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_b += k*k*alpha;
if (pba->has_ur == _TRUE_) {
delta_ur -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_ur += k*k*alpha;
}
if (pba->has_dr == _TRUE_) {
delta_dr += (-4.*a*H+a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]/pvecback[pba->index_bg_rho_dr])*alpha;
theta_dr += k*k*alpha;
}
if (pba->has_cdm == _TRUE_) {
delta_cdm -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_cdm += k*k*alpha;
}
if (pba->has_dcdm == _TRUE_) {
delta_dcdm += alpha*(-a*pba->Gamma_dcdm-3.*a*H);
theta_dcdm += k*k*alpha;
}
if (pba->has_scf == _TRUE_) {
delta_scf += alpha*(-3.0*H*(1.0+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf]));
theta_scf += k*k*alpha;
}
}
// fprintf(ppw->perturb_output_file," ");
/** Handle (re-)allocation */
if (ppt->scalar_perturbations_data[ppw->index_ikout] == NULL){
class_alloc(ppt->scalar_perturbations_data[ppw->index_ikout],
sizeof(double)*ppt->number_of_scalar_titles,
error_message);
ppt->size_scalar_perturbation_data[ppw->index_ikout] = 0;
}
else{
/* grow the buffer through a temporary pointer: assigning the result
   of realloc() directly would leak the old buffer (and lead to a NULL
   dereference below) if the reallocation fails */
tmp_data = realloc(ppt->scalar_perturbations_data[ppw->index_ikout],
sizeof(double)*(ppt->size_scalar_perturbation_data[ppw->index_ikout]+ppt->number_of_scalar_titles));
class_test(tmp_data == NULL,
error_message,
"could not re-allocate scalar_perturbations_data");
ppt->scalar_perturbations_data[ppw->index_ikout] = tmp_data;
}
/* write one new row of number_of_scalar_titles doubles at the end of
   the buffer; class_store_double advances storeidx after each store */
storeidx = 0;
dataptr = ppt->scalar_perturbations_data[ppw->index_ikout]+
ppt->size_scalar_perturbation_data[ppw->index_ikout];
ppt->size_scalar_perturbation_data[ppw->index_ikout] += ppt->number_of_scalar_titles;
class_store_double(dataptr, tau, _TRUE_, storeidx);
class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
class_store_double(dataptr, delta_g, _TRUE_, storeidx);
class_store_double(dataptr, theta_g, _TRUE_, storeidx);
class_store_double(dataptr, shear_g, _TRUE_, storeidx);
class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
class_store_double(dataptr, pol1_g, _TRUE_, storeidx);
class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
class_store_double(dataptr, delta_b, _TRUE_, storeidx);
class_store_double(dataptr, theta_b, _TRUE_, storeidx);
class_store_double(dataptr, psi, _TRUE_, storeidx);
class_store_double(dataptr, phi, _TRUE_, storeidx);
/* perturbed recombination */
class_store_double(dataptr, delta_temp, ppt->has_perturbed_recombination, storeidx);
class_store_double(dataptr, delta_chi, ppt->has_perturbed_recombination, storeidx);
/* Ultra relativistic species */
class_store_double(dataptr, delta_ur, pba->has_ur, storeidx);
class_store_double(dataptr, theta_ur, pba->has_ur, storeidx);
class_store_double(dataptr, shear_ur, pba->has_ur, storeidx);
/* Cold dark matter */
class_store_double(dataptr, delta_cdm, pba->has_cdm, storeidx);
class_store_double(dataptr, theta_cdm, pba->has_cdm, storeidx);
/* Non-cold Dark Matter */
if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
class_store_double(dataptr, ppw->delta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, ppw->theta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, ppw->shear_ncdm[n_ncdm], _TRUE_, storeidx);
}
}
/* Decaying cold dark matter */
class_store_double(dataptr, delta_dcdm, pba->has_dcdm, storeidx);
class_store_double(dataptr, theta_dcdm, pba->has_dcdm, storeidx);
/* Decay radiation */
class_store_double(dataptr, delta_dr, pba->has_dr, storeidx);
class_store_double(dataptr, theta_dr, pba->has_dr, storeidx);
class_store_double(dataptr, shear_dr, pba->has_dr, storeidx);
/* Scalar field scf*/
class_store_double(dataptr, delta_scf, pba->has_scf, storeidx);
class_store_double(dataptr, theta_scf, pba->has_scf, storeidx);
/* Scalar field smg*/
class_store_double(dataptr, V_x_smg, pba->has_smg, storeidx);
class_store_double(dataptr, V_x_prime_smg, pba->has_smg, storeidx);
}
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
delta_g = y[ppw->pv->index_pt_delta_g];
shear_g = y[ppw->pv->index_pt_shear_g];
l4_g = y[ppw->pv->index_pt_delta_g+4];
pol0_g = y[ppw->pv->index_pt_pol0_g];
pol2_g = y[ppw->pv->index_pt_pol2_g];
pol4_g = y[ppw->pv->index_pt_pol0_g+4];
}
else {
/* NOTE(review): this branch reads ppw->pv->y while the rest of the
   function reads the evolver-supplied y argument — marked TBC by the
   authors; confirm the two arrays coincide here */
delta_g = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC
shear_g = 0.;
l4_g = 0.;
pol0_g = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC
pol2_g = 0.;
pol4_g = 0.;
}
}
else {
delta_g = 0.;
shear_g = 0.;
l4_g = 0.;
pol0_g = 0.;
pol2_g = 0.;
pol4_g = 0.;
}
if (ppt->evolve_tensor_ur == _TRUE_){
delta_ur = y[ppw->pv->index_pt_delta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
l4_ur = y[ppw->pv->index_pt_delta_ur+4];
}
/** Handle (re-)allocation */
if (ppt->tensor_perturbations_data[ppw->index_ikout] == NULL){
class_alloc(ppt->tensor_perturbations_data[ppw->index_ikout],
sizeof(double)*ppt->number_of_tensor_titles,
error_message);
ppt->size_tensor_perturbation_data[ppw->index_ikout] = 0;
}
else{
/* same failure-safe realloc pattern as in the scalar case above */
tmp_data = realloc(ppt->tensor_perturbations_data[ppw->index_ikout],
sizeof(double)*(ppt->size_tensor_perturbation_data[ppw->index_ikout]+ppt->number_of_tensor_titles));
class_test(tmp_data == NULL,
error_message,
"could not re-allocate tensor_perturbations_data");
ppt->tensor_perturbations_data[ppw->index_ikout] = tmp_data;
}
storeidx = 0;
dataptr = ppt->tensor_perturbations_data[ppw->index_ikout]+
ppt->size_tensor_perturbation_data[ppw->index_ikout];
ppt->size_tensor_perturbation_data[ppw->index_ikout] += ppt->number_of_tensor_titles;
//fprintf(ppw->perturb_output_file," ");
class_store_double(dataptr, tau, _TRUE_, storeidx);
class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
class_store_double(dataptr, delta_g, _TRUE_, storeidx);
class_store_double(dataptr, shear_g, _TRUE_, storeidx);
class_store_double(dataptr, l4_g, _TRUE_, storeidx);
class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
class_store_double(dataptr, pol4_g, _TRUE_, storeidx);
class_store_double(dataptr, y[ppw->pv->index_pt_gw], _TRUE_, storeidx);
class_store_double(dataptr, y[ppw->pv->index_pt_gwdot], _TRUE_, storeidx);
class_store_double(dataptr, delta_ur, ppt->evolve_tensor_ur, storeidx);
class_store_double(dataptr, shear_ur, ppt->evolve_tensor_ur, storeidx);
class_store_double(dataptr, l4_ur, ppt->evolve_tensor_ur, storeidx);
//printf("index_pt_delta+ur = %d\n",ppw->pv->index_pt_delta_ur);
/* Non-cold Dark Matter */
/* integrate the evolved momentum-bin multipoles psi_l(q) over q to
   build fluid quantities delta, theta, shear for each ncdm species */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
delta_ncdm = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
theta_ncdm = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
shear_ncdm = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
class_store_double(dataptr, delta_ncdm, _TRUE_, storeidx);
class_store_double(dataptr, theta_ncdm, _TRUE_, storeidx);
class_store_double(dataptr, shear_ncdm, _TRUE_, storeidx);
}
}
// fprintf(ppw->perturb_output_file,"\n");
}
return _SUCCESS_;
}
/**
* Compute derivative of all perturbations to be integrated
*
* For each mode (scalar/vector/tensor) and each wavenumber k, this
* function computes the derivative of all values in the vector of
* perturbed variables to be integrated.
*
* This is one of the few functions in the code which are passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
* is a bit special:
* - fixed parameters and workspaces are passed through a generic pointer.
* generic_integrator() doesn't know what the content of this pointer is.
* - errors are not written as usual in pth->error_message, but in a generic
* error_message passed in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Output: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, background and thermo quantities evaluated at tau.
* @param error_message Output : error message
*/
int perturb_derivs(double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
/* multipole */
int l;
/* scale factor and other background quantities */
double a,a2,a_prime_over_a,R;
/* short-cut names for the fields of the input structure */
struct perturb_parameters_and_workspace * pppaw;
double k,k2;
int index_md;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
double * s_l;
struct perturb_vector * pv;
/* short-cut notations for the perturbations */
double delta_g=0.,theta_g=0.,shear_g=0.;
double delta_b,theta_b;
double cb2,cs2,ca2;
double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_ufa_class=0.;
/* perturbed recombination (just to simplify the notation) */
double H0=0.,Nnow=0.,n_H=0.,fHe=0.;
double delta_temp=0.,delta_chi=0., chi=0.;
double alpha_rec=0.,delta_alpha_rec=0.;
double a_rad=0., Compton_CR =0.;
double Tb_in_K=0.;
/* Non-metric source terms for photons, i.e. \mathcal{P}^{(m)} from arXiv:1305.3261 */
double P0,P1,P2;
/* for use with fluid (fld): */
double w,w_prime;
/* for use with non-cold dark matter (ncdm): */
int index_q,n_ncdm,idx;
double q,epsilon,dlnf0_dlnq,qk_div_epsilon;
double rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm,w_ncdm,ca2_ncdm,ceff2_ncdm=0.,cvis2_ncdm=0.;
/* for use with curvature */
double cotKgen, sqrt_absK;
double s2_squared, ssqrt3;
/* for use with dcdm and dr */
double f_dr, fprime_dr;
/** - rename the fields of the input structure (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
k2=k*k;
index_md = pppaw->index_md;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
s_l = ppw->s_l;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
pv = ppw->pv;
/** - get background/thermo quantities in this point */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_closeby,
&(ppw->last_index_back),
pvecback),
pba->error_message,
error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
/** get metric perturbations with perturb_einstein() */
class_call(perturb_einstein(ppr,
pba,
pth,
ppt,
index_md,
k,
tau,
y,
ppw),
ppt->error_message,
error_message);
/** - compute related background quantities */
a = pvecback[pba->index_bg_a];
a2 = a*a;
a_prime_over_a = pvecback[pba->index_bg_H] * a;
R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
/** Compute 'generalised cotK function of argument sqrt(|K|)*tau, for closing hierarchy.
(see equation 2.34 in arXiv:1305.3261): */
if (pba->has_curvature == _FALSE_){
cotKgen = 1.0/(k*tau);
}
else{
sqrt_absK = sqrt(fabs(pba->K));
if (pba->K < 0)
cotKgen = sqrt_absK/k/tanh(sqrt_absK*tau);
else
cotKgen = sqrt_absK/k/tan(sqrt_absK*tau);
}
s2_squared = 1.-3.*pba->K/k2;
/** - for scalar mode: */
if (_scalars_) {
/** (a) define short-cut notations for the scalar perturbations */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
}
delta_b = y[pv->index_pt_delta_b];
theta_b = y[pv->index_pt_theta_b];
cb2 = pvecthermo[pth->index_th_cb2];
/** (b) perturbed recombination **/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca]==(int)tca_off)){
delta_temp= y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
delta_chi= y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
chi=pvecthermo[pth->index_th_xe];
// Conversion of H0 in inverse seconds (pba->H0 is [H0/c] in inverse Mpcs)
H0 = pba->H0 * _c_ / _Mpc_over_m_;
//Computation of Nnow in SI units
Nnow = 3.*H0*H0*pba->Omega0_b*(1.-pth->YHe)/(8.*_PI_*_G_*_m_H_);
// total amount of hydrogen today
n_H = (pba->a_today/a)*(pba->a_today/a)*(pba->a_today/a)* Nnow;
// Helium-to-hydrogen ratio
fHe = pth->YHe / (_not4_*(1-pth->YHe));
// The constant such that rho_gamma = a_rad * T^4
a_rad = 8./15.*pow(_PI_,5)*pow(_k_B_,4)/pow(_c_*_h_P_,3);
// Compton cooling rate in Mpc^(-1)
Compton_CR = 8./3. *_sigma_ * a_rad /(_m_e_ * _c_ *_c_) *_Mpc_over_m_ ;
// Temperature is already in Kelvin
Tb_in_K = pvecthermo[pth->index_th_Tb];
// Alpha in m^3/s, cf. Recfast paper
alpha_rec = 1.14 * 4.309e-19*pow((Tb_in_K * 1e-4),-0.6166)/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) ;
// delta alpha, dimensionless
delta_alpha_rec= (-0.6166 + 0.6703 * pow((Tb_in_K * 1e-4),0.53)*(-0.6166-0.53))/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) * delta_temp;
} // end of perturbed recombination related quantities
/** (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)
Each continuity equation contains a term in (theta+metric_continuity) with
metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge
Each Euler equation contains a source term metric_euler with
metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge
Each shear derivative equation contains a source term metric_shear equal to
metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge
metric_shear_prime is the derivative of metric_shear
In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
(-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */
if (ppt->gauge == synchronous) {
metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
metric_euler = 0.;
metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
//metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
metric_ufa_class = pvecmetric[ppw->index_mt_h_prime]/2.;
}
if (ppt->gauge == newtonian) {
metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
metric_euler = k2*pvecmetric[ppw->index_mt_psi];
metric_shear = 0.;
//metric_shear_prime = 0.;
metric_ufa_class = -6.*pvecmetric[ppw->index_mt_phi_prime];
}
/** (d) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
delta_g = ppw->rsa_delta_g;
theta_g = ppw->rsa_theta_g;
}
/** (e) BEGINNING OF ACTUAL SYSTEM OF EQUATIONS OF EVOLUTION: */
/* Note concerning perturbed recombination: $cb2*delta_b$ must be replaced everywhere by $cb2*(delta_b+delta_temp)$. If perturbed recombination is not required, delta_temp is equal to zero. */
/** -> photon temperature density */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
dy[pv->index_pt_delta_g] = -4./3.*(theta_g+metric_continuity);
}
/** -> baryon density */
dy[pv->index_pt_delta_b] = -(theta_b+metric_continuity);
/** -> baryon velocity (depends on tight-coupling approximation=tca) */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* without tca */
/** perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
- a_prime_over_a*theta_b
+ metric_euler
+ k2*cb2*(delta_b+delta_temp)
+ R*pvecthermo[pth->index_th_dkappa]*(theta_g-theta_b);
}
else {
/* with tca */
class_call(perturb_tca_slip_and_shear(y,pppaw,error_message),
error_message,
error_message);
/** perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
(-a_prime_over_a*theta_b
+k2*(cb2*(delta_b+delta_temp)+R*(delta_g/4.-s2_squared*ppw->tca_shear_g))
+R*ppw->tca_slip)/(1.+R)
+metric_euler;
}
/** -> photon temperature higher momenta and photon polarisation (depend on tight-coupling approximation) : */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** ---> if photon tight-coupling is off: */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/** -----> define \f$ \Pi = G_{\gamma 0} + G_{\gamma 2} + F_{\gamma 2} \f$ */
P0 = (y[pv->index_pt_pol0_g] + y[pv->index_pt_pol2_g] + 2.*s_l[2]*y[pv->index_pt_shear_g])/8.;
/** -----> photon temperature velocity */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s2_squared*y[pv->index_pt_shear_g])
+ metric_euler
+ pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);
/** -----> photon temperature shear */
dy[pv->index_pt_shear_g] =
0.5*(8./15.*(theta_g+metric_shear)
-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_l3_g]
-pvecthermo[pth->index_th_dkappa]*(2.*y[pv->index_pt_shear_g]-4./5./s_l[2]*P0));
/** -----> photon temperature l=3 */
l = 3;
dy[pv->index_pt_l3_g] = k/(2.0*l+1.0)*
(l*s_l[l]*2.*s_l[2]*y[pv->index_pt_shear_g]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_g+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/** -----> photon temperature l>3 */
for (l = 4; l < pv->l_max_g; l++) {
dy[pv->index_pt_delta_g+l] = k/(2.0*l+1.0)*
(l*s_l[l]*y[pv->index_pt_delta_g+l-1]-(l+1)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
}
/** -----> photon temperature lmax */
l = pv->l_max_g; /* l=lmax */
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/** -----> photon polarisation l=0 */
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-4.*P0);
/** -----> photon polarisation l=1 */
dy[pv->index_pt_pol1_g] =
k/3.*(y[pv->index_pt_pol1_g-1]-2.*s_l[2]*y[pv->index_pt_pol1_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol1_g];
/** -----> photon polarisation l=2 */
dy[pv->index_pt_pol2_g] =
k/5.*(2.*s_l[2]*y[pv->index_pt_pol2_g-1]-3.*s_l[3]*y[pv->index_pt_pol2_g+1])
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol2_g]-4./5.*P0);
/** -----> photon polarisation l>2 */
for (l=3; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/** -----> photon polarisation lmax_pol */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
/** ---> if photon tight-coupling is on: */
else {
/** ----> in that case, only need photon velocity */
/** perturbed recombination has an impact **/
dy[pv->index_pt_theta_g] =
-(dy[pv->index_pt_theta_b]+a_prime_over_a*theta_b-cb2*k2*(delta_b+delta_temp))/R
+k2*(0.25*delta_g-s2_squared*ppw->tca_shear_g)+(1.+R)/R*metric_euler;
}
}
/** -> cdm */
if (pba->has_cdm == _TRUE_) {
/** ---> newtonian gauge: cdm density and velocity */
if (ppt->gauge == newtonian) {
dy[pv->index_pt_delta_cdm] = -(y[pv->index_pt_theta_cdm]+metric_continuity); /* cdm density */
dy[pv->index_pt_theta_cdm] = - a_prime_over_a*y[pv->index_pt_theta_cdm] + metric_euler; /* cdm velocity */
}
/** ---> synchronous gauge: cdm density only (velocity set to zero by definition of the gauge) */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_delta_cdm] = -metric_continuity; /* cdm density */
}
}
/* perturbed recombination */
/* computes the derivatives of delta x_e and delta T_b */
if((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)){
// alpha * n_H is in inverse seconds, so we have to multiply it by Mpc_in_sec
dy[ppw->pv->index_pt_perturbed_recombination_delta_chi] = - alpha_rec* a * chi*n_H *(delta_alpha_rec + delta_chi + delta_b) * _Mpc_over_m_ / _c_ ;
// see the documentation for this formula
dy[ppw->pv->index_pt_perturbed_recombination_delta_temp] = 2./3. * dy[ppw->pv->index_pt_delta_b] - a * Compton_CR * pow(pba->T_cmb/a, 4) * chi / (1.+chi+fHe) * ( (1.-pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb])*(delta_g + delta_chi*(1.+fHe)/(1.+chi+fHe)) + pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb] *(delta_temp - 1./4. * delta_g) );
}
/** -> dcdm and dr */
if (pba->has_dcdm == _TRUE_) {
/** -> dcdm */
dy[pv->index_pt_delta_dcdm] = -(y[pv->index_pt_theta_dcdm]+metric_continuity)
- a * pba->Gamma_dcdm / k2 * metric_euler; /* dcdm density */
dy[pv->index_pt_theta_dcdm] = - a_prime_over_a*y[pv->index_pt_theta_dcdm] + metric_euler; /* dcdm velocity */
}
/** -> dr */
if ((pba->has_dcdm == _TRUE_)&&(pba->has_dr == _TRUE_)) {
/* f = rho_dr*a^4/rho_crit_today. In CLASS density units
rho_crit_today = H0^2.
*/
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
fprime_dr = pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]*pow(a,5)/pow(pba->H0,2);
/** -----> dr F0 */
dy[pv->index_pt_F0_dr] = -k*y[pv->index_pt_F0_dr+1]-4./3.*metric_continuity*f_dr+
fprime_dr*(y[pv->index_pt_delta_dcdm]+metric_euler/k2);
/** -----> dr F1 */
dy[pv->index_pt_F0_dr+1] = k/3.*y[pv->index_pt_F0_dr]-2./3.*k*y[pv->index_pt_F0_dr+2]*s2_squared +
4*metric_euler/(3.*k)*f_dr + fprime_dr/k*y[pv->index_pt_theta_dcdm];
/** -----> exact dr F2 */
dy[pv->index_pt_F0_dr+2] = 8./15.*(3./4.*k*y[pv->index_pt_F0_dr+1]+metric_shear*f_dr) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_F0_dr+3];
/** -----> exact dr l=3 */
l = 3;
dy[pv->index_pt_F0_dr+3] = k/(2.*l+1.)*
(l*s_l[l]*s_l[2]*y[pv->index_pt_F0_dr+2]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+4]);
/** -----> exact dr l>3 */
for (l = 4; l < pv->l_max_dr; l++) {
dy[pv->index_pt_F0_dr+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_F0_dr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+l+1]);
}
/** -----> exact dr lmax_dr */
l = pv->l_max_dr;
dy[pv->index_pt_F0_dr+l] =
k*(s_l[l]*y[pv->index_pt_F0_dr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_F0_dr+l]);
}
/** -> fluid (fld) */
if (pba->has_fld == _TRUE_) {
/** ---> factors w, w_prime, adiabatic sound speed ca2 (all three background-related),
plus actual sound speed in the fluid rest frame cs2 */
w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today);
w_prime = - pba->wa_fld * a / pba->a_today * a_prime_over_a;
ca2 = w - w_prime / 3. / (1.+w) / a_prime_over_a;
cs2 = pba->cs2_fld;
/** ---> fluid density */
dy[pv->index_pt_delta_fld] =
-(1+w)*(y[pv->index_pt_theta_fld]+metric_continuity)
-3.*(cs2-w)*a_prime_over_a*y[pv->index_pt_delta_fld]
-9.*(1+w)*(cs2-ca2)*a_prime_over_a*a_prime_over_a*y[pv->index_pt_theta_fld]/k2;
/** ---> fluid velocity */
dy[pv->index_pt_theta_fld] = /* fluid velocity */
-(1.-3.*cs2)*a_prime_over_a*y[pv->index_pt_theta_fld]
+cs2*k2/(1.+w)*y[pv->index_pt_delta_fld]
+metric_euler;
}
/** -> scalar field (scf) */
if (pba->has_scf == _TRUE_) {
/** ---> field value */
dy[pv->index_pt_phi_scf] = y[pv->index_pt_phi_prime_scf];
/** ---> Klein Gordon equation */
dy[pv->index_pt_phi_prime_scf] = - 2.*a_prime_over_a*y[pv->index_pt_phi_prime_scf]
- metric_continuity*pvecback[pba->index_bg_phi_prime_scf] // metric_continuity = h'/2
- (k2 + a2*pvecback[pba->index_bg_ddV_scf])*y[pv->index_pt_phi_scf]; //checked
}
if (pba->has_smg == _TRUE_) {
class_test(ppt->gauge == newtonian,
ppt->error_message,
"asked for scalar field AND Newtonian gauge. Not yet implemented");
/** ---> scalar field velocity */
dy[pv->index_pt_vx_smg] = pvecmetric[ppw->index_mt_vx_prime_smg]; //y[pv->index_pt_vx_prime_smg];
/** ---> Scalar field acceleration (passes the value obtained in perturb_einstein) */
dy[pv->index_pt_vx_prime_smg] = pvecmetric[ppw->index_mt_vx_prime_prime_smg];
}
/** -> ultra-relativistic neutrino/relics (ur) */
if (pba->has_ur == _TRUE_) {
/** ---> if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** -----> ur density */
dy[pv->index_pt_delta_ur] = -4./3.*(y[pv->index_pt_theta_ur] + metric_continuity);
/** -----> ur velocity */
dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]) + metric_euler;
if(ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/** -----> exact ur shear */
dy[pv->index_pt_shear_ur] = 0.5*(8./15.*(y[pv->index_pt_theta_ur]+metric_shear)
-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]);
/** -----> exact ur l=3 */
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
/** -----> exact ur l>3 */
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
/** -----> exact ur lmax_ur */
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
else {
/** -----> in fluid approximation (ufa): only ur shear needed */
//TBC: curvature?
/* a la Ma & Bertschinger */
if (ppr->ur_fluid_approximation == ufa_mb) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la Hu */
if (ppr->ur_fluid_approximation == ufa_hu) {
dy[pv->index_pt_shear_ur] =
-3.*a_prime_over_a*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la CLASS */
if (ppr->ur_fluid_approximation == ufa_CLASS) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_ufa_class);
}
}
}
}
/** -> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (pba->has_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** ---> first case: use a fluid approximation (ncdmfa) */
//TBC: curvature
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) {
/** -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** -----> define intermediate quantities */
rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; /* background density */
p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; /* background pressure */
pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; /* pseudo-pressure (see CLASS IV paper) */
w_ncdm = p_ncdm_bg/rho_ncdm_bg; /* equation of state parameter */
ca2_ncdm = w_ncdm/3.0/(1.0+w_ncdm)*(5.0-pseudo_p_ncdm/p_ncdm_bg); /* adiabatic sound speed */
/* c_eff is (delta p / delta rho) in the gauge under
consideration (not in the gauge comoving with the
fluid) */
/* c_vis is introduced in order to close the system */
/* different ansatz for sound speed c_eff and viscosity speed c_vis */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = w_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
/** -----> exact continuity equation */
dy[idx] = -(1.0+w_ncdm)*(y[idx+1]+metric_continuity)-
3.0*a_prime_over_a*(ceff2_ncdm-w_ncdm)*y[idx];
/** -----> exact euler equation */
dy[idx+1] = -a_prime_over_a*(1.0-3.0*ca2_ncdm)*y[idx+1]+
ceff2_ncdm/(1.0+w_ncdm)*k2*y[idx]-k2*y[idx+2]
+ metric_euler;
/** -----> different ansatz for approximate shear derivative */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
dy[idx+2] = -3.0*a_prime_over_a*ca2_ncdm/w_ncdm*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_ufa_class);
}
/** -----> jump to next species */
idx += pv->l_max_ncdm[n_ncdm]+1;
}
}
/** ---> second case: use exact equation (Boltzmann hierarchy on momentum grid) */
else {
/** -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** -----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** -----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** -----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]+metric_continuity*dlnf0_dlnq/3.;
/** -----> ncdm velocity for given momentum bin */
dy[idx+1] = qk_div_epsilon/3.0*(y[idx] - 2*s_l[2]*y[idx+2])
-epsilon*metric_euler/(3*q*k)*dlnf0_dlnq;
/** -----> ncdm shear for given momentum bin */
dy[idx+2] = qk_div_epsilon/5.0*(2*s_l[2]*y[idx+1]-3.*s_l[3]*y[idx+3])
-s_l[2]*metric_shear*2./15.*dlnf0_dlnq;
/** -----> ncdm l>3 for given momentum bin */
for(l=3; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** -----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
}
/** -> metric */
/** --> eta of synchronous gauge */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_eta] = pvecmetric[ppw->index_mt_eta_prime];
}
if (ppt->gauge == newtonian) {
dy[pv->index_pt_phi] = pvecmetric[ppw->index_mt_phi_prime];
}
}
/** - vector mode */
if (_vectors_) {
fprintf(stderr,"we are in vectors\n");
ssqrt3 = sqrt(1.-2.*pba->K/k2);
cb2 = pvecthermo[pth->index_th_cb2];
/** -> baryon velocity */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- pvecthermo[pth->index_th_dkappa]*(_SQRT2_/4.*delta_g + y[pv->index_pt_theta_b]);
}
else if (ppt->gauge == newtonian) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- _SQRT2_/4.*pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
+ pvecmetric[ppw->index_mt_V_prime]+(1.-3.*cb2)*a_prime_over_a*y[pv->index_pt_V];
}
/*
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
*/
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(1)}) (see Eq. B.23 in 1305.3261)*/
P1 = -_SQRT6_/40.*(
4./(3.*k)*theta_g //F1
+y[pv->index_pt_delta_g+3]
+2.*y[pv->index_pt_pol0_g]
+10./7.*y[pv->index_pt_pol2_g]
-4./7.*y[pv->index_pt_pol0_g+4]);
if (ppt->gauge == synchronous) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]);
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1)
+4.0/(3.0*_SQRT2_)*ssqrt3*y[pv->index_pt_hv_prime];
}
else if (ppt->gauge == newtonian) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
-2.*_SQRT2_*pvecmetric[ppw->index_mt_V_prime];
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1);
}
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P1);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/*
}
}
*/
if (ppt->gauge == synchronous) {
/* Vector metric perturbation in synchronous gauge: */
dy[pv->index_pt_hv_prime] = pvecmetric[ppw->index_mt_hv_prime_prime];
}
else if (ppt->gauge == newtonian){
/* Vector metric perturbation in Newtonian gauge: */
dy[pv->index_pt_V] = pvecmetric[ppw->index_mt_V_prime];
}
}
/** - tensor mode */
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(2)}) */
P2 =-1.0/_SQRT6_*(
1./10.*delta_g
+2./7.*shear_g
+3./70.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./7.*y[pv->index_pt_pol2_g]
-3./70.*y[pv->index_pt_pol0_g+4]);
/* above expression from paper, expression below matches old class but is not correct
P2 = -1.0/_SQRT6_*(
1./10.*delta_g
+2./35.*shear_g
+1./210.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./35.*y[pv->index_pt_pol2_g]
-1./210.*y[pv->index_pt_pol0_g+4]
);
*/
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+_SQRT6_*P2)
//+y[pv->index_pt_gwdot];
+_SQRT6_*y[pv->index_pt_gwdot]; //TBC
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*theta_g;
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P2);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
}
if (ppt->evolve_tensor_ur == _TRUE_) {
dy[pv->index_pt_delta_ur] = -4./3.*y[pv->index_pt_theta_ur]+_SQRT6_*y[pv->index_pt_gwdot];
dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]);
dy[pv->index_pt_shear_ur] = (4./15.*y[pv->index_pt_theta_ur]
-3./10.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]);
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
/** -> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (ppt->evolve_tensor_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** -----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** -----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** -----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]-0.25*_SQRT6_*y[pv->index_pt_gwdot]*dlnf0_dlnq;
/** -----> ncdm l>0 for given momentum bin */
for(l=1; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** -----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
/* tensor metric perturbation h (gravitational waves) */
dy[pv->index_pt_gw] = y[pv->index_pt_gwdot];
/* its time-derivative */
dy[pv->index_pt_gwdot] = pvecmetric[ppw->index_mt_gw_prime_prime];
}
return _SUCCESS_;
}
int perturb_tca_slip_and_shear(double * y,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
/* scale factor and other background quantities */
double a,a_prime_over_a,a_primeprime_over_a,R;
/* useful terms for tight-coupling approximation */
double slip=0.;
double tau_c=0.,dtau_c=0.;
double theta_prime,shear_g_prime=0.,theta_prime_prime;
double g0,g0_prime,g0_prime_prime;
double F=0.,F_prime=0.,F_prime_prime=0.;
/* short-cut names for the fields of the input structure */
struct perturb_parameters_and_workspace * pppaw;
double k,k2;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
struct perturb_vector * pv;
/* short-cut notations for the perturbations */
double delta_g=0.,theta_g=0.,shear_g=0.;
double delta_b,theta_b;
double Delta;
double cb2;
double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_shear_prime=0.;
/* perturbed recombination */
double delta_temp=0.;
/* for use with curvature */
double s2_squared;
/** - rename the fields of the input structure (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
k2=k*k;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
pv = ppw->pv;
/** - compute related background quantities */
a = pvecback[pba->index_bg_a];
a_prime_over_a = pvecback[pba->index_bg_H] * a;
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a;
//z = pba->a_today-1.;
R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
s2_squared = 1.-3.*pba->K/k2;
/** (a) define short-cut notations for the scalar perturbations */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
}
delta_b = y[pv->index_pt_delta_b];
theta_b = y[pv->index_pt_theta_b];
cb2 = pvecthermo[pth->index_th_cb2];
/* perturbed recombination */
if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
delta_temp = y[pv->index_pt_perturbed_recombination_delta_temp];
}
/** (b) define short-cut notations used only in tight-coupling approximation */
tau_c = 1./pvecthermo[pth->index_th_dkappa]; /* inverse of opacity */
dtau_c = -pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c; /* its first derivative wrt conformal time */
F = tau_c/(1+R); /* F = tau_c/(1+R) */
if (ppr->tight_coupling_approximation >= (int)second_order_CLASS) {
F_prime = dtau_c/(1+R)+tau_c*a_prime_over_a*R/(1+R)/(1+R); /*F' needed by second_order_CLASS and compromise_CLASS */
if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
F_prime_prime =(- pvecthermo[pth->index_th_dddkappa]*tau_c*tau_c /* F'' needed by second_order_CLASS only */
+ 2.*pvecthermo[pth->index_th_ddkappa]*pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c*tau_c)/(1+R)
+2.*dtau_c*a_prime_over_a*R/(1+R)/(1+R)
+tau_c*((a_primeprime_over_a-2.*a_prime_over_a*a_prime_over_a)+2.*a_prime_over_a*a_prime_over_a*R/(1+R))*R/(1+R)/(1+R);
}
}
/** (d) compute metric-related quantities (depending on gauge; additional gauges can be coded below)
Each continuity equation contains a term in (theta+metric_continuity) with
metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge
Each Euler equation contains a source term metric_euler with
metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge
Each shear derivative equation contains a source term metric_shear equal to
metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge
metric_shear_prime is the derivative of metric_shear
In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
(-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */
if (ppt->gauge == synchronous) {
metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
metric_euler = 0.;
metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
}
if (ppt->gauge == newtonian) {
metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
metric_euler = k2*pvecmetric[ppw->index_mt_psi];
metric_shear = 0.;
metric_shear_prime = 0.;
}
/** (e) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */
/* free-streaming photon velocity */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)
theta_g = ppw->rsa_theta_g;
/** -----> like Ma & Bertschinger */
if (ppr->tight_coupling_approximation == (int)first_order_MB) {
slip=2.*R/(1.+R)*a_prime_over_a*(theta_b-theta_g)
+F*(-a_primeprime_over_a*theta_b
+k2*(-a_prime_over_a*delta_g/2.
+cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.)
-a_prime_over_a*metric_euler);
}
/** -----> relax assumption dkappa~a^-2 (like in CAMB) */
if ((ppr->tight_coupling_approximation == (int)first_order_CAMB) || (ppr->tight_coupling_approximation == (int)compromise_CLASS)) {
slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
+F*(-a_primeprime_over_a*theta_b
+k2*(-a_prime_over_a*delta_g/2.
+cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.)
-a_prime_over_a*metric_euler);
}
/** -----> also relax assumption cb2~a^-1 */
if ((ppr->tight_coupling_approximation == (int)first_order_CLASS) || (ppr->tight_coupling_approximation == (int)second_order_CLASS)){
slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
+F*(-a_primeprime_over_a*theta_b
+k2*(-a_prime_over_a*delta_g/2.
+pvecthermo[pth->index_th_dcb2]*delta_b
+cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.)
-a_prime_over_a*metric_euler);
}
/** -----> intermediate quantities for 2nd order tca: shear_g at first order in tight-coupling */
shear_g=16./45.*tau_c*(theta_g+metric_shear);
/* (Ma & Bertschinger give (1/9)*(4/3) instead of (2/15)*(4/3)
because they didn't include the contribution of G_gamma0
and G_gamma2, which are of the same order as sigma_g. This
was already consistently included in CAMB) */
/** -----> intermediate quantities for 2nd order tca: zero order for theta_b' = theta_g' */
/** perturbed recombination has an impact **/
theta_prime = (-a_prime_over_a*theta_b+k2*(cb2*(delta_b+delta_temp)+R/4.*delta_g))/(1.+R) + metric_euler;
/** -----> intermediate quantities for 2nd order tca: shear_g_prime at first order in tight-coupling */
shear_g_prime=16./45.*(tau_c*(theta_prime+metric_shear_prime)+dtau_c*(theta_g+metric_shear));
/** -----> 2nd order as in CRS*/
if (ppr->tight_coupling_approximation == (int)second_order_CRS) {
if (ppt->gauge == newtonian) {
class_stop(error_message,
"the second_order_CRS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
}
if (ppt->gauge == synchronous) {
class_test(pba->sgnK != 0,
ppt->error_message,
"the second_order_CRS approach to tight-coupling is coded in the flat case only: for non-flat try another tight-coupling scheme");
/* infer Delta from h'' using Einstein equation */
Delta = 2*k2*y[pv->index_pt_eta]
-2*a_prime_over_a*pvecmetric[ppw->index_mt_h_prime]
-pvecmetric[ppw->index_mt_h_prime_prime];
/* monster expression for slip at second-order in tight-coupling */
slip=(-2./(1.+R)*a_prime_over_a-pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa])*(theta_b-theta_g)
+(-a_primeprime_over_a*theta_b
-k2*a_prime_over_a*(delta_g/2.-2.*shear_g)
+k2*(cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.
+shear_g_prime)
)/pvecthermo[pth->index_th_dkappa]/(1.+R)
-2.*R*(3.*a_prime_over_a*a_prime_over_a*cb2+(1.+R)*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)-3.*a_prime_over_a*a_prime_over_a)
/(1.+R)/(1.+R)/(1.+R)*(theta_b-theta_g)/pvecthermo[pth->index_th_dkappa]
+(
a_primeprime_over_a*a_prime_over_a*((2.-3.*cb2)*R-2.)*theta_b/(1.+R)
+a_prime_over_a*k2*(1.-3.*cb2)*theta_b/3./(1.+R)
/* perturbed recombination has an impact (next two lines) */
+a_primeprime_over_a*k2*cb2*(delta_b+delta_temp)/(1.+R)
+k2*k2*(3.*cb2-1.)*cb2*(delta_b+delta_temp)/3./(1.+R)
+k2*k2*R*(3.*cb2-1.)*delta_g/12./(1.+R)
+a_primeprime_over_a*k2*(2.+3.*R)*delta_g/4./(1.+R)
+a_prime_over_a*a_prime_over_a*k2*((2.-3.*cb2)*R-1.)*delta_g/2./(1.+R)
+a_prime_over_a*k2*cb2*(1.+(3.*cb2-2.)*R)*(-theta_b-metric_continuity)/(1.+R)
+a_prime_over_a*k2*(2.+(5.-3.*cb2)*R)*4./3.*(-theta_g-metric_continuity)/4./(1.+R)
+a_prime_over_a*(1.-3.*cb2)*k2*2.*metric_shear/3.
+k2*k2*(3.*cb2-1.)*y[pv->index_pt_eta]/3.
+2.*a_prime_over_a*k2*(3.*cb2-1.)*pvecmetric[ppw->index_mt_eta_prime]
+k2*(1.-3.*cb2)*Delta/6.
)/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/(1.+R)/(1.+R)
-(4.*a_primeprime_over_a*theta_b-4.*k2*cb2*(-theta_b-metric_continuity)+2.*a_prime_over_a*k2*delta_g+k2*4./3.*(-theta_g-metric_continuity))/2./(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]
+4.*a_prime_over_a*R/(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);
/* second-order correction to shear */
shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+k2*pvecmetric[ppw->index_mt_alpha_prime]);
}
}
/** -----> 2nd order like in CLASS paper */
if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
if (ppt->gauge == newtonian) {
class_stop(error_message,
"the second_order_CLASS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
}
if (ppt->gauge == synchronous) {
/* zero order for theta_b'' = theta_g'' */
theta_prime_prime = ((R-1.)*a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b
+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+cb2*(-theta_b-metric_continuity)-a_prime_over_a*R/4.*delta_g+R/4.*4./3.*(-theta_g-metric_continuity)))/(1.+R);
/* zero-order quantities g0, g0', go'' */
g0 = -a_prime_over_a*theta_b + k2*(cb2*delta_b-delta_g/4.);
g0_prime = -a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+(1./3.-cb2)*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime]));
g0_prime_prime = -a_prime_over_a*theta_prime_prime-2.*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_prime
-(2.*a_prime_over_a*a_prime_over_a*a_prime_over_a-3.*a_primeprime_over_a*a_prime_over_a)*theta_b
+k2*(pvecthermo[pth->index_th_ddcb2]*delta_b-2.*pvecthermo[pth->index_th_dcb2]*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])+(1./3.-cb2)*(theta_prime+0.5*pvecmetric[ppw->index_mt_h_prime_prime]));
/* slip at second order */
slip = (1.-2*a_prime_over_a*F)*slip + F*k2*s2_squared*(2.*a_prime_over_a*shear_g+shear_g_prime)
-F*(F_prime_prime*g0+2.*F_prime*g0_prime+F*g0_prime_prime);
/* second-order correction to shear */
shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
}
}
/** -----> add only the most important 2nd order terms */
if (ppr->tight_coupling_approximation == (int)compromise_CLASS) {
/* slip at second order (only leading second-order terms) */
slip = (1.-2.*a_prime_over_a*F)*slip + F*k2*(2.*a_prime_over_a*s2_squared*shear_g+s2_squared*shear_g_prime-(1./3.-cb2)*(F*theta_prime+2.*F_prime*theta_b));
/* second-order correction to shear */
shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
}
/** ---> store tight-coupling values of photon shear and its derivative */
ppw->tca_shear_g = shear_g;
ppw->tca_slip = slip;
return _SUCCESS_;
}
int perturb_rsa_delta_and_theta(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
double k,
double * y,
double a_prime_over_a,
double * pvecthermo,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
double k2;
k2 = k*k;
// formulas below TBC for curvaturema
/* newtonian gauge */
if (ppt->gauge == newtonian) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_g = 0.;
ppw->rsa_theta_g = 0.;
}
else {
ppw->rsa_delta_g = -4.*y[ppw->pv->index_pt_phi];
ppw->rsa_theta_g = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
}
if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {
ppw->rsa_delta_g +=
-4./k2*ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_b];
ppw->rsa_theta_g +=
3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*y[ppw->pv->index_pt_theta_b]
+ppw->pvecthermo[pth->index_th_dkappa]*
(-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
+ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
+k2*y[ppw->pv->index_pt_phi]));
}
if (pba->has_ur == _TRUE_) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_ur = 0.;
ppw->rsa_theta_ur = 0.;
}
else {
ppw->rsa_delta_ur = -4.*y[ppw->pv->index_pt_phi];
ppw->rsa_theta_ur = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
}
}
}
}
/* synchronous gauge */
if (ppt->gauge == synchronous) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_g = 0.;
ppw->rsa_theta_g = 0.;
}
else {
ppw->rsa_delta_g = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
-k2*y[ppw->pv->index_pt_eta]);
ppw->rsa_theta_g = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
}
if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {
ppw->rsa_delta_g +=
-4./k2*ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_b]+0.5*ppw->pvecmetric[ppw->index_mt_h_prime]);
ppw->rsa_theta_g +=
3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*
(y[ppw->pv->index_pt_theta_b]
+0.5*ppw->pvecmetric[ppw->index_mt_h_prime])
+ppw->pvecthermo[pth->index_th_dkappa]*
(-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
+ ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
-a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
+k2*y[ppw->pv->index_pt_eta]));
}
if (pba->has_ur == _TRUE_) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_ur = 0.;
ppw->rsa_theta_ur = 0.;
}
else {
ppw->rsa_delta_ur = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
-k2*y[ppw->pv->index_pt_eta]);
ppw->rsa_theta_ur = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
}
}
}
}
return _SUCCESS_;
}
|
pr48591.c | /* PR middle-end/48591 */
/* { dg-do run { target __float128 } } */
/* { dg-options "-O0" } */
/* { dg-add-options __float128 } */
extern void abort (void);
int
main ()
{
__float128 f = 0.0;
int i;
#pragma omp parallel for reduction(+:f)
for (i = 0; i < 128; i++)
f += 0.5Q;
if (f != 64.0Q)
abort ();
#pragma omp atomic
f += 8.5Q;
if (f != 72.5Q)
abort ();
return 0;
}
|
search.h | // -*- C++ -*-
// Copyright (C) 2007-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/search.h
* @brief Parallel implementation base for std::search() and
* std::search_n().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_SEARCH_H
#define _GLIBCXX_PARALLEL_SEARCH_H 1
#include <bits/stl_algobase.h>
#include <parallel/parallel.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Precalculate __advances for Knuth-Morris-Pratt algorithm.
* @param __elements Begin iterator of sequence to search for.
* @param __length Length of sequence to search for.
* @param __off Returned __offsets.
*/
template<typename _RAIter, typename _DifferenceTp>
void
__calc_borders(_RAIter __elements, _DifferenceTp __length,
_DifferenceTp* __off)
{
typedef _DifferenceTp _DifferenceType;
__off[0] = -1;
if (__length > 1)
__off[1] = 0;
_DifferenceType __k = 0;
for (_DifferenceType __j = 2; __j <= __length; __j++)
{
while ((__k >= 0) && !(__elements[__k] == __elements[__j-1]))
__k = __off[__k];
__off[__j] = ++__k;
}
}
// Generic parallel find algorithm (requires random access iterator).
/** @brief Parallel std::search.
* @param __begin1 Begin iterator of first sequence.
* @param __end1 End iterator of first sequence.
* @param __begin2 Begin iterator of second sequence.
* @param __end2 End iterator of second sequence.
* @param __pred Find predicate.
* @return Place of finding in first sequences. */
template<typename __RAIter1,
typename __RAIter2,
typename _Pred>
__RAIter1
__search_template(__RAIter1 __begin1, __RAIter1 __end1,
__RAIter2 __begin2, __RAIter2 __end2,
_Pred __pred)
{
typedef std::iterator_traits<__RAIter1> _TraitsType;
typedef typename _TraitsType::difference_type _DifferenceType;
_GLIBCXX_CALL((__end1 - __begin1) + (__end2 - __begin2));
_DifferenceType __pattern_length = __end2 - __begin2;
// Pattern too short.
if(__pattern_length <= 0)
return __end1;
// Last point to start search.
_DifferenceType __input_length = (__end1 - __begin1) - __pattern_length;
// Where is first occurrence of pattern? defaults to end.
_DifferenceType __result = (__end1 - __begin1);
_DifferenceType *__splitters;
// Pattern too long.
if (__input_length < 0)
return __end1;
omp_lock_t __result_lock;
omp_init_lock(&__result_lock);
_ThreadIndex __num_threads = std::max<_DifferenceType>
(1, std::min<_DifferenceType>(__input_length,
__get_max_threads()));
_DifferenceType __advances[__pattern_length];
__calc_borders(__begin2, __pattern_length, __advances);
# pragma omp parallel num_threads(__num_threads)
{
# pragma omp single
{
__num_threads = omp_get_num_threads();
__splitters = new _DifferenceType[__num_threads + 1];
__equally_split(__input_length, __num_threads, __splitters);
}
_ThreadIndex __iam = omp_get_thread_num();
_DifferenceType __start = __splitters[__iam],
__stop = __splitters[__iam + 1];
_DifferenceType __pos_in_pattern = 0;
bool __found_pattern = false;
while (__start <= __stop && !__found_pattern)
{
// Get new value of result.
#pragma omp flush(__result)
// No chance for this thread to find first occurrence.
if (__result < __start)
break;
while (__pred(__begin1[__start + __pos_in_pattern],
__begin2[__pos_in_pattern]))
{
++__pos_in_pattern;
if (__pos_in_pattern == __pattern_length)
{
// Found new candidate for result.
omp_set_lock(&__result_lock);
__result = std::min(__result, __start);
omp_unset_lock(&__result_lock);
__found_pattern = true;
break;
}
}
// Make safe jump.
__start += (__pos_in_pattern - __advances[__pos_in_pattern]);
__pos_in_pattern = (__advances[__pos_in_pattern] < 0
? 0 : __advances[__pos_in_pattern]);
}
} //parallel
omp_destroy_lock(&__result_lock);
delete[] __splitters;
// Return iterator on found element.
return (__begin1 + __result);
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
|
agmgSetup.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus, Rajesh Gandham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "agmg.h"
csr *strong_graph(csr *A, dfloat threshold);
bool customLess(int smax, dfloat rmax, hlong imax, int s, dfloat r, hlong i);
hlong *form_aggregates(agmgLevel *level, csr *C);
void find_aggregate_owners(agmgLevel *level, hlong* FineToCoarse, setupAide options);
csr *construct_interpolator(agmgLevel *level, hlong *FineToCoarse, dfloat **nullCoarseA);
csr *transpose(agmgLevel* level, csr *A, hlong *globalRowStarts, hlong *globalColStarts);
csr *galerkinProd(agmgLevel *level, csr *R, csr *A, csr *P);
void coarsenAgmgLevel(agmgLevel *level, csr **coarseA, csr **P, csr **R, dfloat **nullCoarseA, setupAide options);
// Build the algebraic-multigrid hierarchy for parAlmond.
// Installs the fine matrix A (with near-null vector nullA and global row
// partition globalRowStarts) as the next level of the chain, then repeatedly
// coarsens until the global problem is small enough or stops shrinking,
// setting up smoothers, device-side matrices and work vectors for every
// level. A is adopted by the hierarchy (pointer stored), not deep-copied.
// Collective call: all ranks must enter (MPI_Allreduce inside).
void agmgSetup(parAlmond_t *parAlmond, csr *A, dfloat *nullA, hlong *globalRowStarts, setupAide options){
int rank, size;
rank = agmg::rank;
size = agmg::size;
// approximate Nrows at coarsest level
int gCoarseSize = 1000;
// seed the per-rank randomizer used by the aggregation (drand48 in form_aggregates)
double seed = (double) rank;
srand48(seed);
agmgLevel **levels = parAlmond->levels;
int lev = parAlmond->numLevels; //add this level to the end of the chain
levels[lev] = (agmgLevel *) calloc(1,sizeof(agmgLevel));
levels[lev]->gatherLevel = false;
levels[lev]->weightedInnerProds = false;
parAlmond->numLevels++;
//copy A matrix and null vector
levels[lev]->A = A;
levels[lev]->A->null = nullA;
levels[lev]->Nrows = A->Nrows;
levels[lev]->Ncols = A->Ncols;
// choose the smoother: Chebyshev when requested, damped Jacobi otherwise
SmoothType smoothType;
int ChebyshevIterations=2; //default to degree 2
if (options.compareArgs("PARALMOND SMOOTHER", "CHEBYSHEV")) {
smoothType = CHEBYSHEV;
options.getArgs("PARALMOND CHEBYSHEV DEGREE", ChebyshevIterations);
} else { //default to DAMPED_JACOBI
smoothType = DAMPED_JACOBI;
}
levels[lev]->ChebyshevIterations = ChebyshevIterations;
setupSmoother(parAlmond, levels[lev], smoothType);
// device (HYB-format) copy of the fine operator
levels[lev]->deviceA = newHYB(parAlmond, levels[lev]->A);
//set operator callback; args = {parAlmond, level} is the signature the agmg* kernels expect
void **args = (void **) calloc(2,sizeof(void*));
args[0] = (void *) parAlmond;
args[1] = (void *) levels[lev];
levels[lev]->AxArgs = args;
levels[lev]->smoothArgs = args;
levels[lev]->Ax = agmgAx;
levels[lev]->smooth = agmgSmooth;
levels[lev]->device_Ax = device_agmgAx;
levels[lev]->device_smooth = device_agmgSmooth;
//copy global partition
levels[lev]->globalRowStarts = (hlong *) calloc(size+1,sizeof(hlong));
for (int r=0;r<size+1;r++)
levels[lev]->globalRowStarts[r] = globalRowStarts[r];
hlong localSize = (hlong) levels[lev]->A->Nrows;
hlong globalSize = 0;
MPI_Allreduce(&localSize, &globalSize, 1, MPI_HLONG, MPI_SUM, agmg::comm);
//if the system is already small, dont create MG levels
bool done = false;
if(globalSize <= gCoarseSize){
setupExactSolve(parAlmond, levels[lev],parAlmond->nullSpace,parAlmond->nullSpacePenalty);
//setupSmoother(parAlmond, levels[lev], smoothType);
done = true;
}
while(!done){
// create coarse MG level
levels[lev+1] = (agmgLevel *) calloc(1,sizeof(agmgLevel));
dfloat *nullCoarseA;
//printf("Setting up coarse level %d\n", lev+1);
coarsenAgmgLevel(levels[lev], &(levels[lev+1]->A), &(levels[lev+1]->P),
&(levels[lev+1]->R), &nullCoarseA, parAlmond->options);
//set dimensions of the fine level (max among the A,R ops)
levels[lev]->Ncols = mymax(levels[lev]->Ncols, levels[lev+1]->R->Ncols);
parAlmond->numLevels++;
levels[lev+1]->A->null = nullCoarseA;
levels[lev+1]->Nrows = levels[lev+1]->A->Nrows;
levels[lev+1]->Ncols = mymax(levels[lev+1]->A->Ncols, levels[lev+1]->P->Ncols);
// the coarse level's row partition is the aggregate partition built during coarsening
levels[lev+1]->globalRowStarts = levels[lev]->globalAggStarts;
levels[lev+1]->ChebyshevIterations = ChebyshevIterations;
setupSmoother(parAlmond, levels[lev+1], smoothType);
// device copies of the coarse operator and the transfer operators
levels[lev+1]->deviceA = newHYB (parAlmond, levels[lev+1]->A);
levels[lev+1]->deviceR = newHYB (parAlmond, levels[lev+1]->R);
levels[lev+1]->dcsrP = newDCOO(parAlmond, levels[lev+1]->P);
//set operator callback
void **args = (void **) calloc(2,sizeof(void*));
args[0] = (void *) parAlmond;
args[1] = (void *) levels[lev+1];
levels[lev+1]->AxArgs = args;
levels[lev+1]->coarsenArgs = args;
levels[lev+1]->prolongateArgs = args;
levels[lev+1]->smoothArgs = args;
levels[lev+1]->Ax = agmgAx;
levels[lev+1]->coarsen = agmgCoarsen;
levels[lev+1]->prolongate = agmgProlongate;
levels[lev+1]->smooth = agmgSmooth;
levels[lev+1]->device_Ax = device_agmgAx;
levels[lev+1]->device_coarsen = device_agmgCoarsen;
levels[lev+1]->device_prolongate = device_agmgProlongate;
levels[lev+1]->device_smooth = device_agmgSmooth;
const hlong localCoarseDim = (hlong) levels[lev+1]->A->Nrows;
hlong globalCoarseSize;
MPI_Allreduce(&localCoarseDim, &globalCoarseSize, 1, MPI_HLONG, MPI_SUM, agmg::comm);
// stop when the coarse problem is small enough, or when coarsening stalls
// (less than a factor-2 reduction) — install a direct solve at that level
if(globalCoarseSize <= gCoarseSize || globalSize < 2*globalCoarseSize){
setupExactSolve(parAlmond, levels[lev+1],parAlmond->nullSpace,parAlmond->nullSpacePenalty);
//setupSmoother(parAlmond, levels[lev+1], smoothType);
break;
}
globalSize = globalCoarseSize;
lev++;
}
//allocate vectors required
occa::device device = parAlmond->device;
for (int n=0;n<parAlmond->numLevels;n++) {
dlong N = levels[n]->Nrows;
dlong M = levels[n]->Ncols;
// kcycle work vectors on every level except the finest
// (note: the second condition n<numLevels is always true inside this loop)
if ((n>0)&&(n<parAlmond->numLevels)) { //kcycle vectors
if (M) levels[n]->ckp1 = (dfloat *) calloc(M,sizeof(dfloat));
if (N) levels[n]->vkp1 = (dfloat *) calloc(N,sizeof(dfloat));
if (N) levels[n]->wkp1 = (dfloat *) calloc(N,sizeof(dfloat));
if (M) levels[n]->o_ckp1 = device.malloc(M*sizeof(dfloat),levels[n]->ckp1);
if (N) levels[n]->o_vkp1 = device.malloc(N*sizeof(dfloat),levels[n]->vkp1);
if (N) levels[n]->o_wkp1 = device.malloc(N*sizeof(dfloat),levels[n]->wkp1);
}
// solution, residual and right-hand side (host + device) on every level
if (M) levels[n]->x = (dfloat *) calloc(M,sizeof(dfloat));
if (M) levels[n]->res = (dfloat *) calloc(M,sizeof(dfloat));
if (N) levels[n]->rhs = (dfloat *) calloc(N,sizeof(dfloat));
if (M) levels[n]->o_x = device.malloc(M*sizeof(dfloat),levels[n]->x);
if (M) levels[n]->o_res = device.malloc(M*sizeof(dfloat),levels[n]->res);
if (N) levels[n]->o_rhs = device.malloc(N*sizeof(dfloat),levels[n]->rhs);
}
//buffer for innerproducts in kcycle (3 partial reductions per block)
dlong numBlocks = ((levels[0]->Nrows+RDIMX*RDIMY-1)/(RDIMX*RDIMY))/RLOAD;
parAlmond->rho = (dfloat*) calloc(3*numBlocks,sizeof(dfloat));
parAlmond->o_rho = device.malloc(3*numBlocks*sizeof(dfloat), parAlmond->rho);
}
// Print a per-level summary table of the AMG hierarchy: number of active
// ranks plus min/max/avg of the local dimension, nonzero count, and
// nonzeros-per-row. Collective call — every rank must enter (MPI reductions
// inside); only rank 0 prints.
void parAlmondReport(parAlmond_t *parAlmond) {
int rank, size;
rank = agmg::rank;
size = agmg::size;
if(rank==0) {
printf("------------------ParAlmond Report-----------------------------------\n");
printf("---------------------------------------------------------------------\n");
printf("level| active ranks | dimension | nnzs | nnz/row |\n");
printf(" | | (min,max,avg) | (min,max,avg) | (min,max,avg) |\n");
printf("---------------------------------------------------------------------\n");
}
for(int lev=0; lev<parAlmond->numLevels; lev++){
dlong Nrows = parAlmond->levels[lev]->Nrows;
hlong hNrows = (hlong) parAlmond->levels[lev]->Nrows;
// a rank is "active" on this level if it still owns rows
int active = (Nrows>0) ? 1:0;
int totalActive=0;
MPI_Allreduce(&active, &totalActive, 1, MPI_INT, MPI_SUM, agmg::comm);
// min/max/avg of the per-rank row counts (averaged over active ranks only)
dlong minNrows=0, maxNrows=0;
hlong totalNrows=0;
dfloat avgNrows;
MPI_Allreduce(&Nrows, &maxNrows, 1, MPI_DLONG, MPI_MAX, agmg::comm);
MPI_Allreduce(&hNrows, &totalNrows, 1, MPI_HLONG, MPI_SUM, agmg::comm);
avgNrows = (dfloat) totalNrows/totalActive;
if (Nrows==0) Nrows=maxNrows; //set this so it's ignored for the global min
MPI_Allreduce(&Nrows, &minNrows, 1, MPI_DLONG, MPI_MIN, agmg::comm);
// local nonzero count (diag + offd blocks); 0 if the level has no matrix
long long int nnz;
if (parAlmond->levels[lev]->A)
nnz = parAlmond->levels[lev]->A->diagNNZ+parAlmond->levels[lev]->A->offdNNZ;
else
nnz =0;
long long int minNnz=0, maxNnz=0, totalNnz=0;
dfloat avgNnz;
MPI_Allreduce(&nnz, &maxNnz, 1, MPI_LONG_LONG_INT, MPI_MAX, agmg::comm);
MPI_Allreduce(&nnz, &totalNnz, 1, MPI_LONG_LONG_INT, MPI_SUM, agmg::comm);
avgNnz = (dfloat) totalNnz/totalActive;
if (nnz==0) nnz = maxNnz; //set this so it's ignored for the global min
MPI_Allreduce(&nnz, &minNnz, 1, MPI_LONG_LONG_INT, MPI_MIN, agmg::comm);
// restore the true local row count before computing the per-row density
Nrows = parAlmond->levels[lev]->Nrows;
dfloat nnzPerRow = (Nrows==0) ? 0 : (dfloat) nnz/Nrows;
dfloat minNnzPerRow=0, maxNnzPerRow=0, avgNnzPerRow=0;
MPI_Allreduce(&nnzPerRow, &maxNnzPerRow, 1, MPI_DFLOAT, MPI_MAX, agmg::comm);
MPI_Allreduce(&nnzPerRow, &avgNnzPerRow, 1, MPI_DFLOAT, MPI_SUM, agmg::comm);
avgNnzPerRow /= totalActive;
if (Nrows==0) nnzPerRow = maxNnzPerRow;
MPI_Allreduce(&nnzPerRow, &minNnzPerRow, 1, MPI_DFLOAT, MPI_MIN, agmg::comm);
// three printed rows per level: min, max, avg
if (rank==0){
printf(" %3d | %4d | %10.2f | %10.2f | %10.2f |\n",
lev, totalActive, (dfloat)minNrows, (dfloat)minNnz, minNnzPerRow);
printf(" | | %10.2f | %10.2f | %10.2f |\n",
(dfloat)maxNrows, (dfloat)maxNnz, maxNnzPerRow);
printf(" | | %10.2f | %10.2f | %10.2f |\n",
avgNrows, avgNnz, avgNnzPerRow);
}
}
if(rank==0)
printf("---------------------------------------------------------------------\n");
}
//create coarsened problem
// Coarsen one AMG level: build the strong-connection graph, aggregate the
// fine nodes, settle aggregate ownership across ranks, then form the
// prolongation P, the restriction R = P^T and the Galerkin product R*A*P.
// Outputs: *coarseA, *P, *R and the coarse near-null vector *nullCoarseA.
void coarsenAgmgLevel(agmgLevel *level, csr **coarseA, csr **P, csr **R, dfloat **nullCoarseA, setupAide options){
  // establish the graph of strong connections
  level->threshold = 0.5;
  csr *C = strong_graph(level->A, level->threshold);

  // FineToCoarse maps each local fine node to a global aggregate id
  hlong *FineToCoarse = form_aggregates(level, C);

  find_aggregate_owners(level,FineToCoarse,options);

  *P = construct_interpolator(level, FineToCoarse, nullCoarseA);
  *R = transpose(level, *P, level->globalRowStarts, level->globalAggStarts);
  *coarseA = galerkinProd(level, *R, level->A, *P);

  // fix: the fine-to-coarse map is only needed while building the transfer
  // operators above — it was previously leaked
  free(FineToCoarse);
}
// Build the graph C of "strong" connections of A: an off-diagonal entry
// (i,j) is kept when its normalized strength -sign(Aii)*Aij/sqrt(|Aii||Ajj|)
// exceeds threshold times the maximum strength in row i. The returned csr
// stores column indices only (no coefficients); the diagonal is always kept.
// Caller owns the returned matrix.
csr * strong_graph(csr *A, dfloat threshold){
  const dlong N = A->Nrows;
  const dlong M = A->Ncols;

  csr *C = (csr *) calloc(1, sizeof(csr));

  C->Nrows = N;
  C->Ncols = M;

  C->diagRowStarts = (dlong *) calloc(N+1,sizeof(dlong));
  C->offdRowStarts = (dlong *) calloc(N+1,sizeof(dlong));

  // per-row maximum connection strength, used to scale the threshold
  // (fix: initialize to NULL so the value is defined even when N==0)
  dfloat *maxOD = NULL;
  if (N) maxOD = (dfloat *) calloc(N,sizeof(dfloat));

  //store the diagonal of A for all needed columns (local + halo)
  dfloat *diagA = (dfloat *) calloc(M,sizeof(dfloat));
  for (dlong i=0;i<N;i++)
    diagA[i] = A->diagCoefs[A->diagRowStarts[i]];
  // fill the halo entries diagA[NlocalCols..M) from neighbouring ranks
  csrHaloExchange(A, sizeof(dfloat), diagA, A->sendBuffer, diagA+A->NlocalCols);

  // first pass: count the strong entries per row
  #pragma omp parallel for
  for(dlong i=0; i<N; i++){
    dfloat sign = (diagA[i] >= 0) ? 1:-1;
    dfloat Aii = fabs(diagA[i]);

    //find maxOD
    //local entries (skip jj==Jstart: the diagonal is stored first in each row)
    dlong Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
    for(dlong jj= Jstart+1; jj<Jend; jj++){
      dlong col = A->diagCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->diagCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > maxOD[i]) maxOD[i] = OD;
    }
    //non-local entries
    Jstart = A->offdRowStarts[i], Jend = A->offdRowStarts[i+1];
    for(dlong jj= Jstart; jj<Jend; jj++){
      dlong col = A->offdCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->offdCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > maxOD[i]) maxOD[i] = OD;
    }

    int diag_strong_per_row = 1; // diagonal entry is always kept
    //local entries
    Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
    for(dlong jj = Jstart+1; jj<Jend; jj++){
      dlong col = A->diagCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->diagCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i]) diag_strong_per_row++;
    }
    int offd_strong_per_row = 0;
    //non-local entries
    Jstart = A->offdRowStarts[i], Jend = A->offdRowStarts[i+1];
    for(dlong jj= Jstart; jj<Jend; jj++){
      dlong col = A->offdCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->offdCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i]) offd_strong_per_row++;
    }

    C->diagRowStarts[i+1] = diag_strong_per_row;
    C->offdRowStarts[i+1] = offd_strong_per_row;
  }

  // cumulative sum: per-row counts -> row offsets
  for(dlong i=1; i<N+1 ; i++) {
    C->diagRowStarts[i] += C->diagRowStarts[i-1];
    C->offdRowStarts[i] += C->offdRowStarts[i-1];
  }
  C->diagNNZ = C->diagRowStarts[N];
  C->offdNNZ = C->offdRowStarts[N];

  if (C->diagNNZ) C->diagCols = (dlong *) calloc(C->diagNNZ, sizeof(dlong));
  if (C->offdNNZ) C->offdCols = (dlong *) calloc(C->offdNNZ, sizeof(dlong));

  // second pass: fill in the columns for strong connections
  #pragma omp parallel for
  for(dlong i=0; i<N; i++){
    dfloat sign = (diagA[i] >= 0) ? 1:-1;
    dfloat Aii = fabs(diagA[i]);

    dlong diagCounter = C->diagRowStarts[i];
    dlong offdCounter = C->offdRowStarts[i];

    //local entries
    C->diagCols[diagCounter++] = i;// diag entry
    dlong Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
    for(dlong jj = Jstart+1; jj<Jend; jj++){
      dlong col = A->diagCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->diagCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i])
        C->diagCols[diagCounter++] = A->diagCols[jj];
    }
    Jstart = A->offdRowStarts[i], Jend = A->offdRowStarts[i+1];
    for(dlong jj = Jstart; jj<Jend; jj++){
      dlong col = A->offdCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->offdCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i])
        C->offdCols[offdCounter++] = A->offdCols[jj];
    }
  }

  free(diagA); // fix: diagA was previously leaked
  if(N) free(maxOD);

  return C;
}
// Lexicographic "is the candidate stronger?" test used by the aggregation:
// candidate (s, r, i) beats the incumbent (smax, rmax, imax) first by state,
// then by randomized weight, then by global id. Exact ties lose.
bool customLess(int smax, dfloat rmax, hlong imax, int s, dfloat r, hlong i){
  // state decides first
  if(s > smax) return true;
  if(s < smax) return false;
  // then the randomized weight
  if(r > rmax) return true;
  if(r < rmax) return false;
  // finally the global index breaks remaining ties
  return i > imax;
}
// Parallel distance-2 maximal-independent-set aggregation on the strong-
// connection graph C. Each fine node receives the global id of the aggregate
// root it joins; the mapping (length C->Ncols, caller owns it) is returned
// and level->globalAggStarts is allocated and filled with the aggregate
// partition. Collective call (MPI exchanges and reductions inside).
hlong * form_aggregates(agmgLevel *level, csr *C){
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;

  const dlong N   = C->Nrows;
  const dlong M   = C->Ncols;
  const dlong diagNNZ = C->diagNNZ;
  const dlong offdNNZ = C->offdNNZ;

  hlong *FineToCoarse = (hlong *) calloc(M, sizeof(hlong));
  for (dlong i =0;i<M;i++) FineToCoarse[i] = -1;

  // randomized weights and MIS states (-1 removed, 0 undecided, 1 root)
  dfloat *rands = (dfloat *) calloc(M, sizeof(dfloat));
  int   *states = (int *)    calloc(M, sizeof(int));

  // scratch arrays: strongest (state, rand, id, aggregate) seen in a sweep
  dfloat *Tr = (dfloat *) calloc(M, sizeof(dfloat));
  int    *Ts = (int *)    calloc(M, sizeof(int));
  hlong  *Ti = (hlong *)  calloc(M, sizeof(hlong));
  hlong  *Tc = (hlong *)  calloc(M, sizeof(hlong));

  csr *A = level->A;
  hlong *globalRowStarts = level->globalRowStarts;

  // fix: NULL-initialize so no indeterminate pointer values are passed to
  // csrHaloExchange on ranks with nothing to send
  int    *intSendBuffer    = NULL;
  hlong  *hlongSendBuffer  = NULL;
  dfloat *dfloatSendBuffer = NULL;
  if (level->A->NsendTotal) {
    intSendBuffer    = (int *)    calloc(A->NsendTotal,sizeof(int));
    hlongSendBuffer  = (hlong *)  calloc(A->NsendTotal,sizeof(hlong));
    dfloatSendBuffer = (dfloat *) calloc(A->NsendTotal,sizeof(dfloat));
  }

  for(dlong i=0; i<N; i++)
    rands[i] = (dfloat) drand48();

  for(dlong i=0; i<N; i++)
    states[i] = 0;

  // add the number of non-zeros in each column (favors well-connected nodes)
  //local non-zeros
  for(dlong i=0; i<diagNNZ; i++)
    rands[C->diagCols[i]] += 1.;

  int *nnzCnt = NULL, *recvNnzCnt = NULL;
  if (A->NHalo) nnzCnt = (int *) calloc(A->NHalo,sizeof(int));
  if (A->NsendTotal) recvNnzCnt = (int *) calloc(A->NsendTotal,sizeof(int));

  //count the non-local non-zeros
  for (dlong i=0;i<offdNNZ;i++)
    nnzCnt[C->offdCols[i]-A->NlocalCols]++;

  //do a reverse halo exchange: receives are posted against the usual send
  //pairs and sends against the usual receive pairs
  int tag = 999;

  // initiate immediate send and receives to each other process as needed
  dlong recvOffset = 0;
  dlong sendOffset = 0;
  int sendMessage = 0, recvMessage = 0;
  for(int r=0;r<size;++r){
    if (A->NsendTotal) {
      if(A->NsendPairs[r]) {
        MPI_Irecv(recvNnzCnt+sendOffset, A->NsendPairs[r], MPI_INT, r, tag,
                  agmg::comm, (MPI_Request*)A->haloSendRequests+sendMessage);
        sendOffset += A->NsendPairs[r];
        ++sendMessage;
      }
    }
    if (A->NrecvTotal) {
      if(A->NrecvPairs[r]){
        MPI_Isend(nnzCnt+recvOffset, A->NrecvPairs[r], MPI_INT, r, tag,
                  agmg::comm, (MPI_Request*)A->haloRecvRequests+recvMessage);
        recvOffset += A->NrecvPairs[r];
        ++recvMessage;
      }
    }
  }

  // Wait for all sent messages to have left and received messages to have arrived.
  // fix: the requests in haloSendRequests were posted under NsendTotal (and
  // haloRecvRequests under NrecvTotal) — the guards previously tested the
  // opposite counters and, on a rank with an asymmetric halo, could wait on
  // requests that were never posted or skip ones that were.
  if (A->NsendTotal) {
    MPI_Status *sendStatus = (MPI_Status*) calloc(A->NsendMessages, sizeof(MPI_Status));
    MPI_Waitall(A->NsendMessages, (MPI_Request*)A->haloSendRequests, sendStatus);
    free(sendStatus);
  }
  if (A->NrecvTotal) {
    MPI_Status *recvStatus = (MPI_Status*) calloc(A->NrecvMessages, sizeof(MPI_Status));
    MPI_Waitall(A->NrecvMessages, (MPI_Request*)A->haloRecvRequests, recvStatus);
    free(recvStatus);
  }

  for(int i=0;i<A->NsendTotal;++i){
    // local index of outgoing element in halo exchange
    dlong id = A->haloElementList[i];
    rands[id] += recvNnzCnt[i];
  }

  if (A->NHalo) free(nnzCnt);
  if (A->NsendTotal) free(recvNnzCnt);

  //share randomizer values
  csrHaloExchange(A, sizeof(dfloat), rands, dfloatSendBuffer, rands+A->NlocalCols);

  hlong done = 0;
  while(!done){
    // first neighbours: strongest (state, rand, id) in each 1-ring
    #pragma omp parallel for
    for(dlong i=0; i<N; i++){
      int    smax = states[i];
      dfloat rmax = rands[i];
      hlong  imax = i + globalRowStarts[rank];

      if(smax != 1){
        //local entries
        for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
          const dlong col = C->diagCols[jj];
          if(customLess(smax, rmax, imax, states[col], rands[col], col + globalRowStarts[rank])){
            smax = states[col];
            rmax = rands[col];
            imax = col + globalRowStarts[rank];
          }
        }
        //nonlocal entries
        for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
          const dlong col = C->offdCols[jj];
          if(customLess(smax, rmax, imax, states[col], rands[col], A->colMap[col])) {
            smax = states[col];
            rmax = rands[col];
            imax = A->colMap[col];
          }
        }
      }
      Ts[i] = smax;
      Tr[i] = rmax;
      Ti[i] = imax;
    }

    //share results
    csrHaloExchange(A, sizeof(dfloat), Tr, dfloatSendBuffer, Tr+A->NlocalCols);
    csrHaloExchange(A, sizeof(int),    Ts, intSendBuffer,    Ts+A->NlocalCols);
    csrHaloExchange(A, sizeof(hlong),  Ti, hlongSendBuffer,  Ti+A->NlocalCols);

    // second neighbours: propagate the 1-ring maxima one more hop
    #pragma omp parallel for
    for(dlong i=0; i<N; i++){
      int    smax = Ts[i];
      dfloat rmax = Tr[i];
      hlong  imax = Ti[i];

      //local entries
      for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
        const dlong col = C->diagCols[jj];
        if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
          smax = Ts[col];
          rmax = Tr[col];
          imax = Ti[col];
        }
      }
      //nonlocal entries
      for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
        const dlong col = C->offdCols[jj];
        if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
          smax = Ts[col];
          rmax = Tr[col];
          imax = Ti[col];
        }
      }

      // if I am the strongest among all the 1 and 2 ring neighbours
      // I am an MIS node
      if((states[i] == 0) && (imax == (i + globalRowStarts[rank])))
        states[i] = 1;

      // if there is an MIS node within distance 2, I am removed
      if((states[i] == 0) && (smax == 1))
        states[i] = -1;
    }

    csrHaloExchange(A, sizeof(int), states, intSendBuffer, states+A->NlocalCols);

    // if number of undecided nodes = 0, algorithm terminates
    hlong cnt = std::count(states, states+N, 0);
    MPI_Allreduce(&cnt,&done,1,MPI_HLONG, MPI_SUM,agmg::comm);
    done = (done == 0) ? 1 : 0;
  }

  dlong numAggs = 0;
  dlong *gNumAggs = (dlong *) calloc(size,sizeof(dlong));
  level->globalAggStarts = (hlong *) calloc(size+1,sizeof(hlong));

  // count the coarse nodes/aggregates owned locally
  for(dlong i=0; i<N; i++)
    if(states[i] == 1) numAggs++;

  MPI_Allgather(&numAggs,1,MPI_DLONG,gNumAggs,1,MPI_DLONG,agmg::comm);

  // prefix-sum the per-rank counts into a global aggregate partition
  level->globalAggStarts[0] = 0;
  for (int r=0;r<size;r++)
    level->globalAggStarts[r+1] = level->globalAggStarts[r] + gNumAggs[r];

  free(gNumAggs); // fix: was previously leaked

  numAggs = 0;
  // enumerate the coarse nodes/aggregates
  for(dlong i=0; i<N; i++)
    if(states[i] == 1)
      FineToCoarse[i] = level->globalAggStarts[rank] + numAggs++;

  //share the initial aggregate flags
  csrHaloExchange(A, sizeof(hlong), FineToCoarse, hlongSendBuffer, FineToCoarse+A->NlocalCols);

  // form the aggregates: attach each removed node to the strongest root in
  // its 1-ring ...
  #pragma omp parallel for
  for(dlong i=0; i<N; i++){
    int    smax = states[i];
    dfloat rmax = rands[i];
    hlong  imax = i + globalRowStarts[rank];
    hlong  cmax = FineToCoarse[i];

    if(smax != 1){
      //local entries
      for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
        const dlong col = C->diagCols[jj];
        if(customLess(smax, rmax, imax, states[col], rands[col], col + globalRowStarts[rank])){
          smax = states[col];
          rmax = rands[col];
          imax = col + globalRowStarts[rank];
          cmax = FineToCoarse[col];
        }
      }
      //nonlocal entries
      for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
        const dlong col = C->offdCols[jj];
        if(customLess(smax, rmax, imax, states[col], rands[col], A->colMap[col])){
          smax = states[col];
          rmax = rands[col];
          imax = A->colMap[col];
          cmax = FineToCoarse[col];
        }
      }
    }
    Ts[i] = smax;
    Tr[i] = rmax;
    Ti[i] = imax;
    Tc[i] = cmax;

    if((states[i] == -1) && (smax == 1) && (cmax > -1))
      FineToCoarse[i] = cmax;
  }

  csrHaloExchange(A, sizeof(hlong),  FineToCoarse, hlongSendBuffer,  FineToCoarse+A->NlocalCols);
  csrHaloExchange(A, sizeof(dfloat), Tr, dfloatSendBuffer, Tr+A->NlocalCols);
  csrHaloExchange(A, sizeof(int),    Ts, intSendBuffer,    Ts+A->NlocalCols);
  csrHaloExchange(A, sizeof(hlong),  Ti, hlongSendBuffer,  Ti+A->NlocalCols);
  csrHaloExchange(A, sizeof(hlong),  Tc, hlongSendBuffer,  Tc+A->NlocalCols);

  // ... then sweep the 2-ring so every removed node ends up in an aggregate
  #pragma omp parallel for
  for(dlong i=0; i<N; i++){
    int    smax = Ts[i];
    dfloat rmax = Tr[i];
    hlong  imax = Ti[i];
    hlong  cmax = Tc[i];

    //local entries
    for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
      const dlong col = C->diagCols[jj];
      if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
        smax = Ts[col];
        rmax = Tr[col];
        imax = Ti[col];
        cmax = Tc[col];
      }
    }
    //nonlocal entries
    for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
      const dlong col = C->offdCols[jj];
      if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
        smax = Ts[col];
        rmax = Tr[col];
        imax = Ti[col];
        cmax = Tc[col];
      }
    }

    if((states[i] == -1) && (smax == 1) && (cmax > -1))
      FineToCoarse[i] = cmax;
  }

  csrHaloExchange(A, sizeof(hlong), FineToCoarse, hlongSendBuffer, FineToCoarse+A->NlocalCols);

  free(rands);
  free(states);
  free(Tr);
  free(Ts);
  free(Ti);
  free(Tc);
  if (level->A->NsendTotal) {
    free(intSendBuffer);
    free(hlongSendBuffer);
    free(dfloatSendBuffer);
  }

  //TODO maybe free C here?
  return FineToCoarse;
}
// One fine-node -> aggregate record exchanged between ranks while
// (re)assigning aggregate ownership in find_aggregate_owners().
typedef struct {
dlong fineId; // local row index on the origin rank
hlong coarseId; // current global aggregate id
hlong newCoarseId; // renumbered global aggregate id (filled in by the owner)
int originRank; // rank the fine node lives on
int ownerRank; // rank chosen to own the aggregate
} parallelAggregate_t;
int compareOwner(const void *a, const void *b){
parallelAggregate_t *pa = (parallelAggregate_t *) a;
parallelAggregate_t *pb = (parallelAggregate_t *) b;
if (pa->ownerRank < pb->ownerRank) return -1;
if (pa->ownerRank > pb->ownerRank) return +1;
return 0;
};
int compareAgg(const void *a, const void *b){
parallelAggregate_t *pa = (parallelAggregate_t *) a;
parallelAggregate_t *pb = (parallelAggregate_t *) b;
if (pa->coarseId < pb->coarseId) return -1;
if (pa->coarseId > pb->coarseId) return +1;
if (pa->originRank < pb->originRank) return -1;
if (pa->originRank > pb->originRank) return +1;
return 0;
};
int compareOrigin(const void *a, const void *b){
parallelAggregate_t *pa = (parallelAggregate_t *) a;
parallelAggregate_t *pb = (parallelAggregate_t *) b;
if (pa->originRank < pb->originRank) return -1;
if (pa->originRank > pb->originRank) return +1;
return 0;
};
// Reassign ownership of the aggregates across MPI ranks according to the
// "PARALMOND PARTITION" option (STRONGNODES keeps the current partition,
// DISTRIBUTED gives each aggregate to its largest contributor, default
// SATURATE gives it to the lowest contributing rank), renumber the
// aggregates, rewrite FineToCoarse in place and refresh
// level->globalAggStarts. Collective call.
void find_aggregate_owners(agmgLevel *level, hlong* FineToCoarse, setupAide options) {
  // MPI info
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;

  dlong N = level->A->Nrows;

  //Need to establish 'ownership' of aggregates
  //Keep the current partitioning for STRONGNODES.
  // The rank that had the strong node for each aggregate owns the aggregate
  if (options.compareArgs("PARALMOND PARTITION", "STRONGNODES")) return;

  //populate aggregate array
  hlong gNumAggs = level->globalAggStarts[size]; //total number of aggregates

  parallelAggregate_t *sendAggs;
  if (N)
    sendAggs = (parallelAggregate_t *) calloc(N,sizeof(parallelAggregate_t));
  else
    sendAggs = (parallelAggregate_t *) calloc(1,sizeof(parallelAggregate_t));

  for (dlong i=0;i<N;i++) {
    sendAggs[i].fineId = i;
    sendAggs[i].originRank = rank;

    sendAggs[i].coarseId = FineToCoarse[i];

    //set a temporary owner. Evenly distribute aggregates amongst ranks
    sendAggs[i].ownerRank = (int) (FineToCoarse[i]*size)/gNumAggs;
  }

  // Make the MPI_PARALLEL_AGGREGATE data type
  MPI_Datatype MPI_PARALLEL_AGGREGATE;
  MPI_Datatype dtype[5] = {MPI_DLONG, MPI_HLONG, MPI_HLONG, MPI_INT, MPI_INT};
  int blength[5] = {1, 1, 1, 1, 1};
  MPI_Aint addr[5], displ[5];
  MPI_Get_address ( &(sendAggs[0]            ), addr+0);
  MPI_Get_address ( &(sendAggs[0].coarseId   ), addr+1);
  MPI_Get_address ( &(sendAggs[0].newCoarseId), addr+2);
  MPI_Get_address ( &(sendAggs[0].originRank ), addr+3);
  MPI_Get_address ( &(sendAggs[0].ownerRank  ), addr+4);
  displ[0] = 0;
  displ[1] = addr[1] - addr[0];
  displ[2] = addr[2] - addr[0];
  displ[3] = addr[3] - addr[0];
  displ[4] = addr[4] - addr[0];
  MPI_Type_create_struct (5, blength, displ, dtype, &MPI_PARALLEL_AGGREGATE);
  MPI_Type_commit (&MPI_PARALLEL_AGGREGATE);

  //sort by owning rank for all_reduce
  qsort(sendAggs, N, sizeof(parallelAggregate_t), compareOwner);

  int *sendCounts = (int *) calloc(size,sizeof(int));
  int *recvCounts = (int *) calloc(size,sizeof(int));
  int *sendOffsets = (int *) calloc(size+1,sizeof(int));
  int *recvOffsets = (int *) calloc(size+1,sizeof(int));

  for(dlong i=0;i<N;++i)
    sendCounts[sendAggs[i].ownerRank]++;

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(sendCounts, 1, MPI_INT, recvCounts, 1, MPI_INT, agmg::comm);

  // find send and recv offsets for gather
  dlong recvNtotal = 0;
  for(int r=0;r<size;++r){
    sendOffsets[r+1] = sendOffsets[r] + sendCounts[r];
    recvOffsets[r+1] = recvOffsets[r] + recvCounts[r];
    recvNtotal += recvCounts[r];
  }
  parallelAggregate_t *recvAggs = (parallelAggregate_t *) calloc(recvNtotal,sizeof(parallelAggregate_t));

  MPI_Alltoallv(sendAggs, sendCounts, sendOffsets, MPI_PARALLEL_AGGREGATE,
                recvAggs, recvCounts, recvOffsets, MPI_PARALLEL_AGGREGATE,
                agmg::comm);

  //sort by coarse aggregate number, and then by original rank
  qsort(recvAggs, recvNtotal, sizeof(parallelAggregate_t), compareAgg);

  //count the number of unique aggregates here
  dlong NumUniqueAggs =0;
  if (recvNtotal) NumUniqueAggs++;
  for (dlong i=1;i<recvNtotal;i++)
    if(recvAggs[i].coarseId!=recvAggs[i-1].coarseId) NumUniqueAggs++;

  //get their locations in the array
  // fix: NULL-initialize — aggStarts was used and freed uninitialized when
  // NumUniqueAggs==0
  dlong *aggStarts = NULL;
  if (NumUniqueAggs)
    aggStarts = (dlong *) calloc(NumUniqueAggs+1,sizeof(dlong));
  dlong cnt = 1;
  for (dlong i=1;i<recvNtotal;i++)
    if(recvAggs[i].coarseId!=recvAggs[i-1].coarseId) aggStarts[cnt++] = i;
  if (NumUniqueAggs) aggStarts[NumUniqueAggs] = recvNtotal; // fix: guard the write


  if (options.compareArgs("PARALMOND PARTITION", "DISTRIBUTED")) { //rank that contributes most to the aggregate owns it
    //use a random dfloat for each rank to break ties.
    dfloat rand = (dfloat) drand48();
    dfloat *gRands = (dfloat *) calloc(size,sizeof(dfloat));
    MPI_Allgather(&rand, 1, MPI_DFLOAT, gRands, 1, MPI_DFLOAT, agmg::comm);

    //determine the aggregates majority owner
    // fix: dfloat counters — with the previous int counters the fractional
    // random tie-breakers in gRands truncated to zero and had no effect
    dfloat *rankCounts = (dfloat *) calloc(size,sizeof(dfloat));
    for (dlong n=0;n<NumUniqueAggs;n++) {
      //populate randomizer
      for (int r=0;r<size;r++)
        rankCounts[r] = gRands[r];

      //count the number of contributions to the aggregate from the separate ranks
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++)
        rankCounts[recvAggs[i].originRank]++;

      //find which rank is contributing the most to this aggregate
      int ownerRank = 0;
      dfloat maxEntries = rankCounts[0];
      for (int r=1;r<size;r++) {
        if (rankCounts[r]>maxEntries) {
          ownerRank = r;
          maxEntries = rankCounts[r];
        }
      }

      //set this aggregate's owner
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++)
        recvAggs[i].ownerRank = ownerRank;
    }
    free(gRands); free(rankCounts);
  } else { //default SATURATE: always choose the lowest rank to own the aggregate
    for (dlong n=0;n<NumUniqueAggs;n++) {
      int minrank = size;

      //count the number of contributions to the aggregate from the separate ranks
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++){
        minrank = (recvAggs[i].originRank<minrank) ? recvAggs[i].originRank : minrank;
      }

      //set this aggregate's owner
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++)
        recvAggs[i].ownerRank = minrank;
    }
  }
  free(aggStarts);

  //sort by owning rank
  qsort(recvAggs, recvNtotal, sizeof(parallelAggregate_t), compareOwner);

  int *newSendCounts = (int *) calloc(size,sizeof(int));
  int *newRecvCounts = (int *) calloc(size,sizeof(int));
  int *newSendOffsets = (int *) calloc(size+1,sizeof(int));
  int *newRecvOffsets = (int *) calloc(size+1,sizeof(int));

  for(dlong i=0;i<recvNtotal;++i)
    newSendCounts[recvAggs[i].ownerRank]++;

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(newSendCounts, 1, MPI_INT, newRecvCounts, 1, MPI_INT, agmg::comm);

  // find send and recv offsets for gather
  dlong newRecvNtotal = 0;
  for(int r=0;r<size;++r){
    newSendOffsets[r+1] = newSendOffsets[r] + newSendCounts[r];
    newRecvOffsets[r+1] = newRecvOffsets[r] + newRecvCounts[r];
    newRecvNtotal += newRecvCounts[r];
  }
  parallelAggregate_t *newRecvAggs = (parallelAggregate_t *) calloc(newRecvNtotal,sizeof(parallelAggregate_t));

  MPI_Alltoallv( recvAggs, newSendCounts, newSendOffsets, MPI_PARALLEL_AGGREGATE,
                newRecvAggs, newRecvCounts, newRecvOffsets, MPI_PARALLEL_AGGREGATE,
                agmg::comm);

  //sort by coarse aggregate number, and then by original rank
  qsort(newRecvAggs, newRecvNtotal, sizeof(parallelAggregate_t), compareAgg);

  //count the number of unique aggregates this rank owns
  dlong numAggs = 0;
  if (newRecvNtotal) numAggs++;
  for (dlong i=1;i<newRecvNtotal;i++)
    if(newRecvAggs[i].coarseId!=newRecvAggs[i-1].coarseId) numAggs++;

  //determine a global numbering of the aggregates
  dlong *lNumAggs = (dlong*) calloc(size,sizeof(dlong));
  // fix: the send and recv datatypes must describe the same signature —
  // the receive side previously used MPI_INT against an MPI_DLONG send
  MPI_Allgather(&numAggs, 1, MPI_DLONG, lNumAggs, 1, MPI_DLONG, agmg::comm);

  level->globalAggStarts[0] = 0;
  for (int r=0;r<size;r++)
    level->globalAggStarts[r+1] = level->globalAggStarts[r] + lNumAggs[r];

  free(lNumAggs); // fix: was previously leaked

  //set the new global coarse index
  cnt = level->globalAggStarts[rank];
  if (newRecvNtotal) newRecvAggs[0].newCoarseId = cnt;
  for (dlong i=1;i<newRecvNtotal;i++) {
    if(newRecvAggs[i].coarseId!=newRecvAggs[i-1].coarseId) cnt++;

    newRecvAggs[i].newCoarseId = cnt;
  }

  //sort by origin rank to route the records back home
  qsort(newRecvAggs, newRecvNtotal, sizeof(parallelAggregate_t), compareOrigin);

  for(int r=0;r<size;r++) sendCounts[r] = 0;
  for(int r=0;r<=size;r++) {
    sendOffsets[r] = 0;
    recvOffsets[r] = 0;
  }

  for(dlong i=0;i<newRecvNtotal;++i)
    sendCounts[newRecvAggs[i].originRank]++;

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(sendCounts, 1, MPI_INT, recvCounts, 1, MPI_INT, agmg::comm);

  // find send and recv offsets for gather
  recvNtotal = 0;
  for(int r=0;r<size;++r){
    sendOffsets[r+1] = sendOffsets[r] + sendCounts[r];
    recvOffsets[r+1] = recvOffsets[r] + recvCounts[r];
    recvNtotal += recvCounts[r];
  }

  //send the aggregate data back
  MPI_Alltoallv(newRecvAggs, sendCounts, sendOffsets, MPI_PARALLEL_AGGREGATE,
                   sendAggs, recvCounts, recvOffsets, MPI_PARALLEL_AGGREGATE,
                agmg::comm);

  //clean up
  MPI_Barrier(agmg::comm);
  MPI_Type_free(&MPI_PARALLEL_AGGREGATE);

  free(recvAggs);
  free(sendCounts);  free(recvCounts);
  free(sendOffsets); free(recvOffsets);
  free(newRecvAggs);
  free(newSendCounts);  free(newRecvCounts);
  free(newSendOffsets); free(newRecvOffsets);

  //record the new FineToCoarse map
  for (dlong i=0;i<N;i++)
    FineToCoarse[sendAggs[i].fineId] = sendAggs[i].newCoarseId;

  free(sendAggs);
}
csr *construct_interpolator(agmgLevel *level, hlong *FineToCoarse, dfloat **nullCoarseA){
// Build the prolongation (interpolation) matrix P for this AMG level from the
// FineToCoarse aggregate map. Each fine row i has exactly one nonzero, in
// global coarse column FineToCoarse[i], whose value comes from the fine-level
// near-null vector level->A->null. On return the columns of P are normalized
// and *nullCoarseA holds the coarse near-null vector (the column 2-norms).
// The caller owns the returned csr and the *nullCoarseA allocation.
  // MPI info
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;
  const dlong N = level->A->Nrows;
  // const dlong M = level->A->Ncols;
  hlong *globalAggStarts = level->globalAggStarts;
  const hlong globalAggOffset = level->globalAggStarts[rank];
  const dlong NCoarse = (dlong) (globalAggStarts[rank+1]-globalAggStarts[rank]); //local num agg
  csr* P = (csr *) calloc(1, sizeof(csr));
  P->Nrows = N;
  P->Ncols = NCoarse;
  P->NlocalCols = NCoarse;
  P->NHalo = 0;
  P->diagRowStarts = (dlong *) calloc(N+1, sizeof(dlong));
  P->offdRowStarts = (dlong *) calloc(N+1, sizeof(dlong));
  // each row has exactly one nonzero per row
  P->diagNNZ =0;
  P->offdNNZ =0;
// first pass: classify each row's single entry as diagonal (locally-owned
// coarse column) or off-diagonal, accumulating per-row counts
  for(dlong i=0; i<N; i++) {
    hlong col = FineToCoarse[i];
    if ((col>globalAggOffset-1)&&(col<globalAggOffset+NCoarse)) {
      P->diagNNZ++;
      P->diagRowStarts[i+1]++;
    } else {
      P->offdNNZ++;
      P->offdRowStarts[i+1]++;
    }
  }
// cumulative sum turns the per-row counts into CSR row start offsets
  for(dlong i=0; i<N; i++) {
    P->diagRowStarts[i+1] += P->diagRowStarts[i];
    P->offdRowStarts[i+1] += P->offdRowStarts[i];
  }
  if (P->diagNNZ) {
    P->diagCols = (dlong *) calloc(P->diagNNZ, sizeof(dlong));
    P->diagCoefs = (dfloat *) calloc(P->diagNNZ, sizeof(dfloat));
  }
  hlong *offdCols;
  if (P->offdNNZ) {
    offdCols = (hlong *) calloc(P->offdNNZ, sizeof(hlong));
    P->offdCols = (dlong *) calloc(P->offdNNZ, sizeof(dlong));
    P->offdCoefs = (dfloat *) calloc(P->offdNNZ, sizeof(dfloat));
  }
// second pass: fill the entries; every coefficient is the fine null vector value
  dlong diagCnt = 0;
  dlong offdCnt = 0;
  for(dlong i=0; i<N; i++) {
    hlong col = FineToCoarse[i];
    if ((col>globalAggStarts[rank]-1)&&(col<globalAggStarts[rank+1])) {
      P->diagCols[diagCnt] = (dlong) (col - globalAggOffset); //local index
      P->diagCoefs[diagCnt++] = level->A->null[i];
    } else {
      offdCols[offdCnt] = col;
      P->offdCoefs[offdCnt++] = level->A->null[i];
    }
  }
  //record global indexing of columns
  P->colMap = (hlong *) calloc(P->Ncols, sizeof(hlong));
  for (dlong i=0;i<P->Ncols;i++)
    P->colMap[i] = i + globalAggOffset;
  if (P->offdNNZ) {
    //we now need to reorder the x vector for the halo, and shift the column indices
    hlong *col = (hlong *) calloc(P->offdNNZ,sizeof(hlong));
    for (dlong i=0;i<P->offdNNZ;i++)
      col[i] = offdCols[i]; //copy non-local column global ids
    //sort by global index
    std::sort(col,col+P->offdNNZ);
    //count unique non-local column ids
    P->NHalo = 0;
    for (dlong i=1;i<P->offdNNZ;i++)
      if (col[i]!=col[i-1]) col[++P->NHalo] = col[i];
    P->NHalo++; //number of unique columns
    P->Ncols += P->NHalo;
    //save global column ids in colMap
    P->colMap = (hlong *) realloc(P->colMap, P->Ncols*sizeof(hlong));
    for (dlong i=0; i<P->NHalo; i++)
      P->colMap[i+P->NlocalCols] = col[i];
    free(col);
    //shift the column indices to local indexing
// NOTE(review): O(offdNNZ * NHalo) linear search over the sorted colMap tail;
// a binary search would reduce setup cost on wide halos.
    for (dlong i=0;i<P->offdNNZ;i++) {
      hlong gcol = offdCols[i];
      for (dlong m=P->NlocalCols;m<P->Ncols;m++) {
        if (gcol == P->colMap[m])
          P->offdCols[i] = m;
      }
    }
    free(offdCols);
  }
  csrHaloSetup(P,globalAggStarts);
  // normalize the columns of P
  *nullCoarseA = (dfloat *) calloc(P->Ncols,sizeof(dfloat));
  //add local nonzeros
  for(dlong i=0; i<P->diagNNZ; i++)
    (*nullCoarseA)[P->diagCols[i]] += P->diagCoefs[i] * P->diagCoefs[i];
// nnzSum holds halo partial column sums; recvNnzSum receives partial sums
// pushed back from other ranks. Both are only allocated when needed
// (offdNNZ > 0 implies NHalo > 0, so nnzSum is valid below when used).
  dfloat *nnzSum, *recvNnzSum;
  if (P->NHalo) nnzSum = (dfloat *) calloc(P->NHalo,sizeof(dfloat));
  if (P->NsendTotal) recvNnzSum = (dfloat *) calloc(P->NsendTotal,sizeof(dfloat));
  //add the non-local non-zeros
  for (dlong i=0;i<P->offdNNZ;i++)
    nnzSum[P->offdCols[i]-P->NlocalCols] += P->offdCoefs[i] * P->offdCoefs[i];
  //do a reverse halo exchange
// the usual halo roles are swapped: we Irecv into the *send* pairing and
// Isend from the *recv* pairing, returning halo partial sums to the owners
  int tag = 999;
  // initiate immediate send and receives to each other process as needed
  dlong recvOffset = 0;
  dlong sendOffset = 0;
  int sendMessage = 0, recvMessage = 0;
  for(int r=0;r<size;++r){
    if (P->NsendTotal) {
      if(P->NsendPairs[r]) {
        MPI_Irecv(recvNnzSum+sendOffset, P->NsendPairs[r], MPI_DFLOAT, r, tag,
                  agmg::comm, (MPI_Request*)P->haloSendRequests+sendMessage);
        sendOffset += P->NsendPairs[r];
        ++sendMessage;
      }
    }
    if (P->NrecvTotal) {
      if(P->NrecvPairs[r]){
        MPI_Isend(nnzSum+recvOffset, P->NrecvPairs[r], MPI_DFLOAT, r, tag,
                  agmg::comm, (MPI_Request*)P->haloRecvRequests+recvMessage);
        recvOffset += P->NrecvPairs[r];
        ++recvMessage;
      }
    }
  }
  // Wait for all sent messages to have left and received messages to have arrived
// NOTE(review): these guards look swapped -- the Waitall on the send-side
// requests is gated on NrecvTotal (and vice versa), while the requests above
// were posted under the opposite guard. A rank with NsendTotal>0 but
// NrecvTotal==0 would never wait on its posted Irecvs. Confirm intent.
  if (P->NrecvTotal) {
    MPI_Status *sendStatus = (MPI_Status*) calloc(P->NsendMessages, sizeof(MPI_Status));
    MPI_Waitall(P->NsendMessages, (MPI_Request*)P->haloSendRequests, sendStatus);
    free(sendStatus);
  }
  if (P->NsendTotal) {
    MPI_Status *recvStatus = (MPI_Status*) calloc(P->NrecvMessages, sizeof(MPI_Status));
    MPI_Waitall(P->NrecvMessages, (MPI_Request*)P->haloRecvRequests, recvStatus);
    free(recvStatus);
  }
// fold the returned halo partial sums into the owned column norms
  for(dlong i=0;i<P->NsendTotal;++i){
    // local index of outgoing element in halo exchange
    dlong id = P->haloElementList[i];
    (*nullCoarseA)[id] += recvNnzSum[i];
  }
  if (P->NHalo) free(nnzSum);
// finish the norms, then exchange them so halo columns can be scaled too
  for(dlong i=0; i<NCoarse; i++)
    (*nullCoarseA)[i] = sqrt((*nullCoarseA)[i]);
  csrHaloExchange(P, sizeof(dfloat), *nullCoarseA, P->sendBuffer, *nullCoarseA+P->NlocalCols);
  for(dlong i=0; i<P->diagNNZ; i++)
    P->diagCoefs[i] /= (*nullCoarseA)[P->diagCols[i]];
  for(dlong i=0; i<P->offdNNZ; i++)
    P->offdCoefs[i] /= (*nullCoarseA)[P->offdCols[i]];
  MPI_Barrier(agmg::comm);
  if (P->NsendTotal) free(recvNnzSum);
  return P;
}
// One nonzero of a distributed matrix, tagged with the MPI rank that owns
// its destination row so entries can be routed during the transpose.
typedef struct {
  hlong row;   // global row id
  hlong col;   // global column id
  dfloat val;  // matrix coefficient
  int owner;   // MPI rank owning the destination row
} nonzero_t;
int compareNonZero(const void *a, const void *b){
nonzero_t *pa = (nonzero_t *) a;
nonzero_t *pb = (nonzero_t *) b;
if (pa->owner < pb->owner) return -1;
if (pa->owner > pb->owner) return +1;
if (pa->row < pb->row) return -1;
if (pa->row > pb->row) return +1;
if (pa->col < pb->col) return -1;
if (pa->col > pb->col) return +1;
return 0;
};
// Compute the distributed transpose At = A^T of the parallel CSR matrix A.
//
// Local (diag-block) entries transpose in place on this rank. Off-diagonal
// entries belong to rows owned by other ranks after transposition, so they
// are shipped to their owners with point-to-point messages (reusing A's halo
// request buffers, send/recv roles swapped since this is a reverse exchange),
// assembled into At's offd block, and a fresh halo is set up for At.
// globalRowStarts/globalColStarts give A's row/column partitions over ranks.
// The caller owns the returned matrix.
//
// Fix vs. previous revision: Nsend/Nrecv were leaked (their free() was
// commented out); they are now released once the exchange completes.
csr * transpose(agmgLevel* level, csr *A,
                hlong *globalRowStarts, hlong *globalColStarts){
  // MPI info
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;

  csr *At = (csr *) calloc(1,sizeof(csr));

  At->Nrows = A->Ncols-A->NHalo;  // transposed rows = locally-owned columns
  At->Ncols = A->Nrows;
  At->diagNNZ = A->diagNNZ; //local entries remain local
  At->NlocalCols = At->Ncols;

  At->diagRowStarts = (dlong *) calloc(At->Nrows+1, sizeof(dlong));
  At->offdRowStarts = (dlong *) calloc(At->Nrows+1, sizeof(dlong));

  //start with local entries
  if (A->diagNNZ) {
    At->diagCols  = (dlong *)  calloc(At->diagNNZ, sizeof(dlong));
    At->diagCoefs = (dfloat *) calloc(At->diagNNZ, sizeof(dfloat));
  }

  // count the num of nonzeros per row for transpose
  for(dlong i=0; i<A->diagNNZ; i++){
    dlong row = A->diagCols[i];
    At->diagRowStarts[row+1]++;
  }

  // cumulative sum for rows
  for(dlong i=1; i<=At->Nrows; i++)
    At->diagRowStarts[i] += At->diagRowStarts[i-1];

  // scatter local entries into their transposed rows; counter[] tracks the
  // next free slot of each output row
  int *counter = (int *) calloc(At->Nrows+1,sizeof(int));
  for (dlong i=0; i<At->Nrows+1; i++)
    counter[i] = At->diagRowStarts[i];

  for(dlong i=0; i<A->Nrows; i++){
    const dlong Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
    for(dlong jj=Jstart; jj<Jend; jj++){
      dlong row = A->diagCols[jj];
      At->diagCols[counter[row]]  = i;
      At->diagCoefs[counter[row]] = A->diagCoefs[jj];
      counter[row]++;
    }
  }
  free(counter);

  //record global indexing of columns
  At->colMap = (hlong *) calloc(At->Ncols, sizeof(hlong));
  for (dlong i=0;i<At->Ncols;i++)
    At->colMap[i] = i + globalRowStarts[rank];

  //now the nonlocal entries. Need to reverse the halo exchange to send the nonzeros
  int tag = 999;

  nonzero_t *sendNonZeros = NULL;
  if (A->offdNNZ)
    sendNonZeros = (nonzero_t *) calloc(A->offdNNZ,sizeof(nonzero_t));

  // per-rank outgoing/incoming entry counts (calloc zero-initializes)
  int *Nsend = (int*) calloc(size, sizeof(int));
  int *Nrecv = (int*) calloc(size, sizeof(int));

  // copy data from nonlocal entries into the send buffer; each entry is
  // tagged with the rank that owns its (transposed) row
  for(dlong i=0;i<A->Nrows;++i){
    for (dlong j=A->offdRowStarts[i];j<A->offdRowStarts[i+1];j++) {
      hlong col = A->colMap[A->offdCols[j]]; //global ids
      for (int r=0;r<size;r++) { //find owner's rank
        if ((globalColStarts[r]-1<col) && (col < globalColStarts[r+1])) {
          Nsend[r]++;
          sendNonZeros[j].owner = r;
        }
      }
      sendNonZeros[j].row = col;
      sendNonZeros[j].col = i + globalRowStarts[rank]; //global ids
      sendNonZeros[j].val = A->offdCoefs[j];
    }
  }

  //sort outgoing nonzeros by owner, then row and col
  if (A->offdNNZ)
    qsort(sendNonZeros, A->offdNNZ, sizeof(nonzero_t), compareNonZero);

  MPI_Alltoall(Nsend, 1, MPI_INT, Nrecv, 1, MPI_INT, agmg::comm);

  //count incoming nonzeros
  At->offdNNZ = 0;
  for (int r=0;r<size;r++)
    At->offdNNZ += Nrecv[r];

  nonzero_t *recvNonZeros = NULL;
  if (At->offdNNZ)
    recvNonZeros = (nonzero_t *) calloc(At->offdNNZ,sizeof(nonzero_t));

  // initiate immediate sends and receives to each other process as needed;
  // A's halo request arrays are reused as scratch (roles swapped because
  // this is a reverse exchange)
  int recvOffset = 0;
  int sendOffset = 0;
  int sendMessage = 0, recvMessage = 0;
  for(int r=0;r<size;++r){
    if (At->offdNNZ) {
      if(Nrecv[r]) {
        MPI_Irecv(((char*)recvNonZeros)+recvOffset, Nrecv[r]*sizeof(nonzero_t),
                  MPI_CHAR, r, tag, agmg::comm,
                  (MPI_Request*)A->haloSendRequests+recvMessage);
        recvOffset += Nrecv[r]*sizeof(nonzero_t);
        ++recvMessage;
      }
    }
    if (A->offdNNZ) {
      if(Nsend[r]){
        MPI_Isend(((char*)sendNonZeros)+sendOffset, Nsend[r]*sizeof(nonzero_t),
                  MPI_CHAR, r, tag, agmg::comm,
                  (MPI_Request*)A->haloRecvRequests+sendMessage);
        sendOffset += Nsend[r]*sizeof(nonzero_t);
        ++sendMessage;
      }
    }
  }

  // Wait for all sent messages to have left and received messages to have arrived
  if (A->offdNNZ) {
    MPI_Status *sendStatus = (MPI_Status*) calloc(sendMessage, sizeof(MPI_Status));
    MPI_Waitall(sendMessage, (MPI_Request*)A->haloRecvRequests, sendStatus);
    free(sendStatus);
  }
  if (At->offdNNZ) {
    MPI_Status *recvStatus = (MPI_Status*) calloc(recvMessage, sizeof(MPI_Status));
    MPI_Waitall(recvMessage, (MPI_Request*)A->haloSendRequests, recvStatus);
    free(recvStatus);
  }

  if (A->offdNNZ) free(sendNonZeros);
  free(Nsend); free(Nrecv);  // previously leaked: this free was commented out

  if (At->offdNNZ) {
    //sort recieved nonzeros by row and col
    qsort(recvNonZeros, At->offdNNZ, sizeof(nonzero_t), compareNonZero);

    hlong *offdCols = (hlong *) calloc(At->offdNNZ,sizeof(hlong));
    At->offdCols  = (dlong *)  calloc(At->offdNNZ,sizeof(dlong));
    At->offdCoefs = (dfloat *) calloc(At->offdNNZ, sizeof(dfloat));

    //find row starts
    for(dlong n=0;n<At->offdNNZ;++n) {
      dlong row = (dlong) (recvNonZeros[n].row - globalColStarts[rank]);
      At->offdRowStarts[row+1]++;
    }
    //cumulative sum
    for (dlong i=0;i<At->Nrows;i++)
      At->offdRowStarts[i+1] += At->offdRowStarts[i];

    //fill cols and coefs (entries already arrive sorted by row, then col)
    for (dlong i=0; i<At->Nrows; i++) {
      for (dlong j=At->offdRowStarts[i]; j<At->offdRowStarts[i+1]; j++) {
        offdCols[j]      = recvNonZeros[j].col;
        At->offdCoefs[j] = recvNonZeros[j].val;
      }
    }
    free(recvNonZeros);

    //we now need to reorder the x vector for the halo, and shift the column indices
    hlong *col = (hlong *) calloc(At->offdNNZ,sizeof(hlong));
    for (dlong n=0;n<At->offdNNZ;n++)
      col[n] = offdCols[n]; //copy non-local column global ids

    //sort by global index
    std::sort(col,col+At->offdNNZ);

    //count unique non-local column ids
    At->NHalo = 0;
    for (dlong n=1;n<At->offdNNZ;n++)
      if (col[n]!=col[n-1]) col[++At->NHalo] = col[n];
    At->NHalo++; //number of unique columns
    At->Ncols += At->NHalo;

    //save global column ids in colMap
    At->colMap = (hlong *) realloc(At->colMap,At->Ncols*sizeof(hlong));
    for (dlong n=0; n<At->NHalo; n++)
      At->colMap[n+At->NlocalCols] = col[n];
    free(col);

    //shift the column indices to local indexing
    for (dlong n=0;n<At->offdNNZ;n++) {
      hlong gcol = offdCols[n];
      for (dlong m=At->NlocalCols;m<At->Ncols;m++) {
        if (gcol == At->colMap[m])
          At->offdCols[n] = m;
      }
    }
    free(offdCols);
  }

  csrHaloSetup(At,globalRowStarts);

  return At;
}
// The single prolongation entry of one fine row: each row of P has exactly
// one nonzero, identified by its global coarse (aggregate) column and value.
typedef struct {
  hlong coarseId;  // global coarse column id of this row's P entry
  dfloat coef;     // value of that entry
} pEntry_t;
// One triplet of the Galerkin product R*A*P, in global coarse indices.
typedef struct {
  hlong I;      // global coarse row
  hlong J;      // global coarse column
  dfloat coef;  // contribution to (RAP)_IJ
} rapEntry_t;
int compareRAPEntries(const void *a, const void *b){
rapEntry_t *pa = (rapEntry_t *) a;
rapEntry_t *pb = (rapEntry_t *) b;
if (pa->I < pb->I) return -1;
if (pa->I > pb->I) return +1;
if (pa->J < pb->J) return -1;
if (pa->J > pb->J) return +1;
return 0;
};
// Form the coarse-level operator RAP = R*A*P for this AMG level.
// Because each row of P holds exactly one entry, every fine nonzero A_ij
// contributes a single triplet (I,J, P_iI*A_ij*P_jJ); triplets are routed to
// the rank owning coarse row I, merged by (I,J), and assembled into a csr.
// NOTE(review): the parameter R is never referenced in this body --
// presumably kept for interface symmetry (R == P^T); confirm before relying
// on it being applied.
csr *galerkinProd(agmgLevel *level, csr *R, csr *A, csr *P){
  // MPI info
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;
  hlong *globalAggStarts = level->globalAggStarts;
  // hlong *globalRowStarts = level->globalRowStarts;
  hlong globalAggOffset = globalAggStarts[rank];
  //The galerkin product can be computed as
  // (RAP)_IJ = sum_{i in Agg_I} sum_{j in Agg_j} P_iI A_ij P_jJ
  // Since each row of P has only one entry, we can share the ncessary
  // P entries, form the products, and send them to their destination rank
  dlong N = A->Nrows;
  dlong M = A->Ncols;
  //printf("Level has %d rows, and is making %d aggregates\n", N, globalAggStarts[rank+1]-globalAggStarts[rank]);
  pEntry_t *PEntries;
  if (M)
    PEntries = (pEntry_t *) calloc(M,sizeof(pEntry_t));
  else
    PEntries = (pEntry_t *) calloc(1,sizeof(pEntry_t));
  //record the entries of P that this rank has
// PEntries[i] is the single P entry of fine row i (global coarse id + value)
  dlong cnt =0;
  for (dlong i=0;i<N;i++) {
    for (dlong j=P->diagRowStarts[i];j<P->diagRowStarts[i+1];j++) {
      PEntries[cnt].coarseId = P->diagCols[j] + globalAggOffset; //global ID
      PEntries[cnt].coef = P->diagCoefs[j];
      cnt++;
    }
    for (dlong j=P->offdRowStarts[i];j<P->offdRowStarts[i+1];j++) {
      PEntries[cnt].coarseId = P->colMap[P->offdCols[j]]; //global ID
      PEntries[cnt].coef = P->offdCoefs[j];
      cnt++;
    }
  }
  pEntry_t *entrySendBuffer;
  if (A->NsendTotal)
    entrySendBuffer = (pEntry_t *) calloc(A->NsendTotal,sizeof(pEntry_t));
  //fill in the entires of P needed in the halo
// after this exchange PEntries also covers A's halo columns, so PEntries[col]
// is valid for every local or halo column index used below
  csrHaloExchange(A, sizeof(pEntry_t), PEntries, entrySendBuffer, PEntries+A->NlocalCols);
  if (A->NsendTotal) free(entrySendBuffer);
  rapEntry_t *RAPEntries;
  dlong totalNNZ = A->diagNNZ+A->offdNNZ;
  if (totalNNZ)
    RAPEntries = (rapEntry_t *) calloc(totalNNZ,sizeof(rapEntry_t));
  else
    RAPEntries = (rapEntry_t *) calloc(1,sizeof(rapEntry_t)); //MPI_AlltoAll doesnt like null pointers
  // Make the MPI_RAPENTRY_T data type
// describes rapEntry_t {hlong I; hlong J; dfloat coef;} by member offsets so
// triplets can be exchanged directly with MPI_Alltoallv
  MPI_Datatype MPI_RAPENTRY_T;
  MPI_Datatype dtype[3] = {MPI_HLONG, MPI_HLONG, MPI_DFLOAT};
  int blength[3] = {1, 1, 1};
  MPI_Aint addr[3], displ[3];
  MPI_Get_address ( &(RAPEntries[0]     ), addr+0);
  MPI_Get_address ( &(RAPEntries[0].J   ), addr+1);
  MPI_Get_address ( &(RAPEntries[0].coef), addr+2);
  displ[0] = 0;
  displ[1] = addr[1] - addr[0];
  displ[2] = addr[2] - addr[0];
  MPI_Type_create_struct (3, blength, displ, dtype, &MPI_RAPENTRY_T);
  MPI_Type_commit (&MPI_RAPENTRY_T);
  //for the RAP products
// one triplet per nonzero of A: (I,J,coef) = (row agg, col agg, P_iI*A_ij*P_jJ)
  cnt =0;
  for (dlong i=0;i<N;i++) {
    for (dlong j=A->diagRowStarts[i];j<A->diagRowStarts[i+1];j++) {
      dlong col = A->diagCols[j];
      dfloat coef = A->diagCoefs[j];
      RAPEntries[cnt].I = PEntries[i].coarseId;
      RAPEntries[cnt].J = PEntries[col].coarseId;
      RAPEntries[cnt].coef = coef*PEntries[i].coef*PEntries[col].coef;
      cnt++;
    }
  }
  for (dlong i=0;i<N;i++) {
    for (dlong j=A->offdRowStarts[i];j<A->offdRowStarts[i+1];j++) {
      dlong col = A->offdCols[j];
      dfloat coef = A->offdCoefs[j];
      RAPEntries[cnt].I = PEntries[i].coarseId;
      RAPEntries[cnt].J = PEntries[col].coarseId;
      RAPEntries[cnt].coef = PEntries[i].coef*coef*PEntries[col].coef;
      cnt++;
    }
  }
  //sort entries by the coarse row and col
// sorting by I groups triplets by destination rank, since each rank owns a
// contiguous range of coarse rows
  if (totalNNZ) qsort(RAPEntries, totalNNZ, sizeof(rapEntry_t), compareRAPEntries);
  int *sendCounts = (int *) calloc(size,sizeof(int));
  int *recvCounts = (int *) calloc(size,sizeof(int));
  int *sendOffsets = (int *) calloc(size+1,sizeof(int));
  int *recvOffsets = (int *) calloc(size+1,sizeof(int));
  for(dlong i=0;i<totalNNZ;++i) {
    hlong id = RAPEntries[i].I;
    for (int r=0;r<size;r++) {
      if (globalAggStarts[r]-1<id && id < globalAggStarts[r+1])
        sendCounts[r]++;
    }
  }
  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(sendCounts, 1, MPI_INT, recvCounts, 1, MPI_INT, agmg::comm);
  // find send and recv offsets for gather
  dlong recvNtotal = 0;
  for(int r=0;r<size;++r){
    sendOffsets[r+1] = sendOffsets[r] + sendCounts[r];
    recvOffsets[r+1] = recvOffsets[r] + recvCounts[r];
    recvNtotal += recvCounts[r];
  }
  rapEntry_t *recvRAPEntries;
  if (recvNtotal)
    recvRAPEntries = (rapEntry_t *) calloc(recvNtotal,sizeof(rapEntry_t));
  else
    recvRAPEntries = (rapEntry_t *) calloc(1,sizeof(rapEntry_t));//MPI_AlltoAll doesnt like null pointers
  MPI_Alltoallv( RAPEntries, sendCounts, sendOffsets, MPI_RAPENTRY_T,
                 recvRAPEntries, recvCounts, recvOffsets, MPI_RAPENTRY_T,
                 agmg::comm);
  //sort entries by the coarse row and col
  if (recvNtotal) qsort(recvRAPEntries, recvNtotal, sizeof(rapEntry_t), compareRAPEntries);
  //count total number of nonzeros;
// duplicates (equal I,J) are adjacent after the sort
  dlong nnz =0;
  if (recvNtotal) nnz++;
  for (dlong i=1;i<recvNtotal;i++)
    if ((recvRAPEntries[i].I!=recvRAPEntries[i-1].I)||
        (recvRAPEntries[i].J!=recvRAPEntries[i-1].J)) nnz++;
  rapEntry_t *newRAPEntries;
  if (nnz)
    newRAPEntries = (rapEntry_t *) calloc(nnz,sizeof(rapEntry_t));
  else
    newRAPEntries = (rapEntry_t *) calloc(1,sizeof(rapEntry_t));
  //compress nonzeros
// merge duplicate (I,J) triplets by summing their coefficients
  nnz = 0;
  if (recvNtotal) newRAPEntries[nnz++] = recvRAPEntries[0];
  for (dlong i=1;i<recvNtotal;i++) {
    if ((recvRAPEntries[i].I!=recvRAPEntries[i-1].I)||
        (recvRAPEntries[i].J!=recvRAPEntries[i-1].J)) {
      newRAPEntries[nnz++] = recvRAPEntries[i];
    } else {
      newRAPEntries[nnz-1].coef += recvRAPEntries[i].coef;
    }
  }
  dlong numAggs = (dlong) (globalAggStarts[rank+1]-globalAggStarts[rank]); //local number of aggregates
  csr *RAP = (csr*) calloc(1,sizeof(csr));
  RAP->Nrows = numAggs;
  RAP->Ncols = numAggs;
  RAP->NlocalCols = numAggs;
  RAP->diagRowStarts = (dlong *) calloc(numAggs+1, sizeof(dlong));
  RAP->offdRowStarts = (dlong *) calloc(numAggs+1, sizeof(dlong));
// count diag vs offd entries per coarse row
  for (dlong n=0;n<nnz;n++) {
    dlong row = (dlong) (newRAPEntries[n].I - globalAggOffset);
    if ((newRAPEntries[n].J > globalAggStarts[rank]-1)&&
        (newRAPEntries[n].J < globalAggStarts[rank+1])) {
      RAP->diagRowStarts[row+1]++;
    } else {
      RAP->offdRowStarts[row+1]++;
    }
  }
  // cumulative sum
  for(dlong i=0; i<numAggs; i++) {
    RAP->diagRowStarts[i+1] += RAP->diagRowStarts[i];
    RAP->offdRowStarts[i+1] += RAP->offdRowStarts[i];
  }
  RAP->diagNNZ = RAP->diagRowStarts[numAggs];
  RAP->offdNNZ = RAP->offdRowStarts[numAggs];
// diagCols/diagCoefs are staging arrays: entries land here first, then get
// copied into RAP with the diagonal entry moved to the front of each row
  dlong *diagCols;
  dfloat *diagCoefs;
  if (RAP->diagNNZ) {
    RAP->diagCols = (dlong *) calloc(RAP->diagNNZ, sizeof(dlong));
    RAP->diagCoefs = (dfloat *) calloc(RAP->diagNNZ, sizeof(dfloat));
    diagCols = (dlong *) calloc(RAP->diagNNZ, sizeof(dlong));
    diagCoefs = (dfloat *) calloc(RAP->diagNNZ, sizeof(dfloat));
  }
  hlong *offdCols;
  if (RAP->offdNNZ) {
    offdCols = (hlong *) calloc(RAP->offdNNZ,sizeof(hlong));
    RAP->offdCols = (dlong *) calloc(RAP->offdNNZ,sizeof(dlong));
    RAP->offdCoefs = (dfloat *) calloc(RAP->offdNNZ, sizeof(dfloat));
  }
  dlong diagCnt =0;
  dlong offdCnt =0;
  for (dlong n=0;n<nnz;n++) {
    if ((newRAPEntries[n].J > globalAggStarts[rank]-1)&&
        (newRAPEntries[n].J < globalAggStarts[rank+1])) {
      diagCols[diagCnt] = (dlong) (newRAPEntries[n].J - globalAggOffset);
      diagCoefs[diagCnt] = newRAPEntries[n].coef;
      diagCnt++;
    } else {
      offdCols[offdCnt] = newRAPEntries[n].J;
      RAP->offdCoefs[offdCnt] = newRAPEntries[n].coef;
      offdCnt++;
    }
  }
  //move diagonal entries first
  for (dlong i=0;i<RAP->Nrows;i++) {
    dlong start = RAP->diagRowStarts[i];
    int cnt = 1;
    for (dlong j=RAP->diagRowStarts[i]; j<RAP->diagRowStarts[i+1]; j++) {
      if (diagCols[j] == i) { //move diagonal to first entry
        RAP->diagCols[start] = diagCols[j];
        RAP->diagCoefs[start] = diagCoefs[j];
      } else {
        RAP->diagCols[start+cnt] = diagCols[j];
        RAP->diagCoefs[start+cnt] = diagCoefs[j];
        cnt++;
      }
    }
  }
  //record global indexing of columns
  RAP->colMap = (hlong *) calloc(RAP->Ncols, sizeof(hlong));
  for (dlong i=0;i<RAP->Ncols;i++)
    RAP->colMap[i] = i + globalAggOffset;
  if (RAP->offdNNZ) {
    //we now need to reorder the x vector for the halo, and shift the column indices
    hlong *col = (hlong *) calloc(RAP->offdNNZ,sizeof(hlong));
    for (dlong n=0;n<RAP->offdNNZ;n++)
      col[n] = offdCols[n]; //copy non-local column global ids
    //sort by global index
    std::sort(col,col+RAP->offdNNZ);
    //count unique non-local column ids
    RAP->NHalo = 0;
    for (dlong n=1;n<RAP->offdNNZ;n++)
      if (col[n]!=col[n-1]) col[++RAP->NHalo] = col[n];
    RAP->NHalo++; //number of unique columns
    RAP->Ncols += RAP->NHalo;
    //save global column ids in colMap
    RAP->colMap = (hlong *) realloc(RAP->colMap,RAP->Ncols*sizeof(hlong));
    for (dlong n=0; n<RAP->NHalo; n++)
      RAP->colMap[n+RAP->NlocalCols] = col[n];
    //shift the column indices to local indexing
    for (dlong n=0;n<RAP->offdNNZ;n++) {
      hlong gcol = offdCols[n];
      for (dlong m=RAP->NlocalCols;m<RAP->Ncols;m++) {
        if (gcol == RAP->colMap[m])
          RAP->offdCols[n] = m;
      }
    }
    free(col);
    free(offdCols);
  }
  csrHaloSetup(RAP,globalAggStarts);
  //clean up
  MPI_Barrier(agmg::comm);
  MPI_Type_free(&MPI_RAPENTRY_T);
  free(PEntries);
  free(sendCounts); free(recvCounts);
  free(sendOffsets); free(recvOffsets);
  if (RAP->diagNNZ) {
    free(diagCols);
    free(diagCoefs);
  }
  free(RAPEntries);
  free(newRAPEntries);
  free(recvRAPEntries);
  return RAP;
}
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_fp32)
// op(A') function: GB (_unop_tran__identity_int8_fp32)
// C type: int8_t
// A type: float
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij
// type of A's entries (the input)
#define GB_ATYPE \
float
// type of C's entries (the output)
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
// cij = op (aij)
// combined load/cast/store for one entry: Cx [pC] = cast (Ax [pA])
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_fp32)
(
int8_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Copy A into C entrywise, typecasting float -> int8_t; the identity
    // unary op means the only work per entry is the cast itself.
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = GB_cast_to_int8_t ((double) (Ax [p])) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only positions flagged in Ab are touched
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = GB_cast_to_int8_t ((double) (Ax [p])) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = identity (cast (A')): transpose A while typecasting float -> int8_t.
// The loop itself lives in the GB_unop_transpose.c template, which is
// specialized through the GB_* macros defined earlier in this file.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[])
{
  /* Each thread of an OpenMP parallel region prints a greeting; the master
     thread (id 0) additionally reports the team size.
     Uncomment the calls below to pin the thread count manually:
       omp_set_dynamic(0);
       omp_set_num_threads(5);                                              */
  int thread_count, thread_id;

#pragma omp parallel private(thread_count, thread_id)
  {
    thread_id = omp_get_thread_num();
    printf("Hello World from thread = %d\n", thread_id);

    /* only the master thread queries and prints the team size */
    if (thread_id == 0)
    {
      thread_count = omp_get_num_threads();
      printf("Number of threads = %d\n", thread_count);
    }
  }

  return 0;
}
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_uint64
// op(A') function: GB_tran__lnot_int64_uint64
// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)
// type of A's entries (the input)
#define GB_ATYPE \
uint64_t
// type of C's entries (the output)
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
// combined load/cast/apply/store for one entry
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int64_uint64
(
int64_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = !(((int64_t) Ax [p]) != 0): logical-not of each entry,
    // written with the GB_CAST_OP macro expanded inline for clarity.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        int64_t x = (int64_t) aij ;
        Cx [p] = !(x != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
// C = lnot (cast (A')): transpose A while typecasting uint64_t -> int64_t
// and applying logical-not. The loop lives in the GB_unaryop_transpose.c
// template (phase 2 of its two-phase transpose), specialized via the GB_*
// macros defined earlier in this file.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/shear.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /* Map the external argument order sx,ry,rx,sy,tx,ty onto the internal
     coefficient order c0,c2,c4,c1,c3,c5.  Entries 0 and 5 stay put; the
     middle four follow the single cycle 1 -> 3 -> 4 -> 2 -> 1, so one
     carried value suffices. */
  double carry = affine[1];

  affine[1]=affine[2];
  affine[2]=affine[4];
  affine[4]=affine[3];
  affine[3]=carry;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* Map the internal coefficient order c0,c1,c2,c3,c4,c5 back to the
     external argument order sx,ry,rx,sy,tx,ty (the inverse permutation of
     AffineArgsToCoefficients).  Entries 0 and 5 stay put; the middle four
     follow the single cycle 1 -> 2 -> 4 -> 3 -> 1. */
  double carry = coeff[1];

  coeff[1]=coeff[3];
  coeff[3]=coeff[4];
  coeff[4]=coeff[2];
  coeff[2]=carry;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* Invert a forward affine mapping; from "Digital Image Warping" by
     George Wolberg, page 50.  The reciprocal of the 2x2 determinant is
     folded into every output coefficient; the translation terms (2 and 5)
     are the cross products of the remaining coefficients. */
  double rdet;

  rdet=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  /* linear part: scaled adjugate of the 2x2 block */
  inverse[0]=rdet*coeff[4];
  inverse[1]=rdet*(-coeff[1]);
  inverse[3]=rdet*(-coeff[3]);
  inverse[4]=rdet*coeff[0];
  /* translation part */
  inverse[2]=rdet*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[5]=rdet*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* Invert a forward perspective mapping via the adjoint of its 3x3
     coefficient matrix; from "Digital Image Warping" by George Wolberg,
     page 53.  The ninth coefficient is implicitly 1, and the reciprocal
     of the (partial) determinant scales every output term. */
  double rdet;

  rdet=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=rdet*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=rdet*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=rdet*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=rdet*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=rdet*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=rdet*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=rdet*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=rdet*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /* Return the number of terms for a 2d polynomial of the given order.
     Valid orders are the integers 1..5, plus the special bi-linear order
     1.5; anything else returns 0 so callers can reject the argument.
     Fix: the previous test `(order-1.5) > MagickEpsilon` silently accepted
     any non-integer order below 1.5 (e.g. 1.2), yielding a bogus term
     count; fabs() restores "only 1.5 is the non-integer exception". */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Evaluate the n'th 2d polynomial basis term x^i * y^j at (x,y).
     The exponent tables encode the same term ordering as before: constant,
     affine, the special bi-linear x*y term, then each higher total degree
     from x^d down to y^d (quintic maximum, 21 terms).  Out-of-range n
     yields 0 (should never happen). */
  static const int x_pow[21] =
    { 0, 1, 0, 1, 2, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0 };
  static const int y_pow[21] =
    { 0, 0, 1, 1, 0, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5 };
  double term;
  int i;

  if ( n < 0 || n > 20 )
    return( 0 ); /* should never happen */
  term=1.0;
  for (i=0; i < x_pow[n]; i++)
    term*=x;
  for (i=0; i < y_pow[n]; i++)
    term*=y;
  return( term );
}
static const char *poly_basis_str(ssize_t n)
{
  /* Return the multiplier string for polynomial basis term 'n', using
     "ii"/"jj" for the x/y coordinate variables.  Ordering matches
     poly_basis_fn() exactly; indexes beyond the quintic terms yield
     "UNKNOWN". */
  static const char *term_str[21] =
    {
      "",                /* constant */
      "*ii",
      "*jj",             /* affine     order = 1    terms = 3 */
      "*ii*jj",          /* bilinear   order = 1.5  terms = 4 */
      "*ii*ii",
      "*jj*jj",          /* quadratic  order = 2    terms = 6 */
      "*ii*ii*ii",
      "*ii*ii*jj",
      "*ii*jj*jj",
      "*jj*jj*jj",       /* cubic      order = 3    terms = 10 */
      "*ii*ii*ii*ii",
      "*ii*ii*ii*jj",
      "*ii*ii*jj*jj",
      "*ii*jj*jj*jj",
      "*jj*jj*jj*jj",    /* quartic    order = 4    terms = 15 */
      "*ii*ii*ii*ii*ii",
      "*ii*ii*ii*ii*jj",
      "*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj",
      "*ii*jj*jj*jj*jj",
      "*jj*jj*jj*jj*jj"  /* quintic    order = 5    terms = 21 */
    };
  if ( n < 0 || n > 20 )
    return( "UNKNOWN" ); /* should never happen */
  return( term_str[n] );
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* Partial derivative of polynomial basis term 'n' with respect to x:
     one power of x is removed from the term.
     NOTE(review): the numeric multipliers of a true derivative (e.g. the
     2 in d(x^2)/dx = 2*x) are NOT included, matching the original
     per-term listing -- presumably only the relative magnitude matters
     to callers; confirm before relying on exact derivative values. */
  static const int xpow[21] =
    { 0, 1, 0, 1, 2, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0 };
  static const int ypow[21] =
    { 0, 0, 1, 1, 0, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5 };
  double
    term;
  ssize_t
    k;
  if ( n < 0 || n > 20 || xpow[n] == 0 )
    return( 0.0 ); /* no x factor in term, or invalid 'n' */
  term = 1.0;
  for (k=1; k < (ssize_t) xpow[n]; k++)  /* one less power of x */
    term *= x;
  for (k=0; k < (ssize_t) ypow[n]; k++)
    term *= y;
  return( term );
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* Partial derivative of polynomial basis term 'n' with respect to y:
     one power of y is removed from the term.  As with poly_basis_dx(),
     the numeric multipliers of a true derivative are dropped.
     This replaces the old poly_basis_dx(n-1,...) delegation trick, which
     only held for n >= 6 because the bilinear x*y term (n=3) is ordered
     ahead of the quadratic terms. */
  static const int xpow[21] =
    { 0, 1, 0, 1, 2, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0 };
  static const int ypow[21] =
    { 0, 0, 1, 1, 0, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5 };
  double
    term;
  ssize_t
    k;
  if ( n < 0 || n > 20 || ypow[n] == 0 )
    return( 0.0 ); /* no y factor in term, or invalid 'n' */
  term = 1.0;
  for (k=0; k < (ssize_t) xpow[n]; k++)
    term *= x;
  for (k=1; k < (ssize_t) ypow[n]; k++)  /* one less power of y */
    term *= y;
  return( term );
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];
  Image
    *deskew_image;
  /*
    Affine transform image: repackage the affine matrix as the six
    coefficients expected by DistortImage() and delegate the work to it.
  */
  assert(image != (const Image *) NULL);  /* was missing: image is dereferenced below */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* DistortImage() expects the matrix in sx,rx,ry,sy,tx,ty order */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usally r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortImageMethod *method,const size_t number_arguments,
const double *arguments,size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
           + Will only work with a 2 dimensional Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
           + Will only work with a 2 dimensional Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using an EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"
  Image
    *resize_image,
    *tmp_image;
  RectangleInfo
    crop_area;
  double
    distort_args[12];
  VirtualPixelMethod
    vp_save;
  /*
    Distort resize image: perform the resize as an affine scaling distortion
    (EWA resampled), then crop the over-sized result back to columns x rows.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /* Affine control-point pairs: (0,0)->(0,0), (cols,0)->(columns,0),
     (0,rows)->(0,rows'); only the non-zero coordinates need setting. */
  (void) memset(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /* remember caller's virtual pixel method; restored on the result below */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if ( tmp_image == (Image *) NULL )
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
  if (image->matte == MagickFalse)
    {
      /*
        Image has no transparency channel, so we are free to use it to
        mark the virtual-pixel (out of bounds) areas of the distortion.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL )
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      /* inherit from the distorted result, matching the branch below */
      InheritException(exception,&resize_image->exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;
      /* distort alpha channel separately */
      (void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_alpha == (Image *) NULL )
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if ( tmp_image == (Image *) NULL )
        {
          /* was leaked here before: free the already distorted alpha */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
      (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
        0,0);
      InheritException(exception,&resize_image->exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save);
  /*
    Clean up the results of the Distortion: crop the 'bestfit' distorted
    image (which may be slightly over-sized) down to the exact size asked.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      /* resized image keeps original matte/compose; page offsets reset */
      resize_image->matte=image->matte;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
%      distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficents, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if (method == ResizeDistortion)
{
if (number_arguments != 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t) arguments[0],
(size_t) arguments[1],exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff=GenerateCoefficients(image,&method,number_arguments,arguments,0,
exception);
if (coeff == (double *) NULL)
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidGeometry","`%s' `%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
register ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* The user provided a 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s","-define distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace);
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register IndexPacket
*magick_restrict indexes;
register ssize_t
i;
register PixelPacket
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5);
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 1
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  MagickRealType
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: normalize into (-45,45] plus a count of
    90-degree integral rotations, so small shears stay small.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /* residual shear factors for the remaining (-45,45] degree rotation */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General rotation: clone the source, use background virtual pixels for
    the exposed corners, and delegate to the SRT distortion.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod);
  /* FIX: was garbled as "°rees" (HTML-entity mojibake of "&degrees") */
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const ChannelType channel,const SparseColorMethod method,
  const size_t number_arguments,const double *arguments,
  ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* Determine number of color values needed per control point */
  number_colors=0;
  if ( channel & RedChannel ) number_colors++;
  if ( channel & GreenChannel ) number_colors++;
  if ( channel & BlueChannel ) number_colors++;
  if ( channel & IndexChannel ) number_colors++;
  if ( channel & OpacityChannel ) number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
  { DistortImageMethod
      distort_method;

    distort_method=(DistortImageMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some distort methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse color method. This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5; /* sqrt() the squared distance for inverse */
  }

  /* Verbose output */
  if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    {
      /* FIX: coeff was previously leaked on this error path */
      coeff=(double *) RelinquishMagickMemory(coeff);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      /* FIX: coeff was previously leaked on this error path */
      coeff=(double *) RelinquishMagickMemory(coeff);
      InheritException(exception,&image->exception);
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      MagickPixelPacket
        pixel; /* pixel to assign to distorted image */

      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
      GetMagickPixelPacket(sparse_image,&pixel);
      /* iterate over the row actually queued above; sparse_image is a
         full-size clone of image so the widths are identical */
      for (i=0; i < (ssize_t) sparse_image->columns; i++)
      {
        SetMagickPixelPacket(image,q,indexes,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & GreenChannel )
              pixel.green = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & BlueChannel )
              pixel.blue = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & IndexChannel )
              pixel.index = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & GreenChannel )
              pixel.green = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & BlueChannel )
              pixel.blue = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & IndexChannel )
              pixel.index = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            if ( channel & RedChannel ) pixel.red = 0.0;
            if ( channel & GreenChannel ) pixel.green = 0.0;
            if ( channel & BlueChannel ) pixel.blue = 0.0;
            if ( channel & IndexChannel ) pixel.index = 0.0;
            if ( channel & OpacityChannel ) pixel.opacity = 0.0;
            denominator = 0.0;
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ( channel & RedChannel )
                pixel.red += arguments[x++]*weight;
              if ( channel & GreenChannel )
                pixel.green += arguments[x++]*weight;
              if ( channel & BlueChannel )
                pixel.blue += arguments[x++]*weight;
              if ( channel & IndexChannel )
                pixel.index += arguments[x++]*weight;
              if ( channel & OpacityChannel )
                pixel.opacity += arguments[x++]*weight;
              denominator += weight;
            }
            if ( channel & RedChannel ) pixel.red /= denominator;
            if ( channel & GreenChannel ) pixel.green /= denominator;
            if ( channel & BlueChannel ) pixel.blue /= denominator;
            if ( channel & IndexChannel ) pixel.index /= denominator;
            if ( channel & OpacityChannel ) pixel.opacity /= denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ( channel & RedChannel ) pixel.red = arguments[x++];
                if ( channel & GreenChannel ) pixel.green = arguments[x++];
                if ( channel & BlueChannel ) pixel.blue = arguments[x++];
                if ( channel & IndexChannel ) pixel.index = arguments[x++];
                if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ( channel & RedChannel ) pixel.red = arguments[x++];
                if ( channel & GreenChannel ) pixel.green = arguments[x++];
                if ( channel & BlueChannel ) pixel.blue = arguments[x++];
                if ( channel & IndexChannel ) pixel.index = arguments[x++];
                if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image */
        if ( channel & RedChannel )
          pixel.red=ClampPixel(QuantumRange*pixel.red);
        if ( channel & GreenChannel )
          pixel.green=ClampPixel(QuantumRange*pixel.green);
        if ( channel & BlueChannel )
          pixel.blue=ClampPixel(QuantumRange*pixel.blue);
        if ( channel & IndexChannel )
          pixel.index=ClampPixel(QuantumRange*pixel.index);
        if ( channel & OpacityChannel )
          pixel.opacity=ClampPixel(QuantumRange*pixel.opacity);
        SetPixelPacket(sparse_image,&pixel,q,indexes);
        q++;
        indexes++;
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
schur_eliminator_impl.h | // Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
// Chunk::start ?
#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
// Eigen has an internal threshold switching between different matrix
// multiplication algorithms. In particular for matrices larger than
// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
// matrix matrix product algorithm that has a higher setup cost. For
// matrix sizes close to this threshold, especially when the matrices
// are thin and long, the default choice may not be optimal. This is
// the case for us, as the default choice causes a 30% performance
// regression when we moved from Eigen2 to Eigen3.
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
#include <algorithm>
#include <map>
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/map_util.h"
#include "ceres/schur_eliminator.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
#include "Eigen/Dense"
#include "glog/logging.h"
namespace ceres {
namespace internal {
// Destructor: frees the per-column-block mutexes allocated by Init() and
// stored (as raw owning pointers) in rhs_locks_.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
STLDeleteElements(&rhs_locks_);
}
// Precompute everything Eliminate()/BackSubstitute() need: the row layout
// of the reduced (Schur complement) linear system, the list of chunks
// (runs of row blocks sharing the same e_block), the scratch buffer size,
// and the per-block mutexes protecting the reduced rhs.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
CHECK_GT(num_eliminate_blocks, 0)
<< "SchurComplementSolver cannot be initialized with "
<< "num_eliminate_blocks = 0.";
num_eliminate_blocks_ = num_eliminate_blocks;
const int num_col_blocks = bs->cols.size();
const int num_row_blocks = bs->rows.size();
buffer_size_ = 1;
chunks_.clear();
lhs_row_layout_.clear();
int lhs_num_rows = 0;
// Add a map object for each block in the reduced linear system
// and build the row/column block structure of the reduced linear
// system.
lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
lhs_num_rows += bs->cols[i].size;
}
int r = 0;
// Iterate over the row blocks of A, and detect the chunks. The
// matrix should already have been ordered so that all rows
// containing the same y block are vertically contiguous. Along
// the way also compute the amount of space each chunk will need
// to perform the elimination.
while (r < num_row_blocks) {
const int chunk_block_id = bs->rows[r].cells.front().block_id;
// Rows whose first cell is not an e_block mark the end of the
// chunked region; they are handled later by NoEBlockRowsUpdate.
if (chunk_block_id >= num_eliminate_blocks_) {
break;
}
chunks_.push_back(Chunk());
Chunk& chunk = chunks_.back();
chunk.size = 0;
chunk.start = r;
int buffer_size = 0;
const int e_block_size = bs->cols[chunk_block_id].size;
// Add to the chunk until the first block in the row is
// different than the one in the first row for the chunk.
while (r + chunk.size < num_row_blocks) {
const CompressedRow& row = bs->rows[r + chunk.size];
if (row.cells.front().block_id != chunk_block_id) {
break;
}
// Iterate over the blocks in the row, ignoring the first
// block since it is the one to be eliminated.
for (int c = 1; c < row.cells.size(); ++c) {
const Cell& cell = row.cells[c];
// Each distinct f_block in the chunk gets one slot of size
// e_block_size x f_block_size in the chunk's E'F buffer.
if (InsertIfNotPresent(
&(chunk.buffer_layout), cell.block_id, buffer_size)) {
buffer_size += e_block_size * bs->cols[cell.block_id].size;
}
}
// NOTE(review): unqualified max — presumably brought in by a using
// declaration in one of the project headers; confirm.
buffer_size_ = max(buffer_size, buffer_size_);
++chunk.size;
}
CHECK_GT(chunk.size, 0);
r += chunk.size;
}
const Chunk& chunk = chunks_.back();
uneliminated_row_begins_ = chunk.start + chunk.size;
if (num_threads_ > 1) {
// Presumably randomizes chunk order to even out load across threads.
// NOTE(review): std::random_shuffle was deprecated in C++14 and
// removed in C++17; std::shuffle is the replacement when upgrading.
random_shuffle(chunks_.begin(), chunks_.end());
}
buffer_.reset(new double[buffer_size_ * num_threads_]);
// chunk_outer_product_buffer_ only needs to store e_block_size *
// f_block_size, which is always less than buffer_size_, so we just
// allocate buffer_size_ per thread.
chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);
STLDeleteElements(&rhs_locks_);
rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
rhs_locks_[i] = new Mutex;
}
}
// Form the reduced linear system (the Schur complement) by eliminating the
// e_blocks:
//
//   lhs = F'F - F'E(E'E)^{-1}E'F    (plus D^2 on its diagonal if D != NULL)
//   rhs = F'b - F'E(E'E)^{-1}E'b
//
// Chunks are processed in parallel; per-cell mutexes in lhs and
// rhs_locks_ serialize the concurrent updates.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Eliminate(const BlockSparseMatrix* A,
const double* b,
const double* D,
BlockRandomAccessMatrix* lhs,
double* rhs) {
if (lhs->num_rows() > 0) {
lhs->SetZero();
VectorRef(rhs, lhs->num_rows()).setZero();
}
const CompressedRowBlockStructure* bs = A->block_structure();
const int num_col_blocks = bs->cols.size();
// Add the diagonal to the schur complement.
if (D != NULL) {
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
const int block_id = i - num_eliminate_blocks_;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block_id, block_id,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block_size = bs->cols[i].size;
typename EigenTypes<kFBlockSize>::ConstVectorRef
diag(D + bs->cols[i].position, block_size);
// D holds the square root of the diagonal, hence the square here.
CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
m.block(r, c, block_size, block_size).diagonal()
+= diag.array().square().matrix();
}
}
}
// Eliminate y blocks one chunk at a time. For each chunk,
// compute the entries of the normal equations and the gradient
// vector block corresponding to the y block and then apply
// Gaussian elimination to them. The matrix ete stores the normal
// matrix corresponding to the block being eliminated and array
// buffer_ contains the non-zero blocks in the row corresponding
// to this y block in the normal equations. This computation is
// done in ChunkDiagonalBlockAndGradient. UpdateRhs then applies
// gaussian elimination to the rhs of the normal equations,
// updating the rhs of the reduced linear system by modifying rhs
// blocks for all the z blocks that share a row block/residual
// term with the y block. EliminateRowOuterProduct does the
// corresponding operation for the lhs of the reduced linear
// system.
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
for (int i = 0; i < chunks_.size(); ++i) {
#ifdef CERES_USE_OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
// Each thread gets its own disjoint slice of the shared scratch buffer.
double* buffer = buffer_.get() + thread_id * buffer_size_;
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
VectorRef(buffer, buffer_size_).setZero();
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
ete(e_block_size, e_block_size);
if (D != NULL) {
const typename EigenTypes<kEBlockSize>::ConstVectorRef
diag(D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
}
FixedArray<double, 8> g(e_block_size);
typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
gref.setZero();
// We are going to be computing
//
//   S += F'F - F'E(E'E)^{-1}E'F
//
// for each Chunk. The computation is broken down into a number of
// function calls as below.
// Compute the outer product of the e_blocks with themselves (ete
// = E'E). Compute the product of the e_blocks with the
// corresponding f_blocks (buffer = E'F), the gradient of the terms
// in this chunk (g) and add the outer product of the f_blocks to
// Schur complement (S += F'F).
ChunkDiagonalBlockAndGradient(
chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);
// Normally one wouldn't compute the inverse explicitly, but
// e_block_size will typically be a small number like 3, in
// which case its much faster to compute the inverse once and
// use it to multiply other matrices/vectors instead of doing a
// Solve call over and over again.
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
ete
.template selfadjointView<Eigen::Upper>()
.llt()
.solve(Matrix::Identity(e_block_size, e_block_size));
// For the current chunk compute and update the rhs of the reduced
// linear system.
//
//   rhs = F'b - F'E(E'E)^(-1) E'b
FixedArray<double, 8> inverse_ete_g(e_block_size);
MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
inverse_ete.data(),
e_block_size,
e_block_size,
g.get(),
inverse_ete_g.get());
UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);
// S -= F'E(E'E)^{-1}E'F
ChunkOuterProduct(bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
}
// For rows with no e_blocks, the schur complement update reduces to
// S += F'F.
NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}
// Given the solution z of the reduced linear system, recover the
// eliminated variables y by back substitution: for each chunk,
//
//   y = (E'E)^{-1} E'(b - F z)
//
// where ete = E'E is re-accumulated here (plus D^2 if D != NULL).
// Chunks touch disjoint y blocks, so the parallel loop needs no locking.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
BackSubstitute(const BlockSparseMatrix* A,
const double* b,
const double* D,
const double* z,
double* y) {
const CompressedRowBlockStructure* bs = A->block_structure();
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
for (int i = 0; i < chunks_.size(); ++i) {
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
double* y_ptr = y + bs->cols[e_block_id].position;
typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
ete(e_block_size, e_block_size);
if (D != NULL) {
const typename EigenTypes<kEBlockSize>::ConstVectorRef
diag(D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
}
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[chunk.start + j];
const Cell& e_cell = row.cells.front();
DCHECK_EQ(e_block_id, e_cell.block_id);
// sj = b_j - F_j z  (the residual of this row after fixing z).
FixedArray<double, 8> sj(row.block.size);
typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
typename EigenTypes<kRowBlockSize>::ConstVectorRef
(b + bs->rows[chunk.start + j].block.position, row.block.size);
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
const int f_block_size = bs->cols[f_block_id].size;
const int r_block = f_block_id - num_eliminate_blocks_;
MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
values + row.cells[c].position, row.block.size, f_block_size,
z + lhs_row_layout_[r_block],
sj.get());
}
// y += E_j' sj
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
sj.get(),
y_ptr);
// ete += E_j' E_j
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete.data(), 0, 0, e_block_size, e_block_size);
}
// Solve (E'E) y = E'(b - Fz) in place.
ete.llt().solveInPlace(y_block);
}
}
// Update the rhs of the reduced linear system. Compute
//
// F'b - F'E(E'E)^(-1) E'b
// For one chunk, apply Gaussian elimination to the rhs:
// for each row j of the chunk, form sj = b_j - E_j * inverse_ete_g
// (where inverse_ete_g = (E'E)^{-1} E'b) and accumulate F_j' sj into the
// rhs blocks of the reduced system. rhs_locks_ serializes concurrent
// updates to the same rhs block across chunks.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
const double* inverse_ete_g,
double* rhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
int b_pos = bs->rows[row_block_counter].block.position;
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
const Cell& e_cell = row.cells.front();
// sj = b_j
typename EigenTypes<kRowBlockSize>::Vector sj =
typename EigenTypes<kRowBlockSize>::ConstVectorRef
(b + b_pos, row.block.size);
// sj -= E_j * (E'E)^{-1} E'b
MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
values + e_cell.position, row.block.size, e_block_size,
inverse_ete_g, sj.data());
for (int c = 1; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
// rhs[block] += F_jc' sj, under the per-block lock.
CeresMutexLock l(rhs_locks_[block]);
MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
values + row.cells[c].position,
row.block.size, block_size,
sj.data(), rhs + lhs_row_layout_[block]);
}
b_pos += row.block.size;
}
}
// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
// E F
// [ y11 0 0 0 | z11 0 0 0 z51]
// [ y12 0 0 0 | z12 z22 0 0 0]
//
// this function computes two matrices. The diagonal block matrix
//
// ete = y11 * y11' + y12 * y12'
//
// and the off diagonal blocks in the Gauss-Newton Hessian.
//
// buffer = [y11'(z11 + z12), y12' * z22, y11' * z51]
//
// which are zero compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
// For one chunk, accumulate ete += E'E, g += E'b, buffer = E'F, and add
// the F'F contribution of each row with f_blocks to the Schur complement
// (via EBlockRowOuterProduct).
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
const Chunk& chunk,
const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
double* g,
double* buffer,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
int b_pos = bs->rows[row_block_counter].block.position;
const int e_block_size = ete->rows();
// Iterate over the rows in this chunk, for each row, compute the
// contribution of its F blocks to the Schur complement, the
// contribution of its E block to the matrix E'E (ete), and the
// corresponding block in the gradient vector.
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
// Rows with only an e_block contribute nothing to F'F.
if (row.cells.size() > 1) {
EBlockRowOuterProduct(A, row_block_counter + j, lhs);
}
// Extract the e_block, ETE += E_i' E_i
const Cell& e_cell = row.cells.front();
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete->data(), 0, 0, e_block_size, e_block_size);
// g += E_i' b_i
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
b + b_pos,
g);
// buffer = E'F. This computation is done by iterating over the
// f_blocks for each row in the chunk.
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
const int f_block_size = bs->cols[f_block_id].size;
// buffer_layout maps each f_block to its slot, set up in Init().
double* buffer_ptr =
buffer + FindOrDie(chunk.buffer_layout, f_block_id);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + row.cells[c].position, row.block.size, f_block_size,
buffer_ptr, 0, 0, e_block_size, f_block_size);
}
b_pos += row.block.size;
}
}
// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e
//
// S -= F'E(E'E)^{-1}E'F.
// Only the upper-triangular half (block2 >= block1) is computed; GetCell
// decides whether a given (block1, block2) cell is actually stored.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(const CompressedRowBlockStructure* bs,
const Matrix& inverse_ete,
const double* buffer,
const BufferLayoutType& buffer_layout,
BlockRandomAccessMatrix* lhs) {
// This is the most computationally expensive part of this
// code. Profiling experiments reveal that the bottleneck is not the
// computation of the right-hand matrix product, but memory
// references to the left hand side.
const int e_block_size = inverse_ete.rows();
BufferLayoutType::const_iterator it1 = buffer_layout.begin();
#ifdef CERES_USE_OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
// Per-thread scratch for b1' * ete^{-1}; disjoint slices per thread.
double* b1_transpose_inverse_ete =
chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
// S(i,j) -= bi' * ete^{-1} b_j
for (; it1 != buffer_layout.end(); ++it1) {
const int block1 = it1->first - num_eliminate_blocks_;
const int block1_size = bs->cols[it1->first].size;
MatrixTransposeMatrixMultiply
<kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
buffer + it1->second, e_block_size, block1_size,
inverse_ete.data(), e_block_size, e_block_size,
b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
BufferLayoutType::const_iterator it2 = it1;
for (; it2 != buffer_layout.end(); ++it2) {
const int block2 = it2->first - num_eliminate_blocks_;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[it2->first].size;
CeresMutexLock l(&cell_info->m);
MatrixMatrixMultiply
<kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
b1_transpose_inverse_ete, block1_size, e_block_size,
buffer + it2->second, e_block_size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
// For rows with no e_blocks, the schur complement update reduces to S
// += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.
// Starting at row_block_counter (the first row with no e_block, computed
// in Init() as uneliminated_row_begins_), accumulate rhs += F'b for every
// remaining row and delegate the S += F'F update to
// NoEBlockRowOuterProduct. Runs serially, hence no locking on rhs.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
BlockRandomAccessMatrix* lhs,
double* rhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const double* values = A->values();
for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
const CompressedRow& row = bs->rows[row_block_counter];
// All cells are f_blocks here (c starts at 0, unlike the chunked rows).
for (int c = 0; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
// Dynamic sizes: these rows are excluded from static structure
// detection (see the comment before NoEBlockRowOuterProduct).
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[c].position, row.block.size, block_size,
b + row.block.position,
rhs + lhs_row_layout_[block]);
}
NoEBlockRowOuterProduct(A, row_block_counter, lhs);
}
}
// A row r of A, which has no e_blocks gets added to the Schur
// Complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct except for
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
// detection will lead to most template parameters being set to
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
// Add S += r r' for a row r with no e_blocks: for every pair of cells
// (i, j >= i) in the row, accumulate the corresponding outer-product
// block into lhs. All multiplies use Eigen::Dynamic sizes by design
// (see the comment block above).
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
int row_block_index,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const CompressedRow& row = bs->rows[row_block_index];
const double* values = A->values();
for (int i = 0; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
DCHECK_GE(block1, 0);
const int block1_size = bs->cols[row.cells[i].block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block1,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
CeresMutexLock l(&cell_info->m);
// This multiply currently ignores the fact that this is a
// symmetric outer product.
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[i].position, row.block.size, block1_size,
cell_info->values, r, c, row_stride, col_stride);
}
// Off-diagonal blocks: j > i, so only the upper triangle is touched.
for (int j = i + 1; j < row.cells.size(); ++j) {
const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
DCHECK_GE(block2, 0);
DCHECK_LT(block1, block2);
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[row.cells[j].block_id].size;
CeresMutexLock l(&cell_info->m);
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[j].position, row.block.size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
// For a row that has an e_block, add the row's F'F contribution to the
// Schur complement: for every ordered pair of f_blocks (b1, b2) with
// b2 at or after b1, S(block1, block2) += b1' * b2. Cell 0 of the row is
// the e_block being eliminated and is skipped. Each target cell of lhs is
// updated under its own mutex, exactly as in NoEBlockRowOuterProduct, but
// here the multiplies use the static template block sizes.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
                      int row_block_index,
                      BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  // Cell 0 is the e_block; f_blocks start at cell 1.
  for (int c1 = 1; c1 < row.cells.size(); ++c1) {
    const Cell& cell1 = row.cells[c1];
    const int block1 = cell1.block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);
    const int block1_size = bs->cols[cell1.block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* diag_cell =
        lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride);
    if (diag_cell != NULL) {
      // Diagonal block: block += b1.transpose() * b1.
      CeresMutexLock diag_lock(&diag_cell->m);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
          values + cell1.position, row.block.size, block1_size,
          values + cell1.position, row.block.size, block1_size,
          diag_cell->values, r, c, row_stride, col_stride);
    }
    for (int c2 = c1 + 1; c2 < row.cells.size(); ++c2) {
      const Cell& cell2 = row.cells[c2];
      const int block2 = cell2.block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      const int block2_size = bs->cols[cell2.block_id].size;
      int r2, c2_offset, row_stride2, col_stride2;
      CellInfo* off_diag_cell = lhs->GetCell(block1, block2,
                                             &r2, &c2_offset,
                                             &row_stride2, &col_stride2);
      if (off_diag_cell != NULL) {
        // Off-diagonal block: block += b1.transpose() * b2.
        CeresMutexLock off_diag_lock(&off_diag_cell->m);
        MatrixTransposeMatrixMultiply
            <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
            values + cell1.position, row.block.size, block1_size,
            values + cell2.position, row.block.size, block2_size,
            off_diag_cell->values, r2, c2_offset, row_stride2, col_stride2);
      }
    }
  }
}
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
|
GB_binop__ne_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fp64)
// A*D function (colscale): GB (_AxD__ne_fp64)
// D*A function (rowscale): GB (_DxB__ne_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fp64)
// C=scalar+B GB (_bind1st__ne_fp64)
// C=scalar+B' GB (_bind1st_tran__ne_fp64)
// C=A+scalar GB (_bind2nd__ne_fp64)
// C=A'+scalar GB (_bind2nd_tran__ne_fp64)
// C type: bool
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_FP64 || GxB_NO_NE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled for this operator: the dense-accum kernel requires an operator
// that can serve as an accumulator (see the comment below); NE is not one,
// so the generator emitted this stub under "#if 0".
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense. The loop body comes from the
// included template, specialized by the GB_* macros defined above
// (GB_BINOP computes cij = (aij != bij)).
void GB (_Cdense_ewise3_noaccum__ne_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix. The template
// body is compiled out ("#if 0") for this operator, so unless the
// operator is disabled (GB_DISABLE -> GrB_NO_VALUE) this is a no-op that
// returns GrB_SUCCESS.
GrB_Info GB (_Cdense_accumB__ne_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix. Like _Cdense_accumB,
// the template body is compiled out ("#if 0") for this operator, so this
// returns GrB_SUCCESS without touching C (or GrB_NO_VALUE if disabled).
GrB_Info GB (_Cdense_accumb__ne_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal matrix D; body supplied by the
// colscale template, writing bool results into C->x.
GrB_Info GB (_AxD__ne_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal matrix D; body supplied by the
// rowscale template, writing bool results into C->x.
GrB_Info GB (_DxB__ne_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B. When is_eWiseUnion is true, the
// alpha/beta scalars are used in place of implicit zeros; they are only
// read (and thus only initialized) in that case. All real work happens in
// the included add template.
GrB_Info GB (_AaddB__ne_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is
// sparse/hypersparse; body supplied by the emult_08 meta template.
GrB_Info GB (_AemultB_08__ne_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. Since GB_BINOP_FLIP is 0 for this operator (NE is
// commutative), only the non-flipped branch is compiled.
GrB_Info GB (_AemultB_02__ne_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and A, B
// bitmap/full; body supplied by the emult_04 template.
GrB_Info GB (_AemultB_04__ne_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is bitmap; body supplied
// by the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__ne_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B: apply the NE
// operator with the scalar bound to the first argument. Entries absent
// from the bitmap Bb are left untouched. Parallelized over the bnz
// entries; returns GrB_NO_VALUE if this operator is compiled out.
GrB_Info GB (_bind1st__ne_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    bool *Cx = (bool *) Cx_output ;
    const double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // compute only the entries present in the bitmap
        if (GBB (Bb, p))
        {
            double bij = GBX (Bx, p, false) ;
            Cx [p] = (x != bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cz [k] = (Ax [k] != y) for every entry present in A.  Entries absent from
// the bitmap Ab are left untouched.  Cx and Ax may be aliased, so no
// restrict qualifier is placed on the value arrays.
GrB_Info GB (_bind2nd__ne_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cz = (bool *) Cx_output ;
    double *Avals = (double *) Ax_input ;
    const double yval = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            // entry present: apply z = (aij != y)
            double ak = GBX (Avals, k, false) ;
            Cz [k] = (ak != yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_FLIPPED
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ; \
}
// Transposes A and applies z = (x != aij) with the scalar bound as the
// first operand.  The heavy lifting is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__ne_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // (re)define GB_ATYPE for the remainder of this generated file
    // (double for this fp64 kernel, so the value is unchanged)
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ; \
}
// Transposes A and applies z = (aij != y) with the scalar bound as the
// second operand.  The heavy lifting is in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ocp_nlp_sqp_rti.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp_rti.h"
// external
#include <assert.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"
/************************************************
* options
************************************************/
// Returns the number of bytes needed for the SQP-RTI options:
// the solver-specific struct plus the nested generic NLP options.
int ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    return sizeof(ocp_nlp_sqp_rti_opts)
           + ocp_nlp_opts_calculate_size(config, dims);
}
// Lays out the SQP-RTI options inside caller-provided raw memory and
// returns a pointer to the assigned options struct.
void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    char *ptr = (char *) raw_memory;

    // solver-specific options struct comes first
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) ptr;
    ptr += sizeof(ocp_nlp_sqp_rti_opts);

    // nested generic NLP options follow immediately after
    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, ptr);
    ptr += ocp_nlp_opts_calculate_size(config, dims);

    // verify we stayed within the size computed by *_calculate_size
    assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config,
        dims) >= ptr);

    return opts;
}
// Initializes all SQP-RTI options to their defaults and switches off the
// adjoint computation in the dynamics and constraints submodules, which the
// RTI scheme does not need.
void ocp_nlp_sqp_rti_opts_initialize_default(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    // generic NLP defaults must be set before the RTI-specific ones
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // RTI-specific defaults
    opts->ext_qp_res = 0;
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;
    opts->print_level = 0;

    // overwrite submodule defaults: no adjoint in dynamics/constraints
    int compute_adj = 0;
    for (int i = 0; i < N; i++)
    {
        dynamics[i]->opts_set(dynamics[i],
            nlp_opts->dynamics[i], "compute_adj", &compute_adj);
    }
    for (int i = 0; i <= N; i++)
    {
        constraints[i]->opts_set(constraints[i],
            nlp_opts->constraints[i], "compute_adj", &compute_adj);
    }

    return;
}
// Propagates option changes down to the generic NLP options (and, through
// them, to all submodules).
void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_nlp_opts_update(config, dims, opts->nlp_opts);
}
// Sets a single option identified by `field`.  A "qp_" prefix routes the
// option to the QP solver (via the generic NLP opts); RTI-specific fields
// (ext_qp_res, warm_start_first_qp, rti_phase, print_level) are handled
// here; everything else falls through to the generic NLP options.
// Exits the process on invalid rti_phase / print_level values.
void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_,
    const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name: the prefix of `field` up to the first '_'
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = (int) (char_-field);
        // BUG FIX: clamp the prefix length so that a long field name cannot
        // overflow the fixed-size `module` buffer
        if (module_length > MAX_STR_LEN-1)
            module_length = MAX_STR_LEN-1;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);
        // cache the warm-start flag, needed to restore it after the first
        // QP solve when warm_start_first_qp is false
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            // 0: preparation + feedback, 1: preparation only, 2: feedback only
            int* rti_phase = (int *) value;
            if (*rti_phase < 0 || *rti_phase > 2) {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0, 1, 2\n");
                exit(1);
            } else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            // unknown here: delegate to the generic NLP options
            ocp_nlp_opts_set(config, nlp_opts, field, value);
        }
    }

    return;
}
// Sets a per-stage option; simply forwards to the generic NLP options.
void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_,
    int stage, const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;

    ocp_nlp_opts_set_at_stage(config, opts->nlp_opts, stage, field, value);
}
/************************************************
* memory
************************************************/
// Returns the number of bytes needed for the SQP-RTI memory: the struct
// itself, the nested NLP memory, and the statistics table.
int ocp_nlp_sqp_rti_memory_calculate_size(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    int size = sizeof(ocp_nlp_sqp_rti_memory);

    // nested generic NLP memory
    size += ocp_nlp_memory_calculate_size(config, dims, opts->nlp_opts);

    // statistics table: 2 rows, 2 columns (+4 when external QP residuals
    // are recorded); must match ocp_nlp_sqp_rti_memory_assign
    int stat_m = 1+1;
    int stat_n = 2;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);

    size += 8; // initial align
    make_int_multiple_of(8, &size);

    return size;
}
// Lays out the SQP-RTI memory inside caller-provided raw memory; the layout
// must mirror ocp_nlp_sqp_rti_memory_calculate_size.
void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_,
    void *opts_, void *raw_memory)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    char *ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &ptr);

    ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) ptr;
    ptr += sizeof(ocp_nlp_sqp_rti_memory);

    // nested generic NLP memory
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, ptr);
    ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // statistics table (stat_m rows of stat_n doubles)
    mem->stat = (double *) ptr;
    mem->stat_m = 1+1;
    mem->stat_n = 2;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    ptr += mem->stat_m*mem->stat_n*sizeof(double);

    mem->status = ACADOS_READY;

    // verify we stayed within the size computed by *_calculate_size
    assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size(
        config, dims, opts) >= ptr);

    return mem;
}
/************************************************
* workspace
************************************************/
// Returns the number of bytes needed for the SQP-RTI workspace: the struct,
// the nested NLP workspace, a temporary QP in/out pair, and (optionally)
// the QP residual structures.
int ocp_nlp_sqp_rti_workspace_calculate_size(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    int size = sizeof(ocp_nlp_sqp_rti_workspace);

    // nested generic NLP workspace
    size += ocp_nlp_workspace_calculate_size(config, dims, opts->nlp_opts);

    // temporary QP in and out (used for parametric sensitivities)
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // optional external QP residual computation
    if (opts->ext_qp_res)
    {
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}
// Assigns all workspace sub-structures inside the raw workspace memory;
// the layout must mirror ocp_nlp_sqp_rti_workspace_calculate_size.
static void ocp_nlp_sqp_rti_cast_workspace(
    ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_rti_opts *opts,
    ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    char *ptr = (char *) work;

    // the workspace struct itself comes first
    ptr += sizeof(ocp_nlp_sqp_rti_workspace);

    // nested generic NLP workspace
    work->nlp_work = ocp_nlp_workspace_assign(
        config, dims, nlp_opts, nlp_mem, ptr);
    ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // temporary QP in/out (used for parametric sensitivities)
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, ptr);
    ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, ptr);
    ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // optional external QP residual structures
    if (opts->ext_qp_res)
    {
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, ptr);
        ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        work->qp_res_ws = ocp_qp_res_workspace_assign(
            dims->qp_solver->orig_dims, ptr);
        ptr += ocp_qp_res_workspace_calculate_size(
            dims->qp_solver->orig_dims);
    }

    // verify we stayed within the size computed by *_calculate_size
    assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config,
        dims, opts) >= ptr);
}
/************************************************
* functions
************************************************/
// Top-level RTI call: depending on opts->rti_phase it runs the preparation
// step, the feedback step, or both, and records the total wall time.
// Returns the solver status stored in memory by the executed step(s).
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_sqp_rti_opts *nlp_opts = opts_;

    int rti_phase = nlp_opts->rti_phase;

    // time the complete call
    acados_timer timer0;
    mem->time_tot = 0.0;
    acados_tic(&timer0);

    // phase 0: preparation + feedback; phase 1: preparation only;
    // phase 2: feedback only
    if (rti_phase == 0 || rti_phase == 1)
    {
        ocp_nlp_sqp_rti_preparation_step(
            config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
    }
    if (rti_phase == 0 || rti_phase == 2)
    {
        ocp_nlp_sqp_rti_feedback_step(
            config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
    }

    double total_time = acados_toc(&timer0);
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    return mem->status;
}
// Preparation phase of the real-time iteration: wires all submodule memory
// pointers (dynamics, cost, constraints, regularization) to the current
// iterate and QP storage, initializes the QP, and linearizes the NLP, so
// that only the QP solve remains for the feedback phase.  The pointer
// setup loops are distributed over OpenMP threads when available.
void ocp_nlp_sqp_rti_preparation_step(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer1;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    // reset the timers this phase contributes to (time_reg is also
    // incremented later by the feedback step)
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    int N = dims->N;
    int ii;
#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif
    // alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(
            nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(
            nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(
            nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(
            nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(
            nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(
            nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii,
            nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }
    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(
            nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }
    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(
            nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(
            nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(
            nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(
            nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_rev_ptr(
            nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxe_ptr(
            nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
    }
    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(
        dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(
        dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(
        dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(
        dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(
        dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(
        dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(
        dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(
        dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(
        dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }
#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif
    // initialize QP
    ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out,
        nlp_opts, nlp_mem, nlp_work);
    /* SQP body */
    int sqp_iter = 0;
    nlp_mem->sqp_iter = &sqp_iter;
    // linearizate NLP and update QP matrices
    acados_tic(&timer1);
    ocp_nlp_approximate_qp_matrices(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    mem->time_lin += acados_toc(&timer1);
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    return;
}
// Feedback phase of the real-time iteration: embeds the (new) initial
// state into the stage-0 bounds, updates the QP right-hand side,
// regularizes the Hessian, solves the QP and applies the full step.
// Sets mem->status to ACADOS_SUCCESS or ACADOS_QP_FAILURE.
void ocp_nlp_sqp_rti_feedback_step(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer1;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    int qp_iter = 0;
    int qp_status = 0;
    double tmp_time;
    // reset QP timers (owned by this phase)
    mem->time_qp_sol = 0.0;
    mem->time_qp_solver_call = 0.0;
    mem->time_qp_xcond = 0.0;
    // embed initial value (this actually updates all bounds at stage 0...)
    ocp_nlp_embed_initial_value(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    // update QP rhs for SQP (step prim var, abs dual var)
    ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    // regularize Hessian
    // NOTE(review): time_reg is reset in the preparation step only; when
    // called with rti_phase==2 it accumulates over calls — confirm intended.
    acados_tic(&timer1);
    config->regularize->regularize_hessian(config->regularize,
        dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);
    if (opts->print_level > 0) {
        printf("\n------- qp_in --------\n");
        print_ocp_qp_in(nlp_mem->qp_in);
    }
    // optionally disable warm starting for the very first QP solve
    if (!opts->warm_start_first_qp)
    {
        int tmp_int = 0;
        config->qp_solver->opts_set(config->qp_solver,
            opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
    }
    // solve qp
    acados_tic(&timer1);
    qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver,
        nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts,
        nlp_mem->qp_solver_mem, nlp_work->qp_work);
    mem->time_qp_sol += acados_toc(&timer1);
    qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
    mem->time_qp_solver_call += tmp_time;
    qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
    mem->time_qp_xcond += tmp_time;
    // compute correct dual solution in case of Hessian regularization
    acados_tic(&timer1);
    config->regularize->correct_dual_sol(config->regularize,
        dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);
    // TODO move into QP solver memory ???
    qp_info *qp_info_;
    ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
    nlp_out->qp_iter = qp_info_->num_iter;
    qp_iter = qp_info_->num_iter;
    // compute external QP residuals (for debugging)
    if (opts->ext_qp_res)
    {
        ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out,
            work->qp_res, work->qp_res_ws);
        // residual norms go into row 1, columns 2..5 of the stat table
        ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2));
        // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter,
        //     inf_norm_qp_res[0], inf_norm_qp_res[1],
        //     inf_norm_qp_res[2], inf_norm_qp_res[3]);
    }
    // save statistics (row 1: QP status and iteration count)
    mem->stat[mem->stat_n*1+0] = qp_status;
    mem->stat[mem->stat_n*1+1] = qp_iter;
    // NOTE(review): bitwise & works here since != yields 0/1, but && would
    // be the idiomatic logical operator.
    if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
    {
        // print_ocp_qp_in(mem->qp_in);
        printf("QP solver returned error status %d\n", qp_status);
        mem->status = ACADOS_QP_FAILURE;
        return;
    }
    // apply the (full) step to primal and dual variables
    ocp_nlp_update_variables_sqp(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    mem->status = ACADOS_SUCCESS;
}
// One-time precompute: checks that the slack dimensions registered in the
// constraint modules match dims->ns, then sets the sampling times and runs
// the per-stage dynamics precompute.  Exits on a dimension mismatch;
// returns the first non-success status of a dynamics precompute.
int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_,
    void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    int N = dims->N;
    int status = ACADOS_SUCCESS;
    int ii;
    // TODO(giaf) flag to enable/disable checks
    // consistency check: "ns" in each constraint module must match dims->ns
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii],
            dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_rti_precompute: inconsistent dimension ns \
            for stage %d with constraint module, got %d, module: %d.",
            ii, dims->ns[ii], module_val);
            exit(1);
        }
    }
    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii],
            opts->nlp_opts->dynamics[ii],
            nlp_mem->dynamics[ii],
            nlp_work->dynamics[ii]);
        // abort on the first failing stage
        if (status != ACADOS_SUCCESS)
            return status;
    }
    return status;
}
// Evaluates the sensitivity of the QP solution with respect to the initial
// state ("ex" at stage 0): builds a zero-rhs copy of the last QP with a unit
// entry in the selected initial-state bound, calls the QP solver's
// sensitivity evaluation, and copies the result into sens_nlp_out.
// Exits on any other (field, stage) combination.
void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_,
    void *mem_, void *work_, char *field, int stage, int index,
    void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // copy the last QP and zero its right-hand side
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    // use logical && (was bitwise &): idiomatic and short-circuiting
    if ((!strcmp("ex", field)) && (stage==0))
    {
        // seed: unit perturbation of the selected initial-state component
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver,
            work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts,
            nlp_mem->qp_solver_mem, nlp_work->qp_work);
        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);

        /* copy tmp_qp_out into sens_nlp_out */
        int i;
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        int *ni = dims->ni;
        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0,
                sens_nlp_out->ux + i, 0);
            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0,
                    sens_nlp_out->pi + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0,
                sens_nlp_out->lam + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0,
                sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in \
            ocp_nlp_sqp_rti_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}
// TODO rename memory_get ???
// Getter for solver results: statistics, timings and internal QP objects.
// `field` selects the quantity; the result is written through
// return_value_ (type depends on the field).  Exits on unknown fields.
void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_,
    const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        // RTI performs exactly one SQP iteration per call
        int *value = return_value_;
        *value = 1;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
    {
        // sum the requested simulation timer over all dynamics stages
        double tmp = 0.0;
        double *ptr = return_value_;
        int N = dims->N;
        int ii;
        // BUG FIX: zero the output before accumulating; previously the sum
        // started from whatever the caller's variable happened to contain
        *ptr = 0.0;
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // export the stat table with an iteration-index column prepended
        int n_row = 2;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        // delegate to the QP solver's own counter
        config->qp_solver->memory_get(config->qp_solver,
            mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else
    {
        printf("\nerror: output type %s not available in \
            ocp_nlp_sqp_rti module\n", field);
        exit(1);
    }
}
// Populates the generic NLP config vtable with the SQP-RTI implementations.
void ocp_nlp_sqp_rti_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // options
    config->opts_calculate_size = ocp_nlp_sqp_rti_opts_calculate_size;
    config->opts_assign = ocp_nlp_sqp_rti_opts_assign;
    config->opts_initialize_default = ocp_nlp_sqp_rti_opts_initialize_default;
    config->opts_update = ocp_nlp_sqp_rti_opts_update;
    config->opts_set = ocp_nlp_sqp_rti_opts_set;
    config->opts_set_at_stage = ocp_nlp_sqp_rti_opts_set_at_stage;

    // memory & workspace
    config->memory_calculate_size = ocp_nlp_sqp_rti_memory_calculate_size;
    config->memory_assign = ocp_nlp_sqp_rti_memory_assign;
    config->workspace_calculate_size = ocp_nlp_sqp_rti_workspace_calculate_size;

    // solver entry points
    config->evaluate = ocp_nlp_sqp_rti;
    config->eval_param_sens = ocp_nlp_sqp_rti_eval_param_sens;
    config->config_initialize_default = ocp_nlp_sqp_rti_config_initialize_default;
    config->precompute = ocp_nlp_sqp_rti_precompute;
    config->get = ocp_nlp_sqp_rti_get;

    return;
}
|
GB_binop__eq_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_uint16
// A.*B function (eWiseMult): GB_AemultB__eq_uint16
// A*D function (colscale): GB_AxD__eq_uint16
// D*A function (rowscale): GB_DxB__eq_uint16
// C+=B function (dense accum): GB_Cdense_accumB__eq_uint16
// C+=b function (dense accum): GB_Cdense_accumb__eq_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_uint16
// C=scalar+B GB_bind1st__eq_uint16
// C=scalar+B' GB_bind1st_tran__eq_uint16
// C=A+scalar GB_bind2nd__eq_uint16
// C=A'+scalar GB_bind2nd_tran__eq_uint16
// C type: bool
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT16 || GxB_NO_EQ_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = (aij == bij).
// Auto-generated wrapper: the actual loop lives in the included template.
GrB_Info GB_Cdense_ewise3_noaccum__eq_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// GB_DISABLE is set when this operator/type pair is compiled out via
// GB_control.h; the generic kernel is used instead.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse B into a dense C, sliced by task.
// NOTE: the accumulation template below is compiled out (#if 0) for this
// operator, so when enabled (GB_DISABLE false) the function is a no-op
// that reports GrB_SUCCESS.
GrB_Info GB_Cdense_accumB__eq_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar b into a dense C.
// NOTE: as with GB_Cdense_accumB, the template is compiled out (#if 0)
// for this operator; when enabled the function is a successful no-op.
GrB_Info GB_Cdense_accumb__eq_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling the columns of A by the diagonal matrix D, with
// cij = (aij == djj). Work is sliced into ntasks via the *_slice arrays.
GrB_Info GB_AxD__eq_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// alias the output values; the template writes results through Cx
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling the rows of B by the diagonal matrix D, with
// cij = (dii == bij).
GrB_Info GB_DxB__eq_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// alias the output values; the template writes results through Cx
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns, with
// cij = (aij == bij) where both entries exist. The C_to_* maps and
// TaskList come from the symbolic phase; this runs numeric phase 2.
GrB_Info GB_AaddB__eq_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of patterns,
// with cij = (aij == bij).
GrB_Info GB_AemultB__eq_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x == Bx [k]) for all k in [0, anz): apply the EQ operator
// with the scalar x bound as the first operand.
GrB_Info GB_bind1st__eq_uint16
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *out = (bool *) Cx_output ;
    const uint16_t scalar = (*((const uint16_t *) x_input)) ;
    const uint16_t *in = (const uint16_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        out [k] = (scalar == in [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] == y) for all k in [0, anz): apply the EQ operator
// with the scalar y bound as the second operand.
GrB_Info GB_bind2nd__eq_uint16
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *out = (bool *) Cx_output ;
    const uint16_t *in = (const uint16_t *) Ax_input ;
    const uint16_t scalar = (*((const uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        out [k] = (in [k] == scalar) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x == aij), with the
// scalar x bound as the first operand (see GB_CAST_OP above).
GrB_Info GB_bind1st_tran__eq_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (identical here since the
// A and B types of this operator are both uint16_t).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij == y), with the
// scalar y bound as the second operand (see GB_CAST_OP above).
GrB_Info GB_bind2nd_tran__eq_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bins_dynamic.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Nelson Lafontaine
// Carlos Roig
//
#if !defined(KRATOS_BINS_DYNAMIC_CONTAINER_H_INCLUDE)
#define KRATOS_BINS_DYNAMIC_CONTAINER_H_INCLUDE
#include <array>
#include <cmath>
#include <algorithm>
#include "tree.h"
namespace Kratos
{
template<
std::size_t TDimension,
class TPointType,
class TContainerType,
class TPointerType = typename TContainerType::value_type,
class TIteratorType = typename TContainerType::iterator,
class TDistanceIteratorType = typename std::vector<double>::iterator,
class TDistanceFunction = Kratos::SearchUtils::SquaredDistanceFunction<TDimension,TPointType>
>
class BinsDynamic : public TreeNode<TDimension,TPointType, TPointerType, TIteratorType, TDistanceIteratorType, typename std::vector<TPointerType>::iterator >
{
public:
/// Pointer definition of BinsDynamic
KRATOS_CLASS_POINTER_DEFINITION(BinsDynamic);
enum { Dimension = TDimension };
typedef TPointType PointType;
typedef TContainerType ContainerType;
typedef TIteratorType IteratorType;
typedef TDistanceIteratorType DistanceIteratorType;
typedef TPointerType PointerType;
typedef TDistanceFunction DistanceFunction;
typedef TreeNode<Dimension,TPointType,TPointerType,TIteratorType,TDistanceIteratorType> TreeNodeType;
typedef typename TreeNodeType::CoordinateType CoordinateType; // double
typedef typename TreeNodeType::SizeType SizeType; // std::size_t
typedef typename TreeNodeType::IndexType IndexType; // std::size_t
typedef Tvector<CoordinateType,Dimension> CoordinateArray;
typedef Tvector<SizeType,Dimension> SizeArray;
typedef Tvector<IndexType,Dimension> IndexArray;
typedef typename TreeNodeType::IteratorIteratorType IteratorIteratorType;
typedef typename TreeNodeType::SearchStructureType SearchStructureType;
// Local Container ( PointPointer Container per Cell )
// can be different to ContainerType
// not always LocalIterator == ContainerType ( if ContainerType = C array )
typedef std::vector<PointerType> LocalContainerType;
typedef typename LocalContainerType::iterator LocalIterator;
// Global Container
typedef Tvector<IndexType,Dimension> CellType;
typedef std::vector<LocalContainerType> CellContainerType;
// typedef typename CellContainerType::iterator CellContainerIterator;
typedef Kratos::SearchUtils::SearchNearestInRange<PointType,PointerType,LocalIterator,DistanceFunction,CoordinateType> SearchNearestInRange;
typedef Kratos::SearchUtils::SearchRadiusInRange<PointType,LocalIterator,DistanceIteratorType,DistanceFunction,SizeType,CoordinateType,IteratorType> SearchRadiusInRange;
typedef Kratos::SearchUtils::SearchBoxInRange<PointType,LocalIterator,SizeType,Dimension,IteratorType> SearchBoxInRange;
typedef std::vector<CoordinateType> CoordinateVectorType;
typedef std::vector<IteratorType> IteratorVectorType;
typedef std::vector<DistanceIteratorType> DistanceIteratorVectorType;
// Legacy typedef ( to preserve compativility in case someone was using this definitions)
typedef LocalContainerType PointVector;
typedef LocalIterator PointIterator;
typedef TreeNodeType LeafType;
public:
//************************************************************************
// constructor 1
// Default constructor: empty bins with null iterators and no points.
BinsDynamic() : mPointBegin(this->NullIterator()), mPointEnd(this->NullIterator()), mNumCells(0)
{};
//************************************************************************
// Builds bins over [PointBegin, PointEnd): computes the bounding box,
// derives the cell size from the point count, and fills the cells.
// NOTE(review): BucketSize is accepted but never used in this overload.
// NOTE(review): mNumCells is set to the number of POINTS, not cells —
// the member name is misleading; confirm before relying on it.
BinsDynamic( IteratorType const& PointBegin, IteratorType const& PointEnd, SizeType BucketSize = 1 )
: mPointBegin(PointBegin), mPointEnd(PointEnd)
{
if(mPointBegin==mPointEnd)
return;
mNumCells = std::distance(mPointBegin,mPointEnd);
CalculateBoundingBox();
CalculateCellSize(mNumCells);
AllocateCellsContainer();
GenerateBins();
}
//************************************************************************
// Same as above but with a caller-supplied bounding box instead of
// computing it from the points. BucketSize is unused here as well.
BinsDynamic( IteratorType const& PointBegin, IteratorType const& PointEnd, PointType const& MinPoint, PointType const& MaxPoint, SizeType BucketSize = 1 )
: mPointBegin(PointBegin), mPointEnd(PointEnd)
{
if(mPointBegin==mPointEnd)
return;
mNumCells = std::distance(mPointBegin,mPointEnd);
for(SizeType i = 0 ; i < Dimension ; i++)
{
mMinPoint[i] = MinPoint[i];
mMaxPoint[i] = MaxPoint[i];
}
CalculateCellSize(mNumCells);
AllocateCellsContainer();
GenerateBins();
}
//************************************************************************
// Empty bins over a given box; points are added later via AddPoint().
// NOTE(review): BucketSize (a SizeType) is forwarded to AssignCellSize(),
// which treats it as the physical cell edge length (a CoordinateType) —
// confirm callers really mean a geometric size here, not a capacity.
BinsDynamic( PointType const& MinPoint, PointType const& MaxPoint, SizeType BucketSize )
: mNumCells(0)
{
for(SizeType i = 0 ; i < Dimension ; i++)
{
mMinPoint[i] = MinPoint[i];
mMaxPoint[i] = MaxPoint[i];
}
AssignCellSize(BucketSize);
AllocateCellsContainer();
}
//************************************************************************
// Builds bins over [PointBegin, PointEnd) with an explicit cell edge
// length BoxSize. BucketSize is unused.
BinsDynamic( IteratorType const& PointBegin, IteratorType const& PointEnd, CoordinateType BoxSize, SizeType BucketSize = 1 )
: mPointBegin(PointBegin), mPointEnd(PointEnd)
{
if(mPointBegin==mPointEnd)
return;
mNumCells = std::distance(mPointBegin,mPointEnd);
CalculateBoundingBox();
AssignCellSize(BoxSize);
AllocateCellsContainer();
GenerateBins();
}
//************************************************************************
// destructor
virtual ~BinsDynamic() { }
//************************************************************************
// Returns an iterator to the first stored point.
IteratorType Begin()
{
return mPointBegin;
}
//************************************************************************
// Returns an iterator one past the last stored point.
// Bug fix: this previously returned mPointBegin, which made the
// Begin()..End() range always empty; it must return mPointEnd
// (mirroring Begin() above).
IteratorType End()
{
    return mPointEnd;
}
//************************************************************************
// Deprecated: use GetCellSize() instead. Returns the cell edge length
// along dimension iDim.
KRATOS_DEPRECATED CoordinateType CellSize( SizeType const& iDim )
{
return mCellSize[iDim];
}
//************************************************************************
// Deprecated: use GetDivisions() instead. Returns the number of cells
// along dimension iDim.
KRATOS_DEPRECATED SizeType NumCell( SizeType const& iDim )
{
return mN[iDim];
}
/**
* @brief Get the Cell Container object
*
* @return CellContainerType& The Cell Container object
*/
// Mutable accessors: each returns a non-const reference to internal
// state, so callers can modify the bins structure directly.
CellContainerType& GetCellContainer() {
return mCells;
}
/**
* @brief Get the Divisions object
*
* @return SizeArray& Array containing the number of Cells in each dimension
*/
SizeArray& GetDivisions() {
return mN;
}
/**
* @brief Get the Cell Size object
*
* @return CoordinateArray& Array containing the size of the Cell in each dimension
*/
CoordinateArray& GetCellSize() {
return mCellSize;
}
/**
* @brief Get the Min Point object
*
* @return PointType& Min point of the bins
*/
PointType& GetMinPoint() {
return mMinPoint;
}
/**
* @brief Get the Max Point object
*
* @return PointType& Max point of the bins
*/
PointType& GetMaxPoint() {
return mMaxPoint;
}
//************************************************************************
void CalculateBoundingBox()
{
for(SizeType i = 0 ; i < Dimension ; i++)
{
mMinPoint[i] = (**mPointBegin)[i];
mMaxPoint[i] = (**mPointBegin)[i];
}
for(IteratorType Point = mPointBegin ; Point != mPointEnd ; Point++)
for(SizeType i = 0 ; i < Dimension ; i++)
{
if( (**Point)[i] < mMinPoint[i] ) mMinPoint[i] = (**Point)[i];
if( (**Point)[i] > mMaxPoint[i] ) mMaxPoint[i] = (**Point)[i];
}
}
//************************************************************************
/**
* @brief Calculates the cell size of the bins.
*
* Calculates the cell size of the bins using an average aproximation of the objects in the bins.
*
* @param ApproximatedSize Aproximate number of objects that will be stored in the bins
*/
void CalculateCellSize(std::size_t ApproximatedSize)
{
std::size_t average_number_of_cells = static_cast<std::size_t>(std::pow(static_cast<double>(ApproximatedSize), 1.00 / Dimension));
std::array<double, 3> lengths;
double average_length = 0.00;
for (int i = 0; i < Dimension; i++) {
lengths[i] = mMaxPoint[i] - mMinPoint[i];
average_length += lengths[i];
}
average_length *= 1.00 / 3.00;
if (average_length < std::numeric_limits<double>::epsilon()) {
for(int i = 0; i < Dimension; i++) {
mN[i] = 1;
}
return;
}
for (int i = 0; i < Dimension; i++) {
mN[i] = static_cast<std::size_t>(lengths[i] / average_length * (double)average_number_of_cells) + 1;
if (mN[i] > 1) {
mCellSize[i] = lengths[i] / mN[i];
} else {
mCellSize[i] = average_length;
}
mInvCellSize[i] = 1.00 / mCellSize[i];
}
}
//************************************************************************
/// Sets a uniform cell edge length BoxSize in every dimension and derives
/// the per-axis cell counts from the current bounding box.
void AssignCellSize( CoordinateType BoxSize )
{
    for (SizeType dim = 0; dim < Dimension; ++dim)
    {
        mCellSize[dim] = BoxSize;
        mInvCellSize[dim] = 1.00 / BoxSize;
        mN[dim] = static_cast<SizeType>((mMaxPoint[dim] - mMinPoint[dim]) / BoxSize) + 1;
    }
}
//************************************************************************
void AllocateCellsContainer()
{
SizeType Size = 1;
for(SizeType i = 0 ; i < Dimension ; i++)
Size *= mN[i];
// Resize Global Container
mCells.resize(Size);
}
//************************************************************************
void GenerateBins()
{
for(IteratorType i_point = mPointBegin ; i_point != mPointEnd ; i_point++)
mCells[CalculateIndex(**i_point)].push_back(*i_point);
}
//************************************************************************
/// Maps one coordinate to its cell index along dimension ThisDimension,
/// clamped to the valid range [0, mN[ThisDimension]-1].
IndexType CalculatePosition( CoordinateType const& ThisCoord, SizeType ThisDimension )
{
    const CoordinateType scaled = (ThisCoord - mMinPoint[ThisDimension]) * mInvCellSize[ThisDimension];
    // Clamp below zero before the cast: converting a negative floating
    // value to the unsigned IndexType would be undefined behavior.
    IndexType cell = static_cast<IndexType>((scaled < 0.00) ? 0.00 : scaled);
    const IndexType last = mN[ThisDimension] - 1;
    return (cell > last) ? last : cell;
}
//************************************************************************
/// Flattens a point's per-dimension cell positions into a single index
/// into mCells (row-major folding, highest dimension first).
IndexType CalculateIndex( PointType const& ThisPoint )
{
    IndexType flat = 0;
    for (SizeType dim = Dimension - 1; dim > 0; dim--)
    {
        flat += CalculatePosition(ThisPoint[dim], dim);
        flat *= mN[dim - 1];
    }
    flat += CalculatePosition(ThisPoint[0], 0);
    return flat;
}
//************************************************************************
/// Flattens a per-dimension cell tuple into a single index into mCells,
/// using the same row-major folding as CalculateIndex(PointType).
IndexType CalculateIndex( CellType const& ThisIndex )
{
    IndexType flat = 0;
    for (SizeType dim = Dimension - 1; dim > 0; dim--)
    {
        flat += ThisIndex[dim];
        flat *= mN[dim - 1];
    }
    flat += ThisIndex[0];
    return flat;
}
//************************************************************************
/// Returns the per-dimension cell coordinates containing ThisPoint.
CellType CalculateCell( PointType const& ThisPoint )
{
    CellType cell;
    for (SizeType dim = 0; dim < Dimension; ++dim)
        cell[dim] = CalculatePosition(ThisPoint[dim], dim);
    return cell;
}
/// Returns the cell coordinates of ThisPoint shifted by Radius on every
/// axis (pass a negative Radius to obtain the lower corner of a box).
CellType CalculateCell( PointType const& ThisPoint, CoordinateType Radius )
{
    CellType cell;
    for (SizeType dim = 0; dim < Dimension; ++dim)
        cell[dim] = CalculatePosition(ThisPoint[dim] + Radius, dim);
    return cell;
}
//************************************************************************
// Inserts ThisPoint into the cell containing it.
// NOTE(review): mNumCells counts stored POINTS, not cells — the name is
// misleading; confirm before relying on it.
void AddPoint( PointerType const& ThisPoint )
{
mCells[CalculateIndex(*ThisPoint)].push_back(ThisPoint);
mNumCells++;
}
//************************************************************************
// Looks for an already-stored point near ThisPoint by scanning only the
// cells overlapping a +/-Tolerance box around it; returns the nearest
// candidate found, or this->NullPointer() when none exists.
PointerType ExistPoint( PointerType const& ThisPoint, CoordinateType const Tolerance = static_cast<CoordinateType>(10.0*DBL_EPSILON) )
{
PointerType Nearest;
CoordinateType Distance = static_cast<CoordinateType>(DBL_MAX);
// SearchNearestInBox assigns Found before scanning, so leaving it
// uninitialized here is safe.
bool Found;
SearchStructureType Box( CalculateCell(*ThisPoint,-Tolerance), CalculateCell(*ThisPoint,Tolerance), mN );
SearchNearestInBox( *ThisPoint, Nearest, Distance, Box, Found );
if(Found)
return Nearest;
return this->NullPointer();
}
//************************************************************************
// Returns the stored point nearest to ThisPoint, or NullPointer() when
// the bins are empty. The distance is computed but not exposed here.
PointerType SearchNearestPoint( PointType const& ThisPoint )
{
if( mPointBegin == mPointEnd )
return this->NullPointer();
PointerType Result = *mPointBegin;
CoordinateType ResultDistance = static_cast<CoordinateType>(DBL_MAX);
SearchStructureType Box( CalculateCell(ThisPoint), mN );
SearchNearestPointLocal( ThisPoint, Result, ResultDistance, Box );
return Result;
}
//************************************************************************
// Same as above, additionally returning the (squared, per
// TDistanceFunction) distance to the nearest point in ResultDistance.
PointerType SearchNearestPoint( PointType const& ThisPoint, CoordinateType& ResultDistance )
{
if( mPointBegin == mPointEnd )
return this->NullPointer();
PointerType Result = *mPointBegin;
ResultDistance = static_cast<CoordinateType>(DBL_MAX);
SearchStructureType Box( CalculateCell(ThisPoint), mN );
SearchNearestPointLocal( ThisPoint, Result, ResultDistance, Box);
return Result;
}
//************************************************************************
// New Thread Safe!!!
// Thread-safe variant: the caller supplies its own SearchStructureType
// so no shared mutable state is touched.
// NOTE(review): unlike the overloads above, this one dereferences
// *mPointBegin without an empty-range guard — confirm callers never
// invoke it on empty bins.
PointerType SearchNearestPoint( PointType const& ThisPoint, CoordinateType& rResultDistance, SearchStructureType& Box )
{
PointerType Result = *mPointBegin; //static_cast<PointerType>(NULL);
rResultDistance = static_cast<CoordinateType>(DBL_MAX);
Box.Set( CalculateCell(ThisPoint), mN );
SearchNearestPointLocal( ThisPoint, Result, rResultDistance, Box);
return Result;
}
//************************************************************************
// TreeNode override: nearest-point search writing the result and its
// distance through the output parameters.
void SearchNearestPoint( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance ) override
{
SearchStructureType Box;
Box.Set( CalculateCell(ThisPoint), mN );
SearchNearestPointLocal(ThisPoint,rResult,rResultDistance,Box);
}
//************************************************************************
// TreeNode override with a caller-supplied search structure; rResult and
// rResultDistance may already hold a better candidate from an enclosing
// spatial structure, and are only improved upon.
void SearchNearestPoint( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance, SearchStructureType& Box ) override
{
// This case is when BinStatic is a LeafType in Other Spacial Structure
// Then, it is possible a better Result before this search
Box.Set( CalculateCell(ThisPoint), mN );
SearchNearestPointLocal( ThisPoint, rResult, rResultDistance, Box );
}
//************************************************************************
/// Batch nearest-neighbor search: Results[k] receives the nearest stored
/// point to ThisPoints[k], parallelized over the queries with OpenMP.
///
/// NOTE(review): ResultsDistances is taken BY VALUE, so the distances
/// written into it are discarded on return — callers cannot retrieve
/// them. Changing it to a reference would alter the public signature,
/// so it is only flagged here.
void SearchNearestPoint( PointType* const& ThisPoints, SizeType const& NumberOfPoints, IteratorType &Results, std::vector<CoordinateType> ResultsDistances)
{
    // Fix: compare the signed OpenMP index against a signed bound instead
    // of the unsigned SizeType (silences/sidesteps sign-compare issues).
    const int number_of_points = static_cast<int>(NumberOfPoints);
    #pragma omp parallel for
    for(int k = 0; k < number_of_points; k++)
        Results[k] = SearchNearestPoint(ThisPoints[k], ResultsDistances[k]);
}
//************************************************************************
// Core nearest-point search: scans the cell containing ThisPoint, then
// repeatedly enlarges the search box (++Box) until a candidate is found.
// NOTE(review): termination relies on ++Box together with
// SearchNearestInBox eventually reaching a non-empty cell — presumably
// SearchStructure saturates at the grid bounds; confirm in tree.h.
void SearchNearestPointLocal( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance, SearchStructureType& Box )
{
if( mPointBegin == mPointEnd )
return;
bool Found = false;
// set mBox
Box.Set( CalculateCell(ThisPoint), mN );
// initial search
++Box;
SearchNearestInBox( ThisPoint, rResult, rResultDistance, Box, Found );
// increase mBox and try again
while(!Found)
{
++Box;
SearchNearestInBox( ThisPoint, rResult, rResultDistance, Box, Found );
}
}
//************************************************************************
// Radius search (with distances): collects up to MaxNumberOfResults
// points within Radius of ThisPoint into Results, writing the squared
// distances into ResultsDistances; returns the number found.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, IteratorType Results,
DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults )
{
CoordinateType Radius2 = Radius * Radius;
SizeType NumberOfResults = 0;
SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
//************************************************************************
// Same as above with a caller-supplied (reusable) search structure.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, IteratorType Results,
DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults, SearchStructureType& Box )
{
CoordinateType Radius2 = Radius * Radius;
SizeType NumberOfResults = 0;
Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
//************************************************************************
// TreeNode override: accumulating variant — NumberOfResults and the
// output iterators are advanced rather than reset.
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) override
{
SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box);
}
//************************************************************************
// TreeNode override with a caller-supplied search structure.
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructureType& Box ) override
{
Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box);
}
//************************************************************************
/// Batch radius search: for each query point k, fills Results[k] /
/// ResultsDistances[k] and stores the hit count in NumberOfResults[k].
/// Parallelized over the query points with OpenMP.
void SearchInRadius( PointerType const& ThisPoints, SizeType const& NumberOfPoints, CoordinateVectorType const& Radius, IteratorVectorType Results,
DistanceIteratorVectorType ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults )
{
    // Fix: compare the signed OpenMP index against a signed bound instead
    // of the unsigned SizeType (silences/sidesteps sign-compare issues).
    const int number_of_points = static_cast<int>(NumberOfPoints);
    #pragma omp parallel for
    for(int k = 0; k < number_of_points; k++)
        NumberOfResults[k] = SearchInRadius(ThisPoints[k], Radius[k], Results[k], ResultsDistances[k], MaxNumberOfResults);
}
// **** THREAD SAFE
// Dimension = 1
// Dimension-specific workers (with distances): iterate the flat cell
// indices covered by Box and delegate the per-cell test to
// SearchRadiusInRange. Thread safe: only caller-owned outputs are
// written.
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchRadiusInRange()(mCells[I].begin(),mCells[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 2
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchRadiusInRange()(mCells[I].begin(),mCells[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 3
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block )
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchRadiusInRange()(mCells[I].begin(),mCells[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults);
}
//************************************************************************
// Radius search (results only, no distances written): collects up to
// MaxNumberOfResults points within Radius of ThisPoint; returns the
// number found.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType Radius, IteratorType Results, SizeType MaxNumberOfResults )
{
CoordinateType Radius2 = Radius * Radius;
SizeType NumberOfResults = 0;
SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
//************************************************************************
// Same as above with a caller-supplied (reusable) search structure.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType Radius, IteratorType Results,
SizeType MaxNumberOfResults, SearchStructureType& Box )
{
CoordinateType Radius2 = Radius * Radius;
SizeType NumberOfResults = 0;
Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
//************************************************************************
// TreeNode override: accumulating variant without distances.
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) override
{
SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box );
}
//************************************************************************
// TreeNode override with a caller-supplied search structure.
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructureType& Box ) override
{
Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box );
}
//************************************************************************
// **** THREAD SAFE
// Dimension = 1
// Dimension-specific workers (no distances). Thread safe.
// NOTE(review): the innermost loop advances with I++ here, whereas the
// distance-writing variants above use I += Box.Axis[0].Block —
// presumably Axis[0].Block is always 1, making them equivalent; confirm
// in the SearchStructure definition.
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I++ )
SearchRadiusInRange()(mCells[I].begin(),mCells[I].end(),ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 2
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I++ )
SearchRadiusInRange()(mCells[I].begin(),mCells[I].end(),ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 3
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block )
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I++ )
SearchRadiusInRange()(mCells[I].begin(),mCells[I].end(),ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults);
}
//************************************************************************
//************************************************************************
// Dimension = 1
void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, bool& Found )
{
Found = false;
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchNearestInRange()( mCells[I].begin(), mCells[I].end(), ThisPoint, ResultPoint, ResultDistance, Found );
}
// Dimension = 2
void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, bool& Found )
{
Found = false;
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchNearestInRange()( mCells[I].begin(), mCells[I].end(), ThisPoint, ResultPoint, ResultDistance, Found );
}
// Dimension = 3
void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, bool& Found )
{
Found = false;
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block )
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchNearestInRange()( mCells[I].begin(), mCells[I].end(), ThisPoint, ResultPoint, ResultDistance, Found );
}
//************************************************************************
//************************************************************************
SizeType SearchInBox( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType Results,
SizeType MaxNumberOfResults )
{
SizeType NumberOfResults = 0;
SearchStructureType Box( CalculateCell(SearchMinPoint), CalculateCell(SearchMaxPoint), mN );
SearchInBoxLocal( SearchMinPoint, SearchMaxPoint, Results, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
//************************************************************************
void SearchInBox(PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& Results, SizeType& NumberOfResults,
SizeType const& MaxNumberOfResults ) override
{
NumberOfResults = 0;
SearchStructureType Box( CalculateCell(SearchMinPoint), CalculateCell(SearchMaxPoint), mN );
SearchInBoxLocal( SearchMinPoint, SearchMaxPoint, Results, NumberOfResults, MaxNumberOfResults, Box );
}
//************************************************************************
// Dimension = 1
void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,mCells[I].begin(),mCells[I].end(),ResultsPoint,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 2
void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,mCells[I].begin(),mCells[I].end(),ResultsPoint,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 3
void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block )
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block )
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,mCells[I].begin(),mCells[I].end(),ResultsPoint,NumberOfResults,MaxNumberOfResults);
}
//************************************************************************
/// Turn back information as a string.
virtual std::string Info() const
{
return "BinsDynamic";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << "BinsDynamic";
}
/// Print object's data.
void PrintData(std::ostream& rOStream, std::string const& Perfix = std::string()) const override
{
rOStream << Perfix << "Bin[" << SearchUtils::PointerDistance(mPointBegin, mPointEnd) << "] : " << std::endl;
for(typename CellContainerType::const_iterator i_cell = mCells.begin() ; i_cell != mCells.end() ; i_cell++)
{
rOStream << Perfix << "[ " ;
for(typename LocalContainerType::const_iterator i_point = i_cell->begin() ; i_point != i_cell->end() ; i_point++)
rOStream << **i_point << " ";
rOStream << " ]" << std::endl;
}
rOStream << std::endl;
}
/// Print Size of Container
void PrintSize( std::ostream& rout )
{
rout << " BinsSize: ";
for(SizeType i = 0 ; i < Dimension ; i++)
rout << "[" << mN[i] << "]";
rout << std::endl;
}
/// Print Limits Points of the Container
void PrintBox( std::ostream& rout )
{
rout << " BinsBox: Min [";
mMinPoint.Print(rout);
rout << "]; Max [";
mMaxPoint.Print(rout);
rout << "]; Size [";
mCellSize.Print(rout);
rout << "]" << std::endl;
}
/// Assignment operator.
BinsDynamic& operator=(BinsDynamic const& rOther);
/// Copy constructor.
BinsDynamic(BinsDynamic const& rOther);
private:
IteratorType mPointBegin;
IteratorType mPointEnd;
PointType mMinPoint;
PointType mMaxPoint;
CoordinateArray mCellSize;
CoordinateArray mInvCellSize;
SizeArray mN;
SizeType mNumCells;
// Bins Access Vector ( vector<Iterator> )
CellContainerType mCells;
// Work Variables ( For non-copy of Search Variables )
//BinBox SearchBox;
public:
static TreeNodeType* Construct(IteratorType PointsBegin, IteratorType PointsEnd, PointType MaxPoint, PointType MinPoint, SizeType BucketSize)
{
SizeType number_of_points = SearchUtils::PointerDistance(PointsBegin,PointsEnd);
if (number_of_points == 0)
return NULL;
else
{
return new BinsDynamic( PointsBegin, PointsEnd, MinPoint, MaxPoint, BucketSize );
}
}
};
template<
std::size_t TDimension,
class TPointType,
class TContainerType,
class TPointerType,
class TIteratorType,
class TDistanceIteratorType,
class TDistanceFunction >
std::ostream & operator<<( std::ostream& rOStream,
BinsDynamic<TDimension,TPointType,TContainerType,TPointerType,TIteratorType,TDistanceIteratorType,TDistanceFunction>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintSize(rOStream);
rThis.PrintData(rOStream);
return rOStream;
}
}
#endif // KRATOS_BINS_DYNAMIC_CONTAINER_H_INCLUD
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "bicg.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*bicg.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
static void init_array(int m, int n, double A[2100][1900], double r[2100], double p[1900]) {
int i, j;
for(i = 0; i < m; i++)
p[i] = (double) (i % m) / m;
for(i = 0; i < n; i++) {
r[i] = (double) (i % n) / n;
for(j = 0; j < m; j++)
A[i][j] = (double) (i * (j + 1) % n) / n;
}
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
static void print_array(int m, int n, double s[1900], double q[2100]) {
int i;
fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
fprintf(stderr, "begin dump: %s", "s");
for(i = 0; i < m; i++) {
if(i % 20 == 0) fprintf(stderr, "\n");
fprintf(stderr, "%0.2lf ", s[i]);
}
fprintf(stderr, "\nend dump: %s\n", "s");
fprintf(stderr, "begin dump: %s", "q");
for(i = 0; i < n; i++) {
if(i % 20 == 0) fprintf(stderr, "\n");
fprintf(stderr, "%0.2lf ", q[i]);
}
fprintf(stderr, "\nend dump: %s\n", "q");
fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
static void kernel_bicg(int m, int n, double A[2100][1900], double s[1900], double q[2100], double p[1900], double r[2100]) {
int i, j;
#pragma omp parallel for default(shared) private(i) firstprivate(m)
for(i = 0; i < m; i++)
s[i] = 0;
#pragma omp parallel for default(shared) private(i, j) firstprivate(n, m, r, A, p) reduction(+ : s[:1900])
for(i = 0; i < n; i++) {
q[i] = 0.0;
// #pragma omp parallel for default(shared) private(j) firstprivate(m, i, r, A, p) reduction(+ : q[i])
for(j = 0; j < m; j++) {
s[j] = s[j] + r[i] * A[i][j];
q[i] = q[i] + A[i][j] * p[j];
}
}
}
int main(int argc, char **argv) {
/*Retrieve problem size.*/
int n = 2100;
int m = 1900;
/*Variable declaration/allocation.*/
double (*A)[2100][1900];
A = (double (*)[2100][1900]) polybench_alloc_data((2100 + 0) * (1900 + 0), sizeof(double));
;
double (*s)[1900];
s = (double (*)[1900]) polybench_alloc_data(1900 + 0, sizeof(double));
;
double (*q)[2100];
q = (double (*)[2100]) polybench_alloc_data(2100 + 0, sizeof(double));
;
double (*p)[1900];
p = (double (*)[1900]) polybench_alloc_data(1900 + 0, sizeof(double));
;
double (*r)[2100];
r = (double (*)[2100]) polybench_alloc_data(2100 + 0, sizeof(double));
;
/*Initialize array(s).*/
init_array(m, n, *A, *r, *p);
/*Start timer.*/
;
/*Run kernel.*/
kernel_bicg(m, n, *A, *s, *q, *p, *r);
/*Stop and print timer.*/
;
;
/*Prevent dead-code elimination. All live-out data must be printed
by the function call in argument.*/
if(argc > 42 && !strcmp(argv[0], "")) print_array(m, n, *s, *q);
/*Be clean.*/
free((void *) A);
;
free((void *) s);
;
free((void *) q);
;
free((void *) p);
;
free((void *) r);
;
return 0;
}
|
GB_binop__isge_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64)
// A*D function (colscale): GB (_AxD__isge_uint64)
// D*A function (rowscale): GB (_DxB__isge_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64)
// C=scalar+B GB (_bind1st__isge_uint64)
// C=scalar+B' GB (_bind1st_tran__isge_uint64)
// C=A+scalar GB (_bind2nd__isge_uint64)
// C=A'+scalar GB (_bind2nd_tran__isge_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM
{
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree
{
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
* \param track_branch_features Whether to keep track of ancestors of leaf nodes
*/
explicit Tree(int max_leaves, bool track_branch_features);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char *str, size_t *used_len);
~Tree();
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight,
float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t *threshold_bin, int num_threshold_bin,
const uint32_t *threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output)
{
leaf_value_[leaf] = MaybeRoundToZero(output);
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset *data,
data_size_t num_data,
double *score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset *data,
const data_size_t *used_data_indices,
data_size_t num_data, double *score) const;
/*!
* \brief Get upper bound leaf value of this tree model
*/
double GetUpperBoundValue() const;
/*!
* \brief Get lower bound leaf value of this tree model
*/
double GetLowerBoundValue() const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double *feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double> &feature_values) const;
inline int PredictLeafIndex(const double *feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double> &feature_values) const;
inline void PredictContrib(const double *feature_values, int num_features, double *output);
inline void PredictContribByMap(const std::unordered_map<int, double> &feature_values,
int num_features, std::unordered_map<int, double> *output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get parent of specific leaf*/
inline int leaf_parent(int leaf_idx) const { return leaf_parent_[leaf_idx]; }
/*! \brief Get feature of specific split*/
/*! \brief Get split feature (original index) of an internal node*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
/*! \brief Get features on leaf's branch*/
inline std::vector<int, mi_stl_allocator<int>> branch_features(int leaf) const { return branch_features_[leaf]; }
/*! \brief Get split gain of an internal node*/
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
/*! \brief Get the output value stored on an internal node*/
inline double internal_value(int node_idx) const
{
return internal_value_[node_idx];
}
/*! \brief True when the node is a numerical (non-categorical) split*/
inline bool IsNumericalSplit(int node_idx) const
{
return !GetDecisionType(decision_type_[node_idx], kCategoricalMask);
}
/*! \brief Left child of an internal node (negative values encode leaves as ~leaf)*/
inline int left_child(int node_idx) const { return left_child_[node_idx]; }
/*! \brief Right child of an internal node (negative values encode leaves as ~leaf)*/
inline int right_child(int node_idx) const { return right_child_[node_idx]; }
/*! \brief Get split feature (inner, binned index) of an internal node*/
inline int split_feature_inner(int node_idx) const
{
return split_feature_inner_[node_idx];
}
/*! \brief Get split threshold expressed in bin space*/
inline uint32_t threshold_in_bin(int node_idx) const
{
return threshold_in_bin_[node_idx];
}
/*! \brief Get the number of data points that fall at or below this node*/
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate)
{
// internal_value_ has one fewer entry than leaf_value_, so the loop runs to
// num_leaves_ - 1 and the last leaf is scaled separately below
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i)
{
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate);
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate);
shrinkage_ *= rate;
}
/*! \brief Accumulated shrinkage applied so far to this tree's output*/
inline double shrinkage() const { return shrinkage_; }
/*! \brief Add a constant bias to every leaf output and internal value*/
inline void AddBias(double val)
{
// same iteration shape as Shrinkage(): last leaf handled outside the loop
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i)
{
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val);
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val);
// force to 1.0
shrinkage_ = 1.0f;
}
/*! \brief Collapse this tree into a single constant leaf with output `val`*/
inline void AsConstantTree(double val)
{
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
/*! \brief True when |fval| is within the zero-rounding threshold*/
inline static bool IsZero(double fval)
{
return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
/*! \brief Round near-zero model outputs to exactly zero*/
inline static double MaybeRoundToZero(double fval)
{
return IsZero(fval) ? 0 : fval;
}
/*! \brief True when any bit selected by `mask` is set in `decision_type`*/
inline static bool GetDecisionType(int8_t decision_type, int8_t mask)
{
const int masked_bits = decision_type & mask;
return masked_bits > 0;
}
/*! \brief Set (input == true) or clear (input == false) the flag bits selected
*  by `mask` inside `*decision_type`. Masks are single low bits, for which
*  `127 - mask` clears exactly the masked bit.*/
inline static void SetDecisionType(int8_t *decision_type, bool input, int8_t mask)
{
if (input) {
*decision_type = static_cast<int8_t>(*decision_type | mask);
} else {
*decision_type = static_cast<int8_t>(*decision_type & (127 - mask));
}
}
/*! \brief Extract the missing-value handling mode stored in bits 2-3*/
inline static int8_t GetMissingType(int8_t decision_type)
{
const int shifted = decision_type >> 2;
return static_cast<int8_t>(shifted & 3);
}
/*! \brief Store missing-value mode `input` into bits 2-3, keeping the low two flag bits*/
inline static void SetMissingType(int8_t *decision_type, int8_t input)
{
const int8_t low_flags = static_cast<int8_t>(*decision_type & 3);
*decision_type = static_cast<int8_t>(low_flags | (input << 2));
}
void RecomputeMaxDepth();
/*! \brief Id the next leaf would get (equals the current number of leaves)*/
int NextLeafId() const { return num_leaves_; }
private:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
/*! \brief Route a raw feature value through a numerical split node; returns the child index*/
inline int NumericalDecision(double fval, int node) const
{
uint8_t missing_type = GetMissingType(decision_type_[node]);
// when this node does not treat NaN as "missing", NaN falls back to zero
if (std::isnan(fval) && missing_type != MissingType::NaN)
{
fval = 0.0f;
}
// missing values follow the default direction learned during training
if ((missing_type == MissingType::Zero && IsZero(fval)) || (missing_type == MissingType::NaN && std::isnan(fval)))
{
if (GetDecisionType(decision_type_[node], kDefaultLeftMask))
{
return left_child_[node];
}
else
{
return right_child_[node];
}
}
if (fval <= threshold_[node])
{
return left_child_[node];
}
else
{
return right_child_[node];
}
}
/*! \brief Same as NumericalDecision but in bin space: default_bin plays the
*  role of zero and max_bin the role of NaN*/
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const
{
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == MissingType::Zero && fval == default_bin) || (missing_type == MissingType::NaN && fval == max_bin))
{
if (GetDecisionType(decision_type_[node], kDefaultLeftMask))
{
return left_child_[node];
}
else
{
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node])
{
return left_child_[node];
}
else
{
return right_child_[node];
}
}
/*! \brief Route a raw feature value through a categorical split node and
*  return the child index. The category's bitset for this split is the slice
*  of cat_threshold_ delimited by cat_boundaries_[cat_idx .. cat_idx + 1].*/
inline int CategoricalDecision(double fval, int node) const
{
uint8_t missing_type = GetMissingType(decision_type_[node]);
// NOTE(review): casting NaN to int is implementation-defined; the isnan
// branch below assumes the cast did not already yield a non-negative value
int int_fval = static_cast<int>(fval);
if (int_fval < 0)
{
// negative categories can never be in the bitset; always go right
return right_child_[node];
}
else if (std::isnan(fval))
{
// NaN is always in the right
if (missing_type == MissingType::NaN)
{
return right_child_[node];
}
int_fval = 0;
}
int cat_idx = static_cast<int>(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval))
{
return left_child_[node];
}
return right_child_[node];
}
/*! \brief Bin-space variant of CategoricalDecision: looks the bin value up in
*  the inner category bitset of this split*/
inline int CategoricalDecisionInner(uint32_t fval, int node) const
{
int cat_idx = static_cast<int>(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval))
{
return left_child_[node];
}
return right_child_[node];
}
inline int Decision(double fval, int node) const
{
if (GetDecisionType(decision_type_[node], kCategoricalMask))
{
return CategoricalDecision(fval, node);
}
else
{
return NumericalDecision(fval, node);
}
}
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const
{
if (GetDecisionType(decision_type_[node], kCategoricalMask))
{
return CategoricalDecisionInner(fval, node);
}
else
{
return NumericalDecisionInner(fval, node, default_bin, max_bin);
}
}
/*! \brief Turn leaf `leaf` into an internal node; the new right leaf takes id
*  num_leaves_. NOTE(review): assumes the caller increments num_leaves_
*  around this call -- confirm against Tree::Split's call sites.*/
inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double *feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double> &feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement
{
int feature_index;
double zero_fraction;
double one_fraction;
// note that pweight is included for convenience and is not tied with the other attributes,
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
void TreeSHAPByMap(const std::unordered_map<int, double> &feature_values,
std::unordered_map<int, double> *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int, mi_stl_allocator<int>> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int, mi_stl_allocator<int>> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int, mi_stl_allocator<int>> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int, mi_stl_allocator<int>> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t, mi_stl_allocator<uint32_t>> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double, mi_stl_allocator<double>> threshold_;
/*! \brief Number of categorical splits (0 enables the numerical-only fast path) */
int num_cat_;
std::vector<int, mi_stl_allocator<int>> cat_boundaries_inner_;
std::vector<uint32_t, mi_stl_allocator<uint32_t>> cat_threshold_inner_;
std::vector<int, mi_stl_allocator<int>> cat_boundaries_;
std::vector<uint32_t, mi_stl_allocator<uint32_t>> cat_threshold_;
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t, mi_stl_allocator<int8_t>> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float, mi_stl_allocator<float>> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int, mi_stl_allocator<int>> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double, mi_stl_allocator<double>> leaf_value_;
/*! \brief weight of leaves */
std::vector<double, mi_stl_allocator<double>> leaf_weight_;
/*! \brief DataCount of leaves */
std::vector<int, mi_stl_allocator<int>> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double, mi_stl_allocator<double>> internal_value_;
/*! \brief weight of non-leaf nodes */
std::vector<double, mi_stl_allocator<double>> internal_weight_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int, mi_stl_allocator<int>> internal_count_;
/*! \brief Depth for leaves */
std::vector<int, mi_stl_allocator<int>> leaf_depth_;
/*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */
bool track_branch_features_;
/*! \brief Features on leaf's branch, original index */
std::vector<std::vector<int, mi_stl_allocator<int>>, mi_stl_allocator<std::vector<int, mi_stl_allocator<int>>>> branch_features_;
/*! \brief Accumulated shrinkage (learning rate) applied to outputs */
double shrinkage_;
/*! \brief Max depth of the tree; NOTE(review): negative appears to mean "unknown"
*  (PredictContrib asserts CHECK_GE(max_depth_, 0)) -- confirm */
int max_depth_;
};
/*! \brief Split leaf `leaf` on (feature, real_feature): reuses the leaf's slot
*  (as ~leaf) for the left child and creates a new right leaf with id
*  num_leaves_; the new internal node takes index num_leaves_ - 1.
*  NOTE(review): relies on the caller managing num_leaves_ -- confirm.*/
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain)
{
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0)
{
// if cur node is left child
if (left_child_[parent] == ~leaf)
{
left_child_[parent] = new_node_idx;
}
else
{
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = gain;
// add two new leaves (leaves are encoded with bitwise complement)
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_weight_[new_node_idx] = leaf_weight_[leaf];
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
// NaN outputs are sanitized to 0 so prediction never propagates NaN
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_weight_[leaf] = left_weight;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_weight_[num_leaves_] = right_weight;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
if (track_branch_features_)
{
// both children inherit the ancestor features plus the new split feature
branch_features_[num_leaves_] = branch_features_[leaf];
branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]);
branch_features_[leaf].push_back(split_feature_[new_node_idx]);
}
}
/*! \brief Predict the output for one record; a stump predicts a constant*/
inline double Tree::Predict(const double *feature_values) const
{
if (num_leaves_ <= 1)
{
return leaf_value_[0];
}
const int leaf_idx = GetLeaf(feature_values);
return LeafOutput(leaf_idx);
}
/*! \brief Map-based variant of Predict(); absent features are handled inside GetLeafByMap*/
inline double Tree::PredictByMap(const std::unordered_map<int, double> &feature_values) const
{
if (num_leaves_ <= 1)
{
return leaf_value_[0];
}
const int leaf_idx = GetLeafByMap(feature_values);
return LeafOutput(leaf_idx);
}
/*! \brief Index of the leaf this record falls into; a stump always answers 0*/
inline int Tree::PredictLeafIndex(const double *feature_values) const
{
return num_leaves_ > 1 ? GetLeaf(feature_values) : 0;
}
/*! \brief Map-based variant of PredictLeafIndex(); 0 for a single-leaf tree*/
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double> &feature_values) const
{
return num_leaves_ > 1 ? GetLeafByMap(feature_values) : 0;
}
/*! \brief TreeSHAP feature contributions for one record; slot output[num_features]
*  accumulates the expected value of the tree*/
inline void Tree::PredictContrib(const double *feature_values, int num_features, double *output)
{
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1)
{
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
// triangular bound on the number of PathElement slots the recursion needs
std::vector<PathElement, mi_stl_allocator<PathElement>> unique_path_data(max_path_len * (max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
/*! \brief Map-based variant of PredictContrib*/
inline void Tree::PredictContribByMap(const std::unordered_map<int, double> &feature_values,
int num_features, std::unordered_map<int, double> *output)
{
(*output)[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1)
{
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement, mi_stl_allocator<PathElement>> unique_path_data(max_path_len * (max_path_len + 1) / 2);
TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
/*! \brief Rebuild leaf_depth_ by walking the tree from the root (used after
*  reloading a model)*/
inline void Tree::RecomputeLeafDepths(int node, int depth)
{
if (node == 0)
leaf_depth_.resize(num_leaves());
if (node < 0)
{
// negative ids encode leaves as ~leaf
leaf_depth_[~node] = depth;
}
else
{
RecomputeLeafDepths(left_child_[node], depth + 1);
RecomputeLeafDepths(right_child_[node], depth + 1);
}
}
/*! \brief Walk from the root to the leaf this record falls into*/
inline int Tree::GetLeaf(const double *feature_values) const
{
int node = 0;
// hoist the categorical check out of the loop: all-numerical trees take the cheap path
if (num_cat_ > 0)
{
while (node >= 0)
{
node = Decision(feature_values[split_feature_[node]], node);
}
}
else
{
while (node >= 0)
{
node = NumericalDecision(feature_values[split_feature_[node]], node);
}
}
return ~node;
}
/*! \brief Same as GetLeaf but with sparse features; absent features read as 0*/
inline int Tree::GetLeafByMap(const std::unordered_map<int, double> &feature_values) const
{
int node = 0;
if (num_cat_ > 0)
{
while (node >= 0)
{
node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
}
else
{
while (node >= 0)
{
node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
}
return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
test6.c | //#include<omp.h>
/* OpenMP construct exercise: named/unnamed critical sections, nowait
 * worksharing loops, and the atomic clause family. Presumably an analyzer
 * fixture, so the odd constructs below are left as-is. */
int main() {
#pragma omp parallel
{
// NOTE(review): x and y are read by the atomic updates below before ever
// being assigned (uninitialized reads); initialize to 0 unless the fixture
// intends this -- confirm
int i, x;
int y;
// nowait: threads may proceed to the second loop while others still run this one
#pragma omp for nowait
for(i = 0; i < 10; i++) {
// named critical region serializes only the print
#pragma omp critical (someName)
printf("1.) Iteration %d with thread %d\n", i, omp_get_thread_num());
if(omp_get_thread_num()%2 == 0)
sleep(5);
}
#pragma omp for nowait
for(i = 0; i < 10; i++) {
// unnamed critical: a distinct lock from the named region above
#pragma omp critical
{
printf("2.) Iteration %d with thread %d\n", i, omp_get_thread_num());
#pragma omp atomic
x = x + 1;
#pragma omp atomic update
x = x + 1;
#pragma omp atomic read
y = x;
#pragma omp atomic write
x = y;
// NOTE(review): `x = y = 10` is not a conforming atomic-capture statement
// form -- confirm whether a compiler diagnostic is the expected outcome
#pragma omp atomic capture
x = y = 10;
}
}
// same atomic sequence outside the critical region
#pragma omp atomic
x = x + 1;
#pragma omp atomic update
x = x + 1;
#pragma omp atomic read
y = x;
#pragma omp atomic write
x = y;
#pragma omp atomic capture
x = y = 10;
y++;
}
}
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "gesummv.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*gesummv.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
/* Deterministic pseudo-data for the gesummv kernel: fixed alpha/beta and
 * values in [0, 1) derived from the indices (same formulas as the reference
 * generator, so results are reproducible). */
static void init_array(int n, double *alpha, double *beta, double A[1300][1300], double B[1300][1300], double x[1300]) {
int row, col;
*alpha = 1.5;
*beta = 1.2;
/* fill the vector first, then the two matrices */
for(row = 0; row < n; row++) {
x[row] = (double) (row % n) / n;
}
for(row = 0; row < n; row++) {
for(col = 0; col < n; col++) {
A[row][col] = (double) ((row * col + 1) % n) / n;
B[row][col] = (double) ((row * col + 2) % n) / n;
}
}
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
/* Dump y to stderr in the standard PolyBench format (20 values per line);
 * exists so dead-code elimination cannot discard the kernel's result. */
static void print_array(int n, double y[1300]) {
int i;
fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
fprintf(stderr, "begin dump: %s", "y");
for(i = 0; i < n; i++) {
if(i % 20 == 0) fprintf(stderr, "\n");
fprintf(stderr, "%0.2lf ", y[i]);
}
fprintf(stderr, "\nend dump: %s\n", "y");
fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
/* gesummv: y = alpha*A*x + beta*B*x. Rows are independent, so the outer
 * loop is parallelized; each iteration owns tmp[i] and y[i] exclusively. */
static void kernel_gesummv(int n, double alpha, double beta, double A[1300][1300], double B[1300][1300], double tmp[1300], double x[1300], double y[1300]) {
int i, j;
#pragma omp parallel for default(shared) private(i, j) firstprivate(n, alpha, beta, A, x, B)
for(i = 0; i < n; i++) {
tmp[i] = 0.0;
y[i] = 0.0;
for(j = 0; j < n; j++) {
tmp[i] += A[i][j] * x[j];
y[i] += B[i][j] * x[j];
}
y[i] = alpha * tmp[i] + beta * y[i];
}
}
/* Program entry: allocate, initialize, run the timed kernel, and (only under
 * the impossible argc/argv guard below) dump results to defeat DCE. */
int main(int argc, char **argv) {
/*Retrieve problem size.*/
int n = 1300;
/*Variable declaration/allocation.*/
double alpha;
double beta;
double (*A)[1300][1300];
A = (double (*)[1300][1300]) polybench_alloc_data((1300 + 0) * (1300 + 0), sizeof(double));
;
double (*B)[1300][1300];
B = (double (*)[1300][1300]) polybench_alloc_data((1300 + 0) * (1300 + 0), sizeof(double));
;
double (*tmp)[1300];
tmp = (double (*)[1300]) polybench_alloc_data(1300 + 0, sizeof(double));
;
double (*x)[1300];
x = (double (*)[1300]) polybench_alloc_data(1300 + 0, sizeof(double));
;
double (*y)[1300];
y = (double (*)[1300]) polybench_alloc_data(1300 + 0, sizeof(double));
;
/*Initialize array(s).*/
init_array(n, &alpha, &beta, *A, *B, *x);
/*Start timer.*/
;
/*Run kernel.*/
kernel_gesummv(n, alpha, beta, *A, *B, *tmp, *x, *y);
/*Stop and print timer.*/
;
;
/*Prevent dead-code elimination. All live-out data must be printed
by the function call in argument.*/
if(argc > 42 && !strcmp(argv[0], "")) print_array(n, *y);
/*Be clean.*/
free((void *) A);
;
free((void *) B);
;
free((void *) tmp);
;
free((void *) x);
;
free((void *) y);
;
return 0;
}
|
connected_layer.c | #include "connected_layer.h"
#include "batchnorm_layer.h"
#include "convolutional_layer.h"
#include "utils.h"
#include "dark_cuda.h"
#include "blas.h"
#include "gemm.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Workspace bytes this layer needs. With cuDNN the connected layer is run as
 * a 1x1 convolution, so it simply reuses the convolutional query; without
 * cuDNN no scratch space is required. */
size_t get_connected_workspace_size(layer l)
{
#ifdef CUDNN
return get_convolutional_workspace_size(l);
/*
if (gpu_index >= 0) {
size_t most = 0;
size_t s = 0;
CHECK_CUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle(),
l.srcTensorDesc,
l.weightDesc,
l.convDesc,
l.dstTensorDesc,
l.fw_algo,
&s));
if (s > most) most = s;
CHECK_CUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle(),
l.srcTensorDesc,
l.ddstTensorDesc,
l.convDesc,
l.dweightDesc,
l.bf_algo,
&s));
if (s > most) most = s;
CHECK_CUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle(),
l.weightDesc,
l.ddstTensorDesc,
l.convDesc,
l.dsrcTensorDesc,
l.bd_algo,
&s));
if (s > most) most = s;
return most;
}
*/
#endif
return 0;
}
/* Build a fully-connected layer of `outputs` neurons over `inputs` features.
 * total_batch = batch*steps covers recurrent use. NOTE(review): calloc
 * results are not checked; large layers could return NULL buffers. */
connected_layer make_connected_layer(int batch, int steps, int inputs, int outputs, ACTIVATION activation, int batch_normalize)
{
int total_batch = batch*steps;
int i;
connected_layer l = { (LAYER_TYPE)0 };
l.type = CONNECTED;
l.inputs = inputs;
l.outputs = outputs;
l.batch= batch;
l.batch_normalize = batch_normalize;
/* spatially a 1x1 layer so convolutional code paths (cuDNN) can be reused */
l.h = 1;
l.w = 1;
l.c = inputs;
l.out_h = 1;
l.out_w = 1;
l.out_c = outputs;
l.n = l.out_c;
l.size = 1;
l.stride = 1;
l.pad = 0;
l.activation = activation;
l.learning_rate_scale = 1;
l.output = (float*)calloc(total_batch * outputs, sizeof(float));
l.delta = (float*)calloc(total_batch * outputs, sizeof(float));
l.weight_updates = (float*)calloc(inputs * outputs, sizeof(float));
l.bias_updates = (float*)calloc(outputs, sizeof(float));
l.weights = (float*)calloc(outputs * inputs, sizeof(float));
l.biases = (float*)calloc(outputs, sizeof(float));
l.forward = forward_connected_layer;
l.backward = backward_connected_layer;
l.update = update_connected_layer;
//float scale = 1./sqrt(inputs);
/* He-style initialization scaled by fan-in */
float scale = sqrt(2.f/inputs);
for(i = 0; i < outputs*inputs; ++i){
l.weights[i] = scale*rand_uniform(-1, 1);
}
for(i = 0; i < outputs; ++i){
l.biases[i] = 0;
}
if(batch_normalize){
l.scales = (float*)calloc(outputs, sizeof(float));
l.scale_updates = (float*)calloc(outputs, sizeof(float));
for(i = 0; i < outputs; ++i){
l.scales[i] = 1;
}
l.mean = (float*)calloc(outputs, sizeof(float));
l.mean_delta = (float*)calloc(outputs, sizeof(float));
l.variance = (float*)calloc(outputs, sizeof(float));
l.variance_delta = (float*)calloc(outputs, sizeof(float));
l.rolling_mean = (float*)calloc(outputs, sizeof(float));
l.rolling_variance = (float*)calloc(outputs, sizeof(float));
l.x = (float*)calloc(total_batch * outputs, sizeof(float));
l.x_norm = (float*)calloc(total_batch * outputs, sizeof(float));
}
#ifdef GPU
l.forward_gpu = forward_connected_layer_gpu;
l.backward_gpu = backward_connected_layer_gpu;
l.update_gpu = update_connected_layer_gpu;
l.weights_gpu = cuda_make_array(l.weights, outputs*inputs);
l.biases_gpu = cuda_make_array(l.biases, outputs);
l.weight_updates_gpu = cuda_make_array(l.weight_updates, outputs*inputs);
l.bias_updates_gpu = cuda_make_array(l.bias_updates, outputs);
l.output_gpu = cuda_make_array(l.output, outputs*total_batch);
l.delta_gpu = cuda_make_array(l.delta, outputs*total_batch);
if (batch_normalize) {
l.scales_gpu = cuda_make_array(l.scales, outputs);
l.scale_updates_gpu = cuda_make_array(l.scale_updates, outputs);
l.mean_gpu = cuda_make_array(l.mean, outputs);
l.variance_gpu = cuda_make_array(l.variance, outputs);
/* rolling/delta buffers are seeded from mean/variance (all zeros here) */
l.rolling_mean_gpu = cuda_make_array(l.mean, outputs);
l.rolling_variance_gpu = cuda_make_array(l.variance, outputs);
l.mean_delta_gpu = cuda_make_array(l.mean, outputs);
l.variance_delta_gpu = cuda_make_array(l.variance, outputs);
l.x_gpu = cuda_make_array(l.output, total_batch*outputs);
l.x_norm_gpu = cuda_make_array(l.output, total_batch*outputs);
}
#ifdef CUDNN
create_convolutional_cudnn_tensors(&l);
cudnn_convolutional_setup(&l, cudnn_fastest); // cudnn_fastest, cudnn_smallest
l.workspace_size = get_connected_workspace_size(l);
#endif // CUDNN
#endif // GPU
fprintf(stderr, "connected %4d -> %4d\n", inputs, outputs);
return l;
}
/* SGD with momentum and weight decay on the CPU buffers. learning_rate is
 * divided by batch because gradients were summed over the batch. */
void update_connected_layer(connected_layer l, int batch, float learning_rate, float momentum, float decay)
{
axpy_cpu(l.outputs, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
scal_cpu(l.outputs, momentum, l.bias_updates, 1);
if(l.batch_normalize){
axpy_cpu(l.outputs, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
scal_cpu(l.outputs, momentum, l.scale_updates, 1);
}
/* weight decay is folded into the update buffer before the step */
axpy_cpu(l.inputs*l.outputs, -decay*batch, l.weights, 1, l.weight_updates, 1);
axpy_cpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
scal_cpu(l.inputs*l.outputs, momentum, l.weight_updates, 1);
}
/* CPU forward pass: gemm, optional batch-norm, bias add, then activation. */
void forward_connected_layer(connected_layer l, network_state state)
{
int i;
fill_cpu(l.outputs*l.batch, 0, l.output, 1);
int m = l.batch;
int k = l.inputs;
int n = l.outputs;
float *a = state.input;
float *b = l.weights;
float *c = l.output;
/* TB=1: C[m x n] += A[m x k] * B[n x k]^T */
gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
if(l.batch_normalize){
if(state.train){
mean_cpu(l.output, l.batch, l.outputs, 1, l.mean);
variance_cpu(l.output, l.mean, l.batch, l.outputs, 1, l.variance);
/* exponential moving average of batch statistics (decay 0.95) */
scal_cpu(l.outputs, .95f, l.rolling_mean, 1);
axpy_cpu(l.outputs, .05f, l.mean, 1, l.rolling_mean, 1);
scal_cpu(l.outputs, .95f, l.rolling_variance, 1);
axpy_cpu(l.outputs, .05f, l.variance, 1, l.rolling_variance, 1);
/* keep pre-normalization (x) and normalized (x_norm) copies for backward */
copy_cpu(l.outputs*l.batch, l.output, 1, l.x, 1);
normalize_cpu(l.output, l.mean, l.variance, l.batch, l.outputs, 1);
copy_cpu(l.outputs*l.batch, l.output, 1, l.x_norm, 1);
} else {
normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.outputs, 1);
}
scale_bias(l.output, l.scales, l.batch, l.outputs, 1);
}
#pragma omp parallel for
for(i = 0; i < l.batch; ++i){
axpy_cpu(l.outputs, 1, l.biases, 1, l.output + i*l.outputs, 1);
}
activate_array(l.output, l.outputs*l.batch, l.activation);
}
/* CPU backward pass: accumulate bias/scale/weight gradients, then propagate
 * delta to the previous layer when it keeps a delta buffer. */
void backward_connected_layer(connected_layer l, network_state state)
{
int i;
gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
for(i = 0; i < l.batch; ++i){
axpy_cpu(l.outputs, 1, l.delta + i*l.outputs, 1, l.bias_updates, 1);
}
if(l.batch_normalize){
backward_scale_cpu(l.x_norm, l.delta, l.batch, l.outputs, 1, l.scale_updates);
scale_bias(l.delta, l.scales, l.batch, l.outputs, 1);
mean_delta_cpu(l.delta, l.variance, l.batch, l.outputs, 1, l.mean_delta);
variance_delta_cpu(l.x, l.delta, l.mean, l.variance, l.batch, l.outputs, 1, l.variance_delta);
normalize_delta_cpu(l.x, l.mean, l.variance, l.mean_delta, l.variance_delta, l.batch, l.outputs, 1, l.delta);
}
/* weight gradient: dW += delta^T * input */
int m = l.outputs;
int k = l.batch;
int n = l.inputs;
float *a = l.delta;
float *b = state.input;
float *c = l.weight_updates;
gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);
m = l.batch;
k = l.outputs;
n = l.inputs;
a = l.delta;
b = l.weights;
c = state.delta;
if(c) gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
/* Fold batch-norm statistics into weights and biases so that inference can
 * skip the normalization step; resets BN parameters to the identity. */
void denormalize_connected_layer(layer l)
{
int i, j;
for(i = 0; i < l.outputs; ++i){
/* epsilon matches the normalization epsilon used elsewhere */
float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .000001f);
for(j = 0; j < l.inputs; ++j){
l.weights[i*l.inputs + j] *= scale;
}
l.biases[i] -= l.rolling_mean[i] * scale;
l.scales[i] = 1;
l.rolling_mean[i] = 0;
l.rolling_variance[i] = 1;
}
}
/* Print summary statistics of the layer's parameters to stdout. */
void statistics_connected_layer(layer l)
{
if(l.batch_normalize){
printf("Scales ");
print_statistics(l.scales, l.outputs);
/*
printf("Rolling Mean ");
print_statistics(l.rolling_mean, l.outputs);
printf("Rolling Variance ");
print_statistics(l.rolling_variance, l.outputs);
*/
}
printf("Biases ");
print_statistics(l.biases, l.outputs);
printf("Weights ");
/* NOTE(review): only the first l.outputs entries of the l.inputs*l.outputs
 * weight array are summarized -- confirm this sampling is intentional */
print_statistics(l.weights, l.outputs);
}
#ifdef GPU
/* Copy this layer's parameters from GPU memory back into the host mirrors. */
void pull_connected_layer(connected_layer l)
{
cuda_pull_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
if (l.batch_normalize){
cuda_pull_array(l.scales_gpu, l.scales, l.outputs);
cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
}
CHECK_CUDA(cudaPeekAtLastError());
}
/* Copy host parameter mirrors up into the GPU buffers. */
void push_connected_layer(connected_layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
if (l.batch_normalize){
cuda_push_array(l.scales_gpu, l.scales, l.outputs);
cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
}
CHECK_CUDA(cudaPeekAtLastError());
}
/* GPU mirror of update_connected_layer: SGD with momentum and weight decay. */
void update_connected_layer_gpu(connected_layer l, int batch, float learning_rate, float momentum, float decay)
{
axpy_ongpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
scal_ongpu(l.outputs, momentum, l.bias_updates_gpu, 1);
if(l.batch_normalize){
axpy_ongpu(l.outputs, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
scal_ongpu(l.outputs, momentum, l.scale_updates_gpu, 1);
}
axpy_ongpu(l.inputs*l.outputs, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_ongpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
scal_ongpu(l.inputs*l.outputs, momentum, l.weight_updates_gpu, 1);
}
/* GPU forward pass; with cuDNN the layer runs as a 1x1 convolution, otherwise
 * as a gemm against the transposed weight matrix. */
void forward_connected_layer_gpu(connected_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
/* m/k/n/a/b/c feed only the non-cuDNN gemm path below */
int m = l.batch;
int k = l.inputs;
int n = l.outputs;
float * a = state.input;
float * b = l.weights_gpu;
float * c = l.output_gpu;
#ifdef CUDNN
float one = 1; // alpha[0], beta[0]
float alpha = 1, beta = 0;
CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
&alpha, //&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&beta, //&one,
l.dstTensorDesc,
l.output_gpu));
#else // CUDNN
gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
#endif // CUDNN
if (l.batch_normalize) {
/* batch-norm path adds the bias itself */
forward_batchnorm_layer_gpu(l, state);
}
else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.outputs, 1);
}
//for(i = 0; i < l.batch; ++i) axpy_ongpu(l.outputs, 1, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
/* GPU backward pass: clip delta, accumulate bias/BN/weight gradients, and
 * propagate delta to the previous layer. The cuDNN branch is compiled out
 * (CUDNN_DISABLED), so the gemm path below is what runs. */
void backward_connected_layer_gpu(connected_layer l, network_state state)
{
int i;
/* clamp delta to [-1, 1] before applying the activation gradient */
constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
for(i = 0; i < l.batch; ++i){
axpy_ongpu(l.outputs, 1, l.delta_gpu + i*l.outputs, 1, l.bias_updates_gpu, 1);
}
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
}
#ifdef CUDNN_DISABLED
float one = 1;
// calculate conv weight updates
// if used: beta=1 then loss decreases faster
CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu));
if (state.delta) {
// http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
// calculate delta for the next layer
CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta));
}
#else // CUDNN
/* weight gradient: dW += delta^T * input */
int m = l.outputs;
int k = l.batch;
int n = l.inputs;
float * a = l.delta_gpu;
float * b = state.input;
float * c = l.weight_updates_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);
m = l.batch;
k = l.outputs;
n = l.inputs;
a = l.delta_gpu;
b = l.weights_gpu;
c = state.delta;
/* previous layer may not keep a delta buffer */
if(c) gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
#endif // CUDNN
}
#endif
|
distribution.h | #ifndef DISTRIBUTION_H
#define DISTRIBUTION_H
#include <cmath>
#include <omp.h>
// Upper bound used by the distribution code. NOTE(review): semantics of
// delta_max are not visible here -- confirm against dist_func's definition.
const float delta_max = 1.0f;
// Declared SIMD-vectorizable so calls inside `omp simd` loops can use vector lanes.
#pragma omp declare simd
float dist_func(const float alpha, const float rn);
#endif |
Forces_PS.h | /*
* force_ps.h
*
* Created on: Sep 20, 2016
* Author: isivkov
*/
#ifndef SRC_FORCES_PS_H_
#define SRC_FORCES_PS_H_
#include "../simulation_context.h"
#include "../periodic_function.h"
#include "../augmentation_operator.h"
#include "../Beta_projectors/beta_projectors.h"
#include "../Beta_projectors/beta_projectors_gradient.h"
#include "../potential.h"
#include "../density.h"
namespace sirius
{
class Forces_PS
{
private:
// References to the global simulation objects this class reads (not owned).
Simulation_context &ctx_;
Density &density_;
Potential &potential_;
K_point_set& kset_;
// Per-contribution force accumulators. NOTE(review): indexed as
// forces(component, atom) judging by the forces(comp, ia) usage below -- confirm.
mdarray<double,2> local_forces_;
mdarray<double,2> ultrasoft_forces_;
mdarray<double,2> nonlocal_forces_;
mdarray<double,2> nlcc_forces_;
mdarray<double,2> ewald_forces_;
/* Accumulate one k-point's non-local pseudopotential force contribution into
 * `forces` (component, atom). Works chunk-by-chunk over the beta projectors,
 * splitting bands over the k-point communicator; atoms within a chunk are
 * processed in parallel with OpenMP. */
template<typename T>
void add_k_point_contribution_to_nonlocal2(K_point& kpoint, mdarray<double,2>& forces)
{
Unit_cell &unit_cell = ctx_.unit_cell();
Beta_projectors &bp = kpoint.beta_projectors();
Beta_projectors_gradient bp_grad(&bp);
// from formula
double main_two_factor = -2.0;
#ifdef __GPU
// wave functions must live on the device while the projectors run there
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
{
if( bp.proc_unit() == GPU )
{
int nbnd = kpoint.num_occupied_bands(ispn);
kpoint.spinor_wave_functions(ispn).allocate_on_device();
kpoint.spinor_wave_functions(ispn).copy_to_device(0, nbnd);
}
}
#endif
bp_grad.prepare();
bp.prepare();
for (int icnk = 0; icnk < bp.num_beta_chunks(); icnk++)
{
// generate chunk for inner product of beta gradient
bp_grad.generate(icnk);
// generate chunk for inner product of beta
bp.generate(icnk);
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
{
/* total number of occupied bands for this spin */
int nbnd = kpoint.num_occupied_bands(ispn);
// inner product of beta gradient and WF
bp_grad.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), 0, nbnd);
// get inner product
std::array<matrix<T>, 3> bp_grad_phi_chunk = bp_grad.beta_phi<T>(icnk, nbnd);
// inner product of beta and WF
bp.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), 0, nbnd);
// get inner product
matrix<T> bp_phi_chunk = bp.beta_phi<T>(icnk, nbnd);
splindex<block> spl_nbnd(nbnd, kpoint.comm().size(), kpoint.comm().rank());
int nbnd_loc = spl_nbnd.local_size();
// NOTE(review): bnd_offset is unused in this method
int bnd_offset = spl_nbnd.global_offset();
#pragma omp parallel for
for(int ia_chunk = 0; ia_chunk < bp.beta_chunk(icnk).num_atoms_; ia_chunk++)
{
int ia = bp.beta_chunk(icnk).desc_(3, ia_chunk);
int offs = bp.beta_chunk(icnk).desc_(1, ia_chunk);
// NOTE(review): nbf is unused; the loops below use mt_lo_basis_size() instead
int nbf = bp.beta_chunk(icnk).desc_(0, ia_chunk);
int iat = unit_cell.atom(ia).type_id();
// linalg<CPU>::gemm(0, 0, nbf, n__, nbf,
// op_.at<CPU>(packed_mtrx_offset_(ia), ispn__), nbf,
// beta_phi.at<CPU>(offs, 0), nbeta,
// work_.at<CPU>(offs), nbeta);
// mpi
// TODO make in smart way with matrix multiplication
for (int ibnd_loc = 0; ibnd_loc < nbnd_loc; ibnd_loc++)
{
int ibnd = spl_nbnd[ibnd_loc];
// D - e_n Q contribution for this atom/band (augmented case includes Q)
auto D_aug_mtrx = [&](int i, int j)
{
if (unit_cell.atom(ia).type().pp_desc().augment) {
return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd) *
ctx_.augmentation_op(iat).q_mtrx(i, j);
} else {
return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd);
}
};
for(int ibf = 0; ibf < unit_cell.atom(ia).type().mt_lo_basis_size(); ibf++ )
{
for(int jbf = 0; jbf < unit_cell.atom(ia).type().mt_lo_basis_size(); jbf++ )
{
// calc scalar part of the forces
double_complex scalar_part = main_two_factor *
kpoint.band_occupancy(ibnd + ispn * ctx_.num_fv_states()) * kpoint.weight() *
D_aug_mtrx(ibf, jbf) *
std::conj(bp_phi_chunk(offs + jbf, ibnd));
// multiply scalar part by gradient components
for(int comp: {0,1,2}) forces(comp,ia) += (scalar_part * bp_grad_phi_chunk[comp](offs + ibf, ibnd)).real();
}
}
}
}
}
}
bp.dismiss();
bp_grad.dismiss();
#ifdef __GPU
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
{
if( bp.proc_unit() == GPU )
{
kpoint.spinor_wave_functions(ispn).deallocate_on_device();
}
}
#endif
}
//---------------------------------------------------------------
//---------------------------------------------------------------
/* Adds this k-point's non-local (beta-projector) contribution to the atomic
 * forces, accumulating into forces(comp, ia) for comp = x,y,z.
 *
 * Bands are distributed over the k-point communicator: the inner products are
 * computed only for the local slice [bnd_offset, bnd_offset + nbnd_loc) and
 * the resulting matrices are therefore indexed by the *local* band index.
 *
 * Fix: removed leftover debug printf statements ("nl0".."nl3", size dumps)
 * that polluted stdout on every call.
 *
 * \param kpoint  k-point whose occupied bands contribute
 * \param forces  3 x num_atoms accumulator for the force components
 */
template<typename T>
void add_k_point_contribution_to_nonlocal(K_point& kpoint, mdarray<double,2>& forces)
{
    Unit_cell &unit_cell = ctx_.unit_cell();
    Beta_projectors &bp = kpoint.beta_projectors();
    /* gradient of the beta projectors w.r.t. atomic displacements */
    Beta_projectors_gradient bp_grad(&bp);
    // from formula
    double main_two_factor = -2.0;
    for (int icnk = 0; icnk < bp.num_beta_chunks(); icnk++)
    {
        // generate chunk for inner product of beta gradient
        bp_grad.generate(icnk);
        // generate chunk for inner product of beta
        bp.generate(icnk);
        for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
        {
            /* total number of occupied bands for this spin */
            int nbnd = kpoint.num_occupied_bands(ispn);
            splindex<block> spl_nbnd(nbnd, kpoint.comm().size(), kpoint.comm().rank());
            int nbnd_loc = spl_nbnd.local_size();
            int bnd_offset = spl_nbnd.global_offset();
            // inner product of beta and WF (local band slice only)
            bp.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), bnd_offset, nbnd_loc);
            // get inner product
            matrix<T> bp_phi_chunk = bp.beta_phi<T>(icnk, nbnd_loc);
            // inner product of beta gradient and WF (local band slice only)
            bp_grad.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), bnd_offset, nbnd_loc);
            // get inner product
            std::array<matrix<T>, 3> bp_grad_phi_chunk = bp_grad.beta_phi<T>(icnk, nbnd_loc);
            #pragma omp parallel for
            for(int ia_chunk = 0; ia_chunk < bp.beta_chunk(icnk).num_atoms_; ia_chunk++)
            {
                /* global atom index and offset of this atom's beta functions
                   inside the chunk */
                int ia = bp.beta_chunk(icnk).desc_(3, ia_chunk);
                int offs = bp.beta_chunk(icnk).desc_(1, ia_chunk);
                int iat = unit_cell.atom(ia).type_id();
                // mpi
                // TODO make in smart way with matrix multiplication
                for (int ibnd_loc = 0; ibnd_loc < nbnd_loc; ibnd_loc++)
                {
                    int ibnd = spl_nbnd[ibnd_loc];
                    /* D-matrix minus band energy times the augmentation
                       Q-matrix (Q only for augmented pseudopotentials) */
                    auto D_aug_mtrx = [&](int i, int j)
                    {
                        if (unit_cell.atom(ia).type().pp_desc().augment) {
                            return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd) *
                                ctx_.augmentation_op(iat).q_mtrx(i, j);
                        } else {
                            return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd);
                        }
                    };
                    for(int ibf = 0; ibf < unit_cell.atom(ia).type().mt_lo_basis_size(); ibf++ )
                    {
                        for(int jbf = 0; jbf < unit_cell.atom(ia).type().mt_lo_basis_size(); jbf++ )
                        {
                            // calc scalar part of the forces
                            double_complex scalar_part = main_two_factor *
                                kpoint.band_occupancy(ibnd + ispn * ctx_.num_fv_states()) * kpoint.weight() *
                                D_aug_mtrx(ibf, jbf) *
                                std::conj(bp_phi_chunk(offs + jbf, ibnd_loc));
                            // multiply scalar part by gradient components
                            for(int comp: {0,1,2}) forces(comp,ia) += (scalar_part * bp_grad_phi_chunk[comp](offs + ibf, ibnd_loc)).real();
                        }
                    }
                }
            }
        }
    }
}
/* Symmetrizes the raw forces into sym_forces according to the crystal
   symmetry (implementation elsewhere in the project). */
void symmetrize_forces(mdarray<double,2>& unsym_forces, mdarray<double,2>& sym_forces );
public:
/* Constructor: stores references to the simulation components and allocates
 * one 3 x num_atoms array per force contribution (x, y, z for each atom). */
Forces_PS(Simulation_context &ctx__,
          Density& density__,
          Potential& potential__,
          K_point_set& kset__)
    : ctx_(ctx__)
    , density_(density__)
    , potential_(potential__)
    , kset_(kset__)
{
    local_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
    ultrasoft_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
    nonlocal_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
    nlcc_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
    ewald_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
}
/* Compute an individual force contribution into the given 3 x num_atoms
   array (implementations elsewhere in the project). */
void calc_local_forces(mdarray<double,2>& forces);
void calc_ultrasoft_forces(mdarray<double,2>& forces);
void calc_nonlocal_forces(mdarray<double,2>& forces);
void calc_nlcc_forces(mdarray<double,2>& forces);
void calc_ewald_forces(mdarray<double,2>& forces);
/* Computes all of the above into the cached member arrays. */
void calc_forces_contributions();
/* Read-only accessors for the cached per-contribution force arrays. */
mdarray<double,2> const& local_forces()
{
    return local_forces_;
}
mdarray<double,2> const& ultrasoft_forces()
{
    return ultrasoft_forces_;
}
mdarray<double,2> const& nonlocal_forces()
{
    return nonlocal_forces_;
}
mdarray<double,2> const& nlcc_forces()
{
    return nlcc_forces_;
}
mdarray<double,2> const& ewald_forces()
{
    return ewald_forces_;
}
/* Total force: the first overload returns a new array, the second
   accumulates into the caller's array. */
mdarray<double,2> sum_forces();
void sum_forces(mdarray<double,2>& inout_total_forces);
};
}
#endif /* SRC_FORCES_PS_H_ */
|
effects.c | #define _POSIX_C_SOURCE 200809
#include <omp.h>
#include <stdlib.h>
#include <stdbool.h>
#include <dlfcn.h>
#include <string.h>
#include <errno.h>
#include <sys/wait.h>
#include <unistd.h>
#include <spawn.h>
#include "effects.h"
#include "log.h"
extern char **environ;
/*
 * One horizontal pass of a sliding-window box blur.  For every row a running
 * sum of the (2*radius + 1)-pixel window is kept per colour channel; samples
 * past the image edge contribute nothing, so edge pixels average over a
 * truncated window.
 */
static void blur_h(uint32_t *dest, uint32_t *src, int width, int height,
		int radius) {
	double weight = 1.0 / (radius * 2 + 1);
#pragma omp parallel for
	for (int row = 0; row < height; ++row) {
		int base = row * width;
		double sum_r = 0.0, sum_g = 0.0, sum_b = 0.0;
		/* start `radius` pixels left of the row so the window is fully
		 * primed by the time the first output pixel is written */
		for (int col = -radius; col < width; ++col) {
			int leaving = col - radius - 1;
			if (leaving >= 0) {
				uint32_t px = src[base + leaving];
				sum_r -= weight * ((px & 0xff0000) >> 16);
				sum_g -= weight * ((px & 0x00ff00) >> 8);
				sum_b -= weight * (px & 0x0000ff);
			}
			int entering = col + radius;
			if (entering < width) {
				uint32_t px = src[base + entering];
				sum_r += weight * ((px & 0xff0000) >> 16);
				sum_g += weight * ((px & 0x00ff00) >> 8);
				sum_b += weight * (px & 0x0000ff);
			}
			if (col < 0)
				continue;
			dest[base + col] =
				(((uint32_t)(sum_r + 0.5) & 0xff) << 16) |
				(((uint32_t)(sum_g + 0.5) & 0xff) << 8) |
				((uint32_t)(sum_b + 0.5) & 0xff);
		}
	}
}
/*
 * One vertical pass of a sliding-window box blur; the column-wise twin of
 * blur_h (see blur_h for the windowing scheme).
 */
static void blur_v(uint32_t *dest, uint32_t *src, int width, int height,
		int radius) {
	double weight = 1.0 / (radius * 2 + 1);
#pragma omp parallel for
	for (int col = 0; col < width; ++col) {
		double sum_r = 0.0, sum_g = 0.0, sum_b = 0.0;
		for (int row = -radius; row < height; ++row) {
			int leaving = row - radius - 1;
			if (leaving >= 0) {
				uint32_t px = src[leaving * width + col];
				sum_r -= weight * ((px & 0xff0000) >> 16);
				sum_g -= weight * ((px & 0x00ff00) >> 8);
				sum_b -= weight * (px & 0x0000ff);
			}
			int entering = row + radius;
			if (entering < height) {
				uint32_t px = src[entering * width + col];
				sum_r += weight * ((px & 0xff0000) >> 16);
				sum_g += weight * ((px & 0x00ff00) >> 8);
				sum_b += weight * (px & 0x0000ff);
			}
			if (row < 0)
				continue;
			dest[row * width + col] =
				(((uint32_t)(sum_r + 0.5) & 0xff) << 16) |
				(((uint32_t)(sum_g + 0.5) & 0xff) << 8) |
				((uint32_t)(sum_b + 0.5) & 0xff);
		}
	}
}
/* One full box blur: horizontal pass from `src` into `scratch`, then vertical
 * pass from `scratch` into `dest`.  `scratch` must hold width*height pixels. */
static void blur_once(uint32_t *dest, uint32_t *src, uint32_t *scratch,
		int width, int height, int radius) {
	blur_h(scratch, src, width, height, radius);
	blur_v(dest, scratch, width, height, radius);
}
// This effect_blur function, and the associated blur_* functions,
// are my own adaptations of code in yvbbrjdr's i3lock-fancy-rapid:
// https://github.com/yvbbrjdr/i3lock-fancy-rapid
/*
 * Approximates a Gaussian blur by running `times` box blurs of the given
 * radius.  `src` is used as a ping-pong buffer and is clobbered.
 * Fix: the scratch allocation was previously unchecked; on allocation
 * failure the image is now left unblurred instead of dereferencing NULL.
 */
static void effect_blur(uint32_t *dest, uint32_t *src, int width, int height,
		int radius, int times) {
	uint32_t *origdest = dest;
	uint32_t *scratch = malloc(width * height * sizeof(*scratch));
	if (scratch == NULL)
		return;
	blur_once(dest, src, scratch, width, height, radius);
	for (int i = 0; i < times - 1; ++i) {
		/* flip src/dest for every extra iteration */
		uint32_t *tmp = src;
		src = dest;
		dest = tmp;
		blur_once(dest, src, scratch, width, height, radius);
	}
	free(scratch);
	// We're flipping between using dest and src;
	// if the last buffer we used was src, copy that over to dest.
	if (dest != origdest)
		memcpy(origdest, dest, width * height * sizeof(*dest));
}
/*
 * Nearest-neighbour scale of `src` (swidth x sheight) into `dest`, whose
 * dimensions are the source dimensions multiplied by `scale` (truncated).
 */
static void effect_scale(uint32_t *dest, uint32_t *src, int swidth, int sheight,
		double scale) {
	int dwidth = swidth * scale;
	int dheight = sheight * scale;
	double inv = 1.0 / scale;
#pragma omp parallel for
	for (int dy = 0; dy < dheight; ++dy) {
		int sy = dy * inv;
		if (sy >= sheight)
			continue;
		uint32_t *drow = &dest[dy * dwidth];
		uint32_t *srow = &src[sy * swidth];
		for (int dx = 0; dx < dwidth; ++dx) {
			int sx = dx * inv;
			if (sx < swidth)
				drow[dx] = srow[sx];
		}
	}
}
/*
 * Converts the image to greyscale in place using the Rec. 601 luma weights
 * (0.2989 R + 0.5870 G + 0.1140 B).
 */
static void effect_greyscale(uint32_t *data, int width, int height) {
#pragma omp parallel for
	for (int y = 0; y < height; ++y) {
		uint32_t *row = &data[y * width];
		for (int x = 0; x < width; ++x) {
			uint32_t px = row[x];
			int r = (px & 0xff0000) >> 16;
			int g = (px & 0x00ff00) >> 8;
			int b = px & 0x0000ff;
			int luma = 0.2989 * r + 0.5870 * g + 0.1140 * b;
			/* the weights sum to just under 1, but clamp defensively */
			if (luma < 0)
				luma = 0;
			else if (luma > 255)
				luma = 255;
			row[x] = (uint32_t)luma << 16 | luma << 8 | luma;
		}
	}
}
/*
 * Darkens the image towards its edges.  `base` is the minimum brightness
 * multiplier (clamped to [0,1]); `factor` is the extra brightness at the
 * centre (clamped so base + factor <= 1).  The falloff polynomial
 * 16*x*(1-x)*y*(1-y) is 1 at the centre and 0 at the corners.
 */
static void effect_vignette(uint32_t *data, int width, int height,
		double base, double factor) {
	base = fmin(1, fmax(0, base));
	factor = fmin(1 - base, fmax(0, factor));
#pragma omp parallel for
	for (int y = 0; y < height; ++y) {
		double yf = (y * 1.0) / height;
		for (int x = 0; x < width; ++x) {
			double xf = (x * 1.0) / width;
			double mult = base + factor
				* 16 * xf * yf * (1.0 - xf) * (1.0 - yf);
			int idx = y * width + x;
			int r = (data[idx] & 0xff0000) >> 16;
			int g = (data[idx] & 0x00ff00) >> 8;
			int b = data[idx] & 0x0000ff;
			r = (int)(r * mult) & 0xFF;
			g = (int)(g * mult) & 0xFF;
			b = (int)(b * mult) & 0xFF;
			data[idx] = r << 16 | g << 8 | b;
		}
	}
}
/*
 * Runs a user-supplied effect from a shared object at `path`.  The plugin may
 * export either `swaylock_effect` (called once with the whole buffer) or
 * `swaylock_pixel` (called per pixel).  Logs an error if the library can't be
 * loaded or exports neither symbol.
 * Fix: the dlopen() handle was previously leaked when neither entry point
 * was found; it is now closed on that path too.
 */
static void effect_custom(uint32_t *data, int width, int height,
		char *path) {
	void *dl = dlopen(path, RTLD_LAZY);
	if (dl == NULL) {
		swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror());
		return;
	}
	void (*effect_func)(uint32_t *data, int width, int height) =
		dlsym(dl, "swaylock_effect");
	if (effect_func != NULL) {
		effect_func(data, width, height);
		dlclose(dl);
		return;
	}
	uint32_t (*pixel_func)(uint32_t pix, int x, int y, int width, int height) =
		dlsym(dl, "swaylock_pixel");
	if (pixel_func != NULL) {
#pragma omp parallel for
		for (int y = 0; y < height; ++y) {
			for (int x = 0; x < width; ++x) {
				data[y * width + x] =
					pixel_func(data[y * width + x], x, y, width, height);
			}
		}
		dlclose(dl);
		return;
	}
	/* neither entry point found: log the lookup error before dropping the
	 * handle (dlclose may reset dlerror) */
	swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror());
	dlclose(dl);
}
/*
 * Applies each effect in `effects` to `surface` in order.  Blur and scale
 * render into a freshly created surface and destroy the old one, so the
 * returned surface may differ from the argument; the other effects modify
 * the pixel data in place.
 * NOTE(review): the effect_* helpers index the pixel buffer as
 * width*height contiguous uint32s — this assumes the cairo image stride
 * equals width*4 for CAIRO_FORMAT_RGB24; confirm for the sizes used.
 */
cairo_surface_t *swaylock_effects_run(cairo_surface_t *surface,
		struct swaylock_effect *effects, int count) {
	for (int i = 0; i < count; ++i) {
		struct swaylock_effect *effect = &effects[i];
		switch (effect->tag) {
		case EFFECT_BLUR: {
			/* blur reads src and writes dest, so it needs a second surface */
			cairo_surface_t *surf = cairo_image_surface_create(
					CAIRO_FORMAT_RGB24,
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface));
			if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
				swaylock_log(LOG_ERROR, "Failed to create surface for blur effect");
				cairo_surface_destroy(surf);
				break;
			}
			effect_blur(
					(uint32_t *)cairo_image_surface_get_data(surf),
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.blur.radius, effect->e.blur.times);
			cairo_surface_flush(surf);
			cairo_surface_destroy(surface);
			surface = surf;
			break;
		}
		case EFFECT_SCALE: {
			/* destination dimensions must match effect_scale's truncation */
			cairo_surface_t *surf = cairo_image_surface_create(
					CAIRO_FORMAT_RGB24,
					cairo_image_surface_get_width(surface) * effect->e.scale,
					cairo_image_surface_get_height(surface) * effect->e.scale);
			if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
				swaylock_log(LOG_ERROR, "Failed to create surface for scale effect");
				cairo_surface_destroy(surf);
				break;
			}
			effect_scale(
					(uint32_t *)cairo_image_surface_get_data(surf),
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.scale);
			cairo_surface_flush(surf);
			cairo_surface_destroy(surface);
			surface = surf;
			break;
		}
		case EFFECT_GREYSCALE: {
			/* in-place effects only need a flush afterwards */
			effect_greyscale(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface));
			cairo_surface_flush(surface);
			break;
		}
		case EFFECT_VIGNETTE: {
			effect_vignette(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.vignette.base,
					effect->e.vignette.factor);
			cairo_surface_flush(surface);
			break;
		}
		case EFFECT_CUSTOM: {
			effect_custom(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.custom);
			cairo_surface_flush(surface);
			break;
		} }
	}
	return surface;
}
|
jacobi-block-task.ref.c | #include <sys/time.h>
#include <time.h>
#include <stdio.h>
/* Returns a wall-clock timestamp in nanoseconds: the Mach calendar clock on
 * macOS, CLOCK_MONOTONIC elsewhere. */
static unsigned long long current_time_ns() {
#ifdef __MACH__
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    return 1000000000ULL * (unsigned long long)mts.tv_sec
            + (unsigned long long)mts.tv_nsec;
#else
    struct timespec ts = {0, 0};
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return 1000000000ULL * (unsigned long long)ts.tv_sec
            + (unsigned long long)ts.tv_nsec;
#endif
}
# include "poisson.h"
/* #pragma omp task/taskwait version of SWEEP. */
/*
 * Performs Jacobi iterations itold+1 .. itnew on the nx x ny grid, working in
 * square blocks of block_size (0 means one block spanning nx).  Each
 * iteration first copies the current estimate block-by-block (copy_block),
 * then computes a new estimate (compute_estimate); each phase spawns one
 * OpenMP task per block and synchronises with taskwait so the two phases
 * of an iteration stay ordered.
 * NOTE(review): max_blocks_* use integer division, so trailing rows/columns
 * are not covered when block_size does not divide nx/ny — confirm callers
 * always pass an exact divisor.
 */
void sweep (int nx, int ny, double dx, double dy, double *f_,
        int itold, int itnew, double *u_, double *unew_, int block_size)
{
    int it;
    int block_x, block_y;
    if (block_size == 0)
        block_size = nx;
    int max_blocks_x = (nx / block_size);
    int max_blocks_y = (ny / block_size);
#pragma omp parallel shared(u_, unew_, f_, max_blocks_x, max_blocks_y, nx, ny, dx, dy, itold, itnew, block_size) private(it, block_x, block_y)
    {
        /* one thread creates the tasks; the whole team executes them */
#pragma omp single
        {
            for (it = itold + 1; it <= itnew; it++) {
                // Save the current estimate.
                for (block_x = 0; block_x < max_blocks_x; block_x++) {
                    for (block_y = 0; block_y < max_blocks_y; block_y++) {
#pragma omp task shared(u_, unew_, nx, ny, block_size) firstprivate(block_x, block_y)
                        copy_block(nx, ny, block_x, block_y, u_, unew_, block_size);
                    }
                }
                /* all copies must finish before the new estimate is computed */
#pragma omp taskwait
                ;
                // Compute a new estimate.
                for (block_x = 0; block_x < max_blocks_x; block_x++) {
                    for (block_y = 0; block_y < max_blocks_y; block_y++) {
#pragma omp task default(none) shared(u_, unew_, f_, dx, dy, nx, ny, block_size) firstprivate(block_x, block_y)
                        compute_estimate(block_x, block_y, u_, unew_, f_, dx, dy,
                                nx, ny, block_size);
                    }
                }
#pragma omp taskwait
                ;
            }
        }
    }
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 67108864
/* Reports a malformed drawing primitive and aborts the enclosing construct:
   note the macro body assigns `status` and executes `break`, so it can only
   be used where both are in scope. */
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
  Typedef declarations.
*/
/* A polygon edge: a run of points plus cached bounds and rasterization
   state (see ConvertPathToPolygon, which builds these). */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;

  double
    scanline;

  PointInfo
    *points;

  size_t
    number_points;

  ssize_t
    direction;  /* 1 when the original run of points went downward */

  MagickBooleanType
    ghostline;  /* edge came from an invisible (ghost) subpath */

  size_t
    highwater;
} EdgeInfo;
/* An ellipse element: centre, axes, and rotation angle. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;
/* Mutable state threaded through the MVG trace routines: the growing
   primitive array, its allocated extent, the current write offset, and the
   current point. */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;
/* A path converted to its sorted-edge rendering form. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;
/* Path component codes; EndCode terminates a PathInfo array. */
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;
/* One component of a vector path: a point tagged with its code. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* no NULL check: AcquireCriticalMemory presumably never returns NULL
     (aborts on failure) — confirm against its definition */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /* start from a default-initialized DrawInfo; a NULL draw_info therefore
     yields the defaults */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /* deep-copy all owned strings and images; plain members are assigned */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* the pattern is terminated by a (near-)zero entry; count x entries.
         NOTE(review): 2*x+2 doubles are allocated but only x+1 copied —
         presumably slack for later extension; confirm */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): message reused from the dash-pattern branch above —
           looks like a copy-paste; the failure here is the gradient stops */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* Three-way comparison helper for DrawCompareEdges: -1/0/1, and 0 when the
   difference is not ordered (ties and NaNs fall through to the next key). */
static int DrawEdgeOrder(const double p,const double q)
{
  if ((p-q) < 0.0)
    return(-1);
  if ((p-q) > 0.0)
    return(1);
  return(0);
}

/*
  qsort() comparator: orders edges for a right-handed coordinate system by
  first point (y then x), then by the initial-segment cross product, then by
  second point (y then x).
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  register const PointInfo
    *p,
    *q;

  int
    order;

  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  if ((order=DrawEdgeOrder(p[0].y,q[0].y)) != 0)
    return(order);
  if ((order=DrawEdgeOrder(p[0].x,q[0].x)) != 0)
    return(order);
  if ((order=DrawEdgeOrder((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
       (q[1].x-q[0].x))) != 0)
    return(order);
  if ((order=DrawEdgeOrder(p[1].y,q[1].y)) != 0)
    return(order);
  return(DrawEdgeOrder(p[1].x,q[1].x));
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
/*
  Reverses the point array in place by swapping from both ends toward the
  middle.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
/*
  Converts a path (PathInfo array terminated by EndCode) to the sorted edge
  form used by the rasterizer.  Each maximal monotonic-in-y run of points
  becomes one EdgeInfo; runs that went upward are reversed so every edge's
  points are stored top to bottom, and the edges are finally sorted with
  DrawCompareEdges.
  NOTE(review): on allocation failure this returns NULL without releasing
  previously acquired memory (polygon_info, edges, points) — confirm callers
  treat that as a fatal condition.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* initialize edge slot 0 to a harmless empty edge */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            /* flush the edge accumulated so far, growing the edge array
               geometrically when full */
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    /* 1 = downward (or rightward on a horizontal tie), -1 = upward */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        /* the new edge starts from the last point of the finished one */
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* flush the final edge; a single leftover point cannot form an edge
         and is discarded */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* sort edges for the scanline rasterizer */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/*
  Log every element of a vector path, one line per point, until EndCode.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *mnemonic;

    /*
      Map the path code to its mnemonic.
    */
    switch (p->code)
    {
      case GhostlineCode: mnemonic="moveto ghostline"; break;
      case OpenCode: mnemonic="moveto open"; break;
      case MoveToCode: mnemonic="moveto"; break;
      case LineToCode: mnemonic="lineto"; break;
      default: mnemonic="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g %s",p->point.x,
      p->point.y,mnemonic);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
/*
  ConvertPrimitiveToPath() flattens a PrimitiveInfo list into a PathInfo
  vector path: MoveTo/LineTo records terminated by EndCode.  Subpaths that
  are not closed are marked OpenCode and closed back to their start with a
  GhostlineCode/LineToCode pair.  Returns NULL for primitives that have no
  path representation or on allocation failure.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (duplicate elimination) */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,  /* points remaining in the current subpath */
    start;        /* path_info index of the current subpath's first record */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* these primitives have no vector-path equivalent */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* count points; worst case each point yields 3 records, plus EndCode */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    /* always emit MoveTo and subpath-final points; otherwise skip points
       within MagickEpsilon of the previous one */
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;  /* closed subpath needs no ghostline */
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;  /* ghost-close back to the subpath start */
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* trim the allocation down to the n records emitted plus EndCode */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
/*
  DestroyDrawInfo() releases every dynamically allocated member of the
  DrawInfo, invalidates its signature, and frees the structure itself.
  Always returns NULL so callers can write
  draw_info=DestroyDrawInfo(draw_info);
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* owned strings */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  /* owned pattern images */
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  /* owned arrays */
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  /* owned masks */
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* poison the signature so a stale pointer is caught by the asserts */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  DestroyEdge() frees the point list of the given edge and compacts the edge
  array over the vacated slot.  Returns the new edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  EdgeInfo
    *edges;

  assert(edge < polygon_info->number_edges);
  edges=polygon_info->edges;
  edges[edge].points=(PointInfo *) RelinquishMagickMemory(edges[edge].points);
  polygon_info->number_edges--;
  /*
    Shift any trailing edges down over the destroyed slot.
  */
  if (edge < polygon_info->number_edges)
    (void) memmove(edges+edge,edges+edge+1,(size_t)
      (polygon_info->number_edges-edge)*sizeof(*edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  DestroyPolygonInfo() frees each edge's point list, the edge array, and the
  PolygonInfo itself.  Always returns NULL.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      register ssize_t
        i;

      /*
        Release every edge's point list before the edge array itself.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        polygon_info->edges[i].points=(PointInfo *)
          RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AffineEdge() computes, for destination scanline y, the horizontal span
  [x1,x2] whose pixels map back inside the source image under the affine
  transform.  A returned span with x2 < x1 signals that the scanline misses
  the source entirely.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;  /* source x at destination x == 0 */
  if (affine->sx >= MagickEpsilon)
    {
      /* source x increases with destination x: clamp both intercepts */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* source x decreases with destination x: intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* sx ~ 0: constant source x lies outside the columns; collapse
             the span (x2=x1) so the caller skips this scanline */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;  /* source y at destination x == 0 */
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* NOTE(review): the analogous columns case above collapses the
             span with edge->x1, but edge->x2 here does not guarantee an
             empty span -- confirm this asymmetry is intentional */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  InverseAffineMatrix() returns the inverse of a 2-D affine transform: the
  2x2 linear part is inverted via the reciprocal determinant and the
  translation is then mapped through the inverted linear part.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  /* PerceptibleReciprocal() guards against a (near) singular matrix */
  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=affine->sy*determinant;
  inverse_affine.sy=affine->sx*determinant;
  inverse_affine.rx=(-affine->rx)*determinant;
  inverse_affine.ry=(-affine->ry)*determinant;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
/*
  DrawAffineImage() composites the source image over the destination as
  dictated by the affine transform.
  Changes from the previous revision: the dead local x_offset (zeroed and
  incremented but never read) is removed, and the row loop now skips work
  once status is false, matching the file's other parallel loops.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: transform the four source corners and take the
    min/max of the results.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip remaining work */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* scanline does not intersect the transformed source */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5);
         x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* map destination (x,y) back into source coordinates */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SaneStrokeWidth() caps the stroke width at a multiple of the image's
  largest dimension so a pathological width cannot explode rendering cost.
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    maximum_width;

  maximum_width=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,
    image->rows);
  return(MagickMin((double) draw_info->stroke_width,maximum_width));
}
/*
  DrawBoundingRectangles() strokes the bounding box of every polygon edge
  (red for directed edges, green otherwise) and the overall bounding box
  (blue).  Debugging aid for the rendering algorithm.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;  /* half the scaled stroke width; pads each bounding box */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* translucent black fill for all rectangles */
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* default to 96 DPI unless the draw info carries a density string */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;  /* single value applies to both axes */
    }
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounding boxes */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /* pad by the stroke half-width and clamp to the image extent */
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red stroke for directed edges, green for the rest */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* the loop above exited early on failure */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* the overall bounding box, stroked in blue */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClipPath() looks up the MVG clip path stored as image artifact 'id',
  renders it into a clipping mask, and installs it as the image write mask.
  Returns MagickFalse when no such artifact exists or rendering fails.
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);  /* no clip path registered under this id */
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClippingMask() renders the MVG clip path into a grayscale mask image
  (white fill on a transparent background, alpha separated and negated) and
  returns it, or NULL on failure.
  Fix: the mask was previously destroyed both inside the separate-mask
  branch and again afterwards whenever NegateImage() failed, destroying an
  already-freed (NULL) image; a single failure check now runs after the
  branch.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  /* fully transparent background; the path is painted opaque white */
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* turn the rendered alpha into the actual mask channel */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
    }
  /* single failure check: avoids the former double DestroyImage() when
     NegateImage() failed */
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawCompositeMask() renders the MVG mask path into a grayscale composite
  mask image (alpha separated and negated) and returns it, or NULL on
  failure.
  Fix: as in DrawClippingMask(), the mask was destroyed both inside the
  separate-mask branch and again afterwards whenever NegateImage() failed,
  destroying an already-freed (NULL) image; a single failure check now runs
  after the branch.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  /* fully transparent background; the path is painted opaque white */
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* turn the rendered alpha into the actual mask channel */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
    }
  /* single failure check: avoids the former double DestroyImage() when
     NegateImage() failed */
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawDashPolygon() strokes the polyline in primitive_info as a sequence of
  dashes, honoring the draw info's dash pattern and dash offset.  Each "on"
  dash piece is accumulated into dash_polygon and stroked with
  DrawStrokePolygon().
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,          /* remaining length of the current dash element */
    maximum_length,  /* length of the current polyline segment */
    offset,          /* dash offset still to be consumed */
    scale,           /* affine expansion applied to dash lengths */
    total_length;    /* distance consumed along the current segment */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,  /* next free slot in dash_polygon */
    n;  /* dash-pattern index; odd entries are gaps, even entries dashes */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /* consume the dash offset against the pattern before drawing starts */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;  /* end of pattern reached while consuming the offset */
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;  /* excessively long segment; abort dashing */
    if (fabs(length) < MagickEpsilon)
      {
        /* current dash element exhausted; advance (wrap at pattern end) */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    /* walk the segment, emitting alternating dash/gap pieces */
    for (total_length=0.0; (length >= 0.0) &&
         (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* a gap ends here: restart the dash polygon at this point */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* a dash ends here: terminate and stroke the accumulated piece */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unconsumed remainder of the element to the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;  /* mid-gap: do not record the vertex */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* stroke any trailing partial dash */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetStopColorOffset() returns the position of pixel (x,y) along the
  gradient: the scalar projection onto the gradient vector for linear
  gradients, or the (angle/radius corrected) radial distance for radial
  gradients.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,  /* the gradient vector itself */
        q;  /* vector from the gradient origin to (x,y) */

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /* offset = (p.q)/|p|: projection of q onto the gradient direction;
         PerceptibleReciprocal() guards the degenerate zero-length case */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread: plain Euclidean distance from the center */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate into the ellipse frame and normalize by the two radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}
/*
  qsort() comparator ordering gradient stops by ascending offset.

  Offsets within MagickEpsilon of each other compare equal; this test must
  come FIRST.  The previous ordering of the tests returned 1 for
  compare(a,b) yet 0 for compare(b,a) when the offsets differed by less
  than MagickEpsilon, an asymmetric comparator — undefined behavior for
  qsort(), which requires a consistent total ordering.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *stop_1,
    *stop_2;

  stop_1=(const StopInfo *) x;
  stop_2=(const StopInfo *) y;
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  if (stop_1->offset > stop_2->offset)
    return(1);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Sort the color stops by ascending offset so adjacent stops can be
    blended pairwise inside the pixel loop.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);  /* gradient vector length */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Rows are independent; parallelize over y.  Any thread that fails flips
    the shared status flag and the remaining rows are skipped.
  */
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset from column 0 of this row; inside the loop it is only
      recomputed when (x,y) is not the gradient origin pixel.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);  /* normalize linear offset */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first or last
            stop color.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /*
                  Blend the two stops bracketing this offset.
                */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: mirror the gradient on every alternate unit
            interval of the offset.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /*
            Repeat spread: tile the gradient by reducing the offset modulo
            the vector length (linear) or the radius (radial); antialias
            the seam where one repetition meets the next.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the tile seam, blend between the last and first
                      stops instead.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /*
        Composite the gradient color over the existing pixel.
      */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives; grow the
    primitive-info array when there is not.  Returns MagickTrue when the
    array can hold offset+pad+PrimitiveExtentPad entries.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /*
    The byte count is computed in double so an overflowing request is
    detected before it is narrowed to size_t.
  */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);  /* current allocation already suffices */
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /*
            Mark the newly acquired slots as undefined primitives.
          */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
/*
  Splay-tree key comparator: order MVG macro names lexicographically.
*/
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.  Each named
    "push ... \"name\"" block is extracted and stored in a splay tree
    keyed by its name.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    macro and token are sized to the whole primitive string, so any token
    or extracted macro body fits.
  */
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              n tracks push/pop nesting so the matching pop for THIS push
              is found; end is rewound to just before the "pop" keyword.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
/*
  Return MagickTrue when the token parses as a numeric point coordinate:
  either StringToDouble() consumed characters, or it yielded a
  non-negligible value.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  value=StringToDouble(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Record a single-coordinate (point) primitive: one coordinate, not part
  of a closed subpath.  Always succeeds.
*/
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Render the MVG primitives in draw_info onto image.  Depth 0 marks the
    top-level document; nested "use"/pattern content recurses with depth+1.
  */
  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the pattern image; on return it holds the rendered pattern.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern body and its geometry were stashed as image artifacts by the
    MVG parser ("push pattern"); both must be present to render anything.
  */
  (void) FormatLocaleString(key,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previous pattern image with a fresh, fully transparent
    canvas sized by the recorded geometry.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the recorded MVG into the pattern canvas with a clone of the
    caller's draw settings; the "-type" artifact, when present, selects a
    gradient type for the render.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,key);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release the per-thread polygon set.  NULL slots are tolerated so a
    partially-constructed set (see AcquirePolygonThreadSet) can be torn
    down with the same routine.  Always returns NULL for caller reset.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.  Stroke opacity is
    derived from the squared distance of the point to the nearest edge
    segment (relative to the half stroke width `mid'); fill opacity comes
    from the polygon winding number computed in a second pass.  All
    `distance' values below are squared distances.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* early break presumes edges are ordered by y1 — TODO confirm */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* scanline is past this edge's extent; release it */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    /* skip edges whose stroke-padded bounding box excludes this x */
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* remember where this scanline entered the edge so later pixels
             on the same row can skip ahead */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta: projection of (point - q) onto the segment direction */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* projection falls before the segment start: distance to q */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* projection falls past the segment end: distance to q+1 */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular squared distance to the segment interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;  /* well inside the stroke width */
              else
                {
                  /* near the stroke boundary: quadratic falloff */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;  /* point lies on the path itself */
          continue;
        }
      /* NOTE(review): distance > 1.0 was already excluded above; this
         check is redundant but harmless */
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* anti-aliased edge coverage for the subpath boundary */
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge lies entirely left of the point: counts as one crossing */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product decides which side of the segment the point is on */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: inside when the winding parity is odd */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    /* non-zero rule: inside when crossings do not cancel out */
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  /*
    Rasterize a polygon (or line) primitive onto the image: build per-thread
    polygon structures, clip the bounding box to the image, then composite
    fill and stroke opacity per pixel (one scanline per OpenMP task).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);  /* fewer than 2 coordinates: nothing to draw */
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  DisableMSCWarning(4127)
  if (0)  /* disabled debug aid: outlines edge bounding rectangles */
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid: half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union of all edge bounding boxes */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad by the stroke half-width plus one pixel of anti-alias slack */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the bounding box to the image frame */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive point is set */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's polygon clone */

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no anti-aliasing: threshold coverage to all-or-nothing */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainCoordinate(double x)
{
  /*
    Clamp a primitive coordinate to a magnitude that survives conversion to
    ssize_t (with a 512-unit safety margin) without overflow.
  */
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    last,
    point,
    start;

  register ssize_t
    i;

  ssize_t
    coordinates,
    x,
    y;

  /*
    Emit a trace of this primitive (type, coordinates, subpath boundaries)
    to the draw event log.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: log every coordinate, flagging duplicates and
    whether each subpath ends open or closed.
  */
  coordinates=0;
  start=primitive_info[0].point;
  last.x=(-1.0);
  last.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* a new subpath begins here */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        start=point;
      }
    if ((fabs(last.x-point.x) >= MagickEpsilon) ||
        (fabs(last.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    last=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    if ((fabs(start.x-point.x) >= MagickEpsilon) ||
        (fabs(start.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Dispatch a single primitive (alpha/color/image/point/text or a traced
    path) to the appropriate renderer.  `status' accumulates MagickFalse
    from every sub-operation via &=.
  */
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /* drawing a non-gray color on a grayscale image forces sRGB first */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  /* SVG semantics: apply clipping/composite masks around the draw */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /* clamp to a safe ssize_t range before converting */
  x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
  y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Modify only the alpha channel, per the primitive's method.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* set alpha of the single pixel at (x,y) */
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /* set alpha of every pixel fuzzy-matching the target color */
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /* flood-fill alpha from (x,y); FillToBorder stops at the
             border color instead of matching the seed color */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* restrict the flood fill to the alpha channel */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /* set alpha of every pixel in the image */
          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Same methods as AlphaPrimitive, but replacing the full pixel color.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /*
        Read an image (inline data: URI or a filename), optionally resize
        it to the primitive's second coordinate, then composite it at (x,y).
      */
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      /* only the first image of a sequence is composited */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* Over composites honor the affine transform; others composite
         axis-aligned at the gravity-adjusted offset */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      /*
        Composite the fill color over the single pixel at (x,y).
      */
      PixelInfo
        fill_color;

      register Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /*
        Delegate text rendering to AnnotateImage at the primitive's point.
      */
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /*
        Traced path primitive: render fill and stroke, honoring dash
        patterns and line cap/join attributes.
      */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          /* first fill (stroke disabled), then the dashed stroke */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          /* NOTE: these doubles shadow the outer ssize_t x and y */
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          /* coincident endpoints imply a closed path */
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* round joins/caps (or multiple subpaths): the plain
                 polygon renderer already produces the right result */
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /* remove the masks installed above */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /*
    Render a round line cap by drawing a degenerate 4-point polygon at the
    endpoint; the stroke renderer rounds it into a cap.  linecap[4] is the
    UndefinedPrimitive terminator.
  */
  (void) memset(linecap,0,sizeof(linecap));  /* fully define the sentinel
    entry: the original left linecap[4]'s other fields uninitialized */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.  Each subpath's stroke is traced as an outline
    polygon which is then filled with the stroke color/pattern.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /* clone draw info: fill <- stroke color/pattern, stroke disabled */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* iterate over subpaths: each spans p->coordinates entries */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last point of this subpath */
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /* open subpath with round caps: cap both endpoints */
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  AffineMatrix
    identity;

  /*
    Initialize the caller's matrix to the identity transform: unit scale,
    zero rotation and zero translation.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(&identity,0,sizeof(identity));
  identity.sx=1.0;
  identity.sy=1.0;
  *affine_matrix=identity;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.  Hard-coded defaults are applied first,
    then any matching image options from image_info override them.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* default fill "#000F", default stroke "#FFF0" */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* carry over settings already present on the image info */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* per-option overrides from the image options dictionary */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept a symbolic weight ("bold") or a raw number ("700") */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient of (n,k), i.e. n choose k.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() computes the binomial coefficient C(n,k)=n!/(k!*(n-k)!) in
  floating point.  TraceBezier() uses it to build the Bernstein polynomial
  coefficients of a Bezier curve.
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    coefficient;

  ssize_t
    term;

  /*
    Accumulate n!/k! as a rising product, then divide out (n-k)!; the
    ascending order keeps intermediate values small.
  */
  coefficient=1.0;
  for (term=k+1; term <= n; term++)
    coefficient*=(double) term;
  for (term=1; term <= (n-k); term++)
    coefficient/=(double) term;
  return(coefficient);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() renders an MVG arc as a (possibly rotated) ellipse whose
  center is the midpoint of the chord between 'start' and 'end' and whose
  radii reach from that midpoint to 'start'.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    midpoint,
    extent;

  midpoint.x=0.5*(start.x+end.x);
  midpoint.y=0.5*(start.y+end.y);
  extent.x=fabs(start.x-midpoint.x);
  extent.y=fabs(start.y-midpoint.y);
  return(TraceEllipse(mvg_info,midpoint,extent,degrees));
}
/*
  TraceArcPath() renders an elliptical arc given in SVG endpoint
  parameterization (the path 'A'/'a' operator): an arc from 'start' to
  'end' on an ellipse with radii 'arc', rotated by 'angle' degrees and
  selected by the large_arc/sweep flags.  The arc is split into segments
  of at most a quarter turn, each approximated by one cubic Bezier via
  TraceBezier().  Returns MagickFalse on failure.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases: a zero-length chord is a point, a zero radius is a
    straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Transform the chord midpoint into the ellipse's rotated frame.
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /*
        Radii too small to span the endpoints; scale them up uniformly.
      */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Map both endpoints onto the unit circle of the ellipse frame.
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /*
    Choose the circle center consistent with the large_arc/sweep flags;
    the sign of 'factor' picks which of the two candidate centers is used.
  */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    Start angle 'alpha' and signed sweep 'theta', wrapped to honor the
    sweep direction.
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    One cubic Bezier per quarter turn (or less) of arc.
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /*
      'gamma' is the control-point distance for this segment's cubic
      approximation of the circular arc.
    */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Emit the four Bezier control points, mapped back into user space.
      The first point continues from the previous segment's end point.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /*
      TraceBezier() may grow the primitive buffer; refresh 'p' from the
      base pointer before advancing past the traced coordinates.
    */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /*
    Rewind to the subpath start and stamp the primitive type onto every
    traced point.
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceBezier() replaces the 'number_coordinates' control points stored at
  mvg_info->offset with a polyline approximation of the Bezier curve they
  define, evaluated in Bernstein form.  Returns MagickFalse on memory or
  extent failure (an exception is recorded in mvg_info->exception).
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.  The sampling density ('quantum') scales with
    the largest coordinate spread between any two control points, so big
    curves get more samples.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /*
    CheckPrimitiveExtent() may reallocate the buffer; refresh the base
    pointer before reading the control points.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points: coefficients[i] is the binomial coefficient
    C(number_coordinates-1,i); 'alpha' walks the Bernstein basis terms
    weight^j*(1-weight)^(n-1-j) for each sample parameter 'weight'.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /*
    Land exactly on the curve's end point regardless of sampling error.
  */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Stamp the primitive type on every traced point, walking backwards.
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
/*
  TraceCircle() renders a circle centered on 'start' whose radius is the
  distance to 'end', expressed as a full-sweep (0..360 degree) ellipse
  with equal radii.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    radius;

  PointInfo
    extent,
    sweep;

  radius=hypot(end.x-start.x,end.y-start.y);
  extent.x=radius;
  extent.y=radius;
  sweep.x=0.0;
  sweep.y=360.0;
  return(TraceEllipse(mvg_info,start,extent,sweep));
}
/*
  TraceEllipse() appends a polyline approximation of an ellipse (or the
  elliptical arc from arc.x to arc.y degrees) centered at 'center' with
  the given radii, at mvg_info->offset.  The subpath is flagged closed
  when the last traced point coincides with the first.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /*
    Shrink the angular step as the ellipse grows so segments stay short.
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /*
    CheckPrimitiveExtent() may reallocate the buffer; refresh the base
    pointer before tracing.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /*
    Always land exactly on the terminal angle.
  */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceLine() traces a line as two points; a line whose endpoints coincide
  (within MagickEpsilon) collapses to a single point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    degenerate;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() parses an SVG-style path string ('M', 'L', 'C', 'Q', 'S',
  'T', 'A', 'H', 'V', 'Z' and their relative lowercase forms) and appends
  the resulting primitives at mvg_info->offset.  Returns the total number
  of coordinates traced, or -1 on error.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    /*
      NOTE(review): ThrowPointExpectedException() appears to record the
      error and update 'status' as a macro side effect — confirm against
      its definition; 'status' is checked here and after the loop.
    */
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry rotation large-arc-flag sweep-flag x y.
          An uppercase attribute means absolute coordinates, lowercase
          means relative to the current point (same rule in every case
          below).
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.  Starting a new subpath finalizes the coordinate count
          of the previous one.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /*
            Only the first pair is a move; extra pairs are implicit
            line-tos, so remember only the first as the subpath start.
          */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve (smooth): the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /*
            Without a preceding C/c/S/s segment there is nothing to
            reflect; fall back to the current point.
          */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve (smooth): the control point is the
          reflection of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: return to the subpath start and finalize the
          subpath's coordinate count.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /*
          Unrecognized path operator.
        */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /*
    Finalize the last subpath, then walk backwards stamping the primitive
    type; multiple closed subpaths switch to fill-to-border so interior
    holes render correctly.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}
/*
  TraceRectangle() traces a rectangle as a closed five-point polygon:
  the four corners in order, returning to the starting corner.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corners[5];

  PrimitiveInfo
    *q;

  ssize_t
    n;

  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  q=primitive_info;
  for (n=0; n < 5; n++)
  {
    if (TracePoint(q,corners[n]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /*
    Stamp the primitive type on every traced point, walking backwards.
  */
  for (n=0; n < (ssize_t) primitive_info->coordinates; n++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with elliptically rounded
  corners as four quarter-ellipse arcs joined in order, closed back onto
  the first traced point.  'arc' holds the corner radii, clamped to half
  the rectangle's extent.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /*
        Degenerate rectangle: emit nothing.
      */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /*
    Corner radii cannot exceed half the rectangle's width/height.
  */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /*
    Corner arc spanning 270..360 degrees.
  */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Corner arc spanning 0..90 degrees.
  */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Corner arc spanning 90..180 degrees.
  */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Corner arc spanning 180..270 degrees.
  */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  /*
    Close the shape back onto the first traced point, then rewind the
    offset so the whole round rectangle reads as one primitive.
  */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() extends both endpoints of an open stroked path
  outward by 'offset' along the direction of the first (and last)
  non-degenerate segment, producing a square line cap.  The caller
  supplies 'number_vertices' points in primitive_info.

  Fixes vs. the previous revision: (1) the backward scan now clamps 'j'
  to 0 — previously, when every vertex coincided, j ended at -1 and
  primitive_info[-1] was read out of bounds; (2) both endpoint
  adjustments are skipped when the segment length is below MagickEpsilon,
  which previously divided by (near-)zero and injected NaN/Inf into the
  path (e.g. for a single-vertex path).
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance,
    dx,
    dy;

  ssize_t
    i,
    j;

  /*
    Find the first vertex that differs from vertex 0 so the cap direction
    is well defined even when leading vertices coincide.
  */
  dx=0.0;
  dy=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs(dx) >= MagickEpsilon) || (fabs(dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot(dx,dy);
  if (distance >= MagickEpsilon)
    {
      /*
        Push the first vertex outward along the segment direction.
      */
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /*
    Repeat for the trailing endpoint, scanning backwards from the last
    vertex.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs(dx) >= MagickEpsilon) || (fabs(dy) >= MagickEpsilon))
      break;
  }
  if (j < 0)
    j=0;  /* all vertices coincide; avoid indexing primitive_info[-1] */
  distance=hypot(dx,dy);
  if (distance >= MagickEpsilon)
    {
      primitive_info[number_vertices-1].point.x=(double)
        (primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double)
        (primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
  return(MagickTrue);
}
/*
  TraceStrokePolygon() converts a polyline (primitive_info) into a closed
  polygon outlining the stroked path, honoring the stroke width, line-join
  (bevel/miter/round) and line-cap attributes of draw_info.  Two point
  buffers are built in parallel: stroke_p for one side of the stroke and
  stroke_q for the other; they are concatenated (q side reversed) into the
  returned polygon.  Returns NULL on allocation failure; the caller owns
  the returned primitive array.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/* headroom reserved beyond the requested extent on every (re)allocation */
#define MaxStrokePad (6*BezierQuantum+360)
/* Grow stroke_p/stroke_q so at least pad_p/pad_q more points fit; on any
   allocation failure (including the ~extent overflow check) release all
   buffers and return NULL from the enclosing function. */
#define CheckPathExtent(pad_p,pad_q) \
{ \
  if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
    { \
      if (~extent_p < (pad_p)) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      else \
        { \
          extent_p+=(pad_p); \
          stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
            MaxStrokePad,sizeof(*stroke_p)); \
        } \
    } \
  if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
    { \
      if (~extent_q < (pad_q)) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      else \
        { \
          extent_q+=(pad_q); \
          stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
            MaxStrokePad,sizeof(*stroke_q)); \
        } \
    } \
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
    { \
      if (stroke_p != (PointInfo *) NULL) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      if (stroke_q != (PointInfo *) NULL) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      polygon_primitive=(PrimitiveInfo *) \
        RelinquishMagickMemory(polygon_primitive); \
      return((PrimitiveInfo *) NULL); \
    } \
}

  /* a per-segment quantity held for the previous (p) and current (q) segment */
  typedef struct _StrokeSegment
  {
    double
      p,
      q;
  } StrokeSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *stroke_p,
    *stroke_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    extent_p,
    extent_q,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  StrokeSegment
    dx = {0.0, 0.0},
    dy = {0.0, 0.0},
    inverse_slope = {0.0, 0.0},
    slope = {0.0, 0.0},
    theta = {0.0, 0.0};

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  /* for round/miter joins on a closed path, duplicate the second vertex so
     the closing join is computed like any interior join */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      /* all vertices coincide: degenerate path */
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  /* working buffers for the two sides of the stroke */
  extent_p=2*number_vertices;
  extent_q=2*number_vertices;
  stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
    sizeof(*stroke_p));
  stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
    sizeof(*stroke_q));
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
    {
      if (stroke_p != (PointInfo *) NULL)
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      if (stroke_q != (PointInfo *) NULL)
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *)
        RelinquishMagickMemory(polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* slope/inverse slope of the first segment; vertical and horizontal
     segments are clamped to +/-1/MagickEpsilon instead of infinity */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid = half the stroke width in device space; miterlimit is compared
     against squared distances below, hence the squared form */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* offset of the stroke edge, perpendicular to the first segment */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the p/q indices look swapped here, but both are 0 and
     each is incremented exactly once, so the effect is identical to
     stroke_q[q++]/stroke_p[p++]. */
  stroke_q[p++]=box_q[0];
  stroke_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip vertices closer than half a pixel to the current one */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    /* edge offset perpendicular to segment q; box_p[2..3]/box_q[2..3] are
       the offset end points of the current segment on either side */
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_p[4]/box_q[4]: intersection of the previous and current offset
       edges (the miter point); degenerate for parallel segments */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(MaxStrokePad,MaxStrokePad);
    /* sign of the cross product decides which side is the outer turn */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_q[q++]=box_q[1];
          stroke_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          /* approximate the round join by an arc of small chords */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
          stroke_q[q].x=box_q[1].x;
          stroke_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          stroke_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the cases above for a turn in the other direction */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_p[p++]=box_p[1];
          stroke_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
          stroke_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          stroke_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* the current segment becomes the previous one for the next join */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  stroke_p[p++]=box_p[1];
  stroke_q[q++]=box_q[1];
  /*
    Trace stroked polygon.
  */
  /* closed_path is 0 or 1 here, so 2UL*closed_path+2UL accounts for the
     extra closing points plus the sentinel */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      /* p side forward ... */
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=stroke_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      /* ... then q side reversed */
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      /* close the polygon and terminate the primitive list */
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
  stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
declare-pr90861.c | /* Verify that OpenACC 'declare' cleans up for VLAs. */
/* { dg-additional-options "-fdump-tree-gimple" } */
/* Fixed-size array case: 'acc declare copy' must emit exactly one 'to'
   and one 'from' mapping for A_f1 in the gimple dump.  */
void f1 (void)
{
#define N_f1 1000
  int A_f1[N_f1];
#pragma acc declare copy(A_f1)
  /* { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(to:A_f1} 1 gimple } }
     { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(from:A_f1} 1 gimple } } */
}
/* VLA case (the subject of PR90861): the array decays to a pointer in the
   mapping, hence the '(*A_f2' pattern; again one 'to' and one 'from'.  */
void f2 (void)
{
  int N_f2 = 1000;
  int A_f2[N_f2];
#pragma acc declare copy(A_f2)
  /* { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(to:\(\*A_f2} 1 gimple } }
     { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(from:\(\*A_f2} 1 gimple } } */
}
|
mxEvaluatePostFunc2d.c | #include "mex.h"
#include "mxSWE2d.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define NRHS 5
#define NLHS 1
#define NVAR 3
/*
 * Positivity-preserving post-limiter for a 2-D shallow-water DG solver:
 * for every element, scales the nodal values of (h, qx, qy) toward their
 * cell means so the limited water depth stays non-negative.
 *
 * prhs: 0 hcrit - dry/wet depth threshold (scalar)
 *       1 fphys - field array [Np x K x >=3] holding h, qx, qy
 *       2 hc    - cell-mean depth   [K]
 *       3 qxc   - cell-mean x-flux  [K]
 *       4 qyc   - cell-mean y-flux  [K]
 * plhs: 0       - limited [Np x K x 3] array (h, qx, qy)
 *
 * Side effect: for cells whose mean depth is <= 0 the *input* fphys
 * values are zeroed in place (matches the original behavior).
 */
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[]) {
  /* check input & output; bail out early instead of falling through and
     dereferencing missing arguments (the original only printed) */
  if (nrhs != NRHS) {
    mexPrintf("Matlab:%s:InvalidNumberInput,\n", __FILE__);
    mexPrintf("%d inputs required.\n", NRHS);
    return;
  }
  if (nlhs != NLHS) {
    mexPrintf("Matlab:%s:InvalidNumberOutput,\n", __FILE__);
    mexPrintf("%d outputs required.\n", NLHS); /* fixed: said "inputs" */
    return;
  }
  /* get inputs */
  double hcrit = mxGetScalar(prhs[0]);
  double* fphys = mxGetPr(prhs[1]);
  double* hc = mxGetPr(prhs[2]);
  double* qxc = mxGetPr(prhs[3]);
  double* qyc = mxGetPr(prhs[4]);
  /* get dimensions */
  const mwSize* dims = mxGetDimensions(prhs[1]);
  const size_t Np = dims[0]; /* nodes per element */
  const size_t K = dims[1];  /* number of elements */
  const size_t ndimOut = 3;
  const mwSize dimOut[3] = {Np, K, NVAR};
  plhs[0] = mxCreateNumericArray(ndimOut, dimOut, mxDOUBLE_CLASS, mxREAL);
  /* the three field planes of the input ... */
  double* h = fphys;
  double* qx = fphys + K * Np;
  double* qy = fphys + 2 * K * Np;
  /* ... and of the (zero-initialized) output */
  double* h_pos = mxGetPr(plhs[0]);
  double* qx_pos = h_pos + K * Np;
  double* qy_pos = h_pos + 2 * K * Np;
  const double ksi = 0.0; /* minimum admissible mean depth */
  // cell area and scalar averages
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
  for (int k = 0; k < (int)K; k++) { /* cast: avoid signed/unsigned mix */
    double hmean = hc[k];
    double qxmean = qxc[k];
    double qymean = qyc[k];
    if (hmean <= ksi) {
      /* fully dry cell: zero the input fields in place; the output plane
         stays at its zero initialization */
      for (int n = 0; n < (int)Np; n++) {
        size_t sk = (size_t)k * Np + n;
        h[sk] = 0;
        qx[sk] = 0;
        qy[sk] = 0;
      }
      continue;
    }
    /* nodal minimum of the depth within this cell */
    double hmin = h[(size_t)k * Np];
    for (int n = 0; n < (int)Np; n++) {
      hmin = min(hmin, h[(size_t)k * Np + n]);
    }
    /* limiting factor: 1 keeps the nodal values, 0 collapses to the mean */
    double theta;
    if (hmin < hmean) {
      theta = min((hmean - ksi) / (hmean - hmin), 1.0);
    } else {
      theta = 0.0;
    }
    for (int n = 0; n < (int)Np; n++) {
      size_t sk = (size_t)k * Np + n;
      h_pos[sk] = theta * (h[sk] - hmean) + hmean;
      qx_pos[sk] = theta * (qx[sk] - qxmean) + qxmean;
      qy_pos[sk] = theta * (qy[sk] - qymean) + qymean;
      if (h_pos[sk] < hcrit) { // dry nodes carry no momentum
        qx_pos[sk] = 0.0;
        qy_pos[sk] = 0.0;
      }
    }
  }
  return;
}
sudoku.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
// Sudoku-Solver
// May 2018: Sven Bingert, Triet Doan
//
// ------------------------------------------------------
// print the sudoku on screen
// ------------------------------------------------------
// Dump the n x m sudoku grid to stdout, one row per line,
// each cell right-aligned in a two-character field.
void printsudoku(int n, int m, int field[n][m])
{
    printf("\n");
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < m; col++)
            printf("%2d ", field[row][col]);
        printf("\n");
    }
}
// ------------------------------------------------------
// search for fitting number in cell i,j
// ------------------------------------------------------
// Try every candidate value k for the empty cell (i,j).  A candidate
// survives if it does not already appear in row i, column j, or the
// sqrt(n) x sqrt(n) region containing the cell.  If exactly one candidate
// survives, it is written into the cell.  The candidate loop is
// parallelized over k with n_numbers counting survivors via a reduction.
void testnumber(int n, int m, int field[n][m], int i, int j)
{
    // I'm at the row i, column j
    // Test each number k
    // If only one number fits -> success
    //
    int k_success;      // per-thread flag: is candidate k still viable?
    int k_tobeset;      // last viable candidate seen
    int n_numbers = 0;  // total number of viable candidates
    int mb, nb;
    int ldim = (int)sqrt((double)n);  // edge length of one sub-region
    // for some reason unable to use default(none) here
    // NOTE(review): k_tobeset is shared and written without synchronization;
    // this is harmless only because its value is consumed solely when
    // n_numbers == 1, i.e. when exactly one thread iteration wrote it.
    // NOTE(review): the row check scans n entries and the column check m;
    // this presumes a square grid (n == m) -- confirm for non-square input.
#pragma omp parallel for shared(ldim, k_tobeset, m, n, field, i, j) \
    private(k_success, mb, nb) reduction(+: n_numbers)
    for (int k = 1; k < n + 1; k++)
    {
        //
        k_success = 1;
        // candidate must not already appear in row i
        for (int l = 0; l < n; l++)
        {
            if (field[i][l] == k)
            {
                k_success = 0;
            }
        }
        // candidate must not already appear in column j
        for (int l = 0; l < m; l++)
        {
            if (field[l][j] == k)
            {
                k_success = 0;
            }
        }
        // Check if the number already exists in the region
        nb = i - i % ldim;  // top-left corner of the region holding (i,j)
        mb = j - j % ldim;
        for (int l1 = nb; l1 < nb + ldim; l1++)
        {
            for (int l2 = mb; l2 < mb + ldim; l2++)
            {
                if (field[l1][l2] == k)
                {
                    k_success = 0;
                }
            }
        }
        if (k_success == 1)
        {
            k_tobeset = k;
        }
        n_numbers = n_numbers + k_success;
    }
    // Success, new number will be added
    if (n_numbers == 1)
    {
        field[i][j] = k_tobeset;
        // printsudoku(n, m, field);
    }
}
// ------------------------------------------------------
// Main Function
// ------------------------------------------------------
// Read an n x m sudoku from "input2.file" (comma-separated, dimensions
// first), repeatedly apply testnumber() to every empty cell until no
// zeros remain, and print the elapsed wall-clock time.
// Returns 0 on success, 1 on any input error (the original crashed when
// the file was missing because fopen()'s result was never checked).
int main(int argc, const char *argv[])
{
    int n, m;
    FILE *file;
    int minval;
    //
    // Read dimension and sudoku from file
    // input.file
    // input2.file
    //
    file = fopen("input2.file", "r");
    if (file == NULL)
    {
        fprintf(stderr, "Error: cannot open 'input2.file'\n");
        return 1;
    }
    if ((fscanf(file, "%2d,", &n) != 1) || (fscanf(file, "%2d,", &m) != 1) ||
        (n <= 0) || (m <= 0))
    {
        fprintf(stderr, "Error: invalid sudoku dimensions\n");
        fclose(file);
        return 1;
    }
    int field[n][m];
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < m; j++)
        {
            if (fscanf(file, "%2d,", &field[i][j]) != 1)
            {
                fprintf(stderr, "Error: malformed sudoku data at (%d,%d)\n",
                        i, j);
                fclose(file);
                return 1;
            }
        }
    }
    // printsudoku(n, m, field);
    fclose(file);
    //
    // Start of the main loop, each field is tested if not 0
    //
    double start = omp_get_wtime();
    minval = 0;
    while (minval == 0)
    {
        for (int i = 0; i < n; i++)
        {
            for (int j = 0; j < m; j++)
            {
                if (field[i][j] == 0)
                {
                    testnumber(n, m, field, i, j);
                }
            }
        }
        //
        // The Sudoku is successfully filled if
        // the array contains no zeros
        // Another option would be to compute the sum
        // of all fields, which is fixed for given dimension.
        minval = 1;
#pragma omp parallel for default(none) shared(n, m, field, minval) schedule(guided)
        for (int i = 0; i < n; i++)
        {
            for (int j = 0; j < m; j++)
            {
                if (field[i][j] == 0)
                {
#pragma omp atomic write
                    minval = 0;
                }
            }
        }
    }
    double duration = omp_get_wtime() - start;
    // printsudoku(n, m, field);
    printf("%f\n", duration);
    return 0;
}
|
ofmo-integ.c | /**
* @file ofmo-integ.c
* Hartree-Fock分子軌道法、および、それに基づいたFMO法で
* 必要となる各種分子積分を行うための最上位関数群を定義している
* ファイル。
* */
/**
* @defgroup integ 分子積分クラス
* 通常のHartree-Fock(HF)分子軌道計算で用いる1電子積分(運動エネルギー
* 積分、核-引力積分、重なり積分)や2電子積分だけでなく、FMO法に出現する
* 4中心(3中心、2中心)の各種クーロン積分、および、カットオフテーブル
* を作成する関数などを定義している。
*
* @ingroup ofmo
*
* */
/**
* @defgroup integ-top 分子積分の最上位クラス
* @brief 積分計算を必要とする関数から呼ばれる最上位関数
* @ingroup integ
* */
/**
* @defgroup integ-med 同じタイプの積分をまとめて行う関数群
* @brief 同じタイプの積分をまとめて計算するための関数クラス
* @ingroup integ
* */
/**
* @defgroup integ-core 1つの縮約積分を計算する関数クラス
* @brief 1つの縮約積分を計算するための関数クラス
* @ingroup integ
* */
/**
* @defgroup integ-misc 雑多な処理を行う関数クラス
* @brief 積分計算以外の処理を行う関数クラス
* @ingroup integ
* */
/**
* @defgroup integ-fmt 誤差関数計算を行う関数クラス
* @brief 分子積分で必要となる誤差関数の計算を行う関数クラス
* @ingroup integ
* */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "ofmo-cutoff.h"
#include "ofmo-ifc2c.h"
#include "ofmo-oneint.h"
#include "ofmo-twoint.h"
#include "ofmo-twoint-buffer.h"
#include "ofmo-twoint-direct.h"
#include "ofmo-ifc4c.h"
#include "ofmo-ifc3c.h"
#include "fmt.h"
#include "fmt-m.h"
#include "ofmo-rys-xxxx.h"
#include "ofmo-os-xxxx.h"
#include "ofmo-def.h"
#include "ofmo-prof.h"
#include "ofmo-parallel.h"
#include "ofmo-tlog.h"
#ifdef OFMO_SKELETON
#include "rhf/skel-w2e.h"
#else
#define start_w2e()
#define set_w2e(Labcd)
#endif
double x_coef; //DFT; HF exchange coef.
//#define Free(a) if ( a != NULL ) free( a ); a = NULL
extern int ofmo_twoint_xxxx(
const int *pnworkers, const int *pworkerid,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// for partially direct SCF
const long *pebuf_max_nzeri, long *ebuf_non_zero_eri,
double ebuf_val[], short int ebuf_ind4[],
int *last_ijcs, int *last_klcs );
extern int ofmo_twoint_direct_xxxx(
const int *pnworkers, const int *pworkerid,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// for direct SCF
const long *petmp_max_nzeri, long *petmp_non_zero_eri,
double etmp_val[], short int etmp_ind4[],
const int *plast_ijcs, const int *plast_klcs,
// density matrix & G-matrix data
const int *pnao, const double Ds[], double G[] );
extern int ofmo_twoint_rys_xxxx(
const int *pnworkers, const int *pworkerid,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// for partially direct SCF
const long *pebuf_max_nzeri, long *ebuf_non_zero_eri,
double ebuf_val[], short int ebuf_ind4[],
int *last_ijcs, int *last_klcs );
extern int ofmo_twoint_direct_rys_xxxx(
const int *pnworkers, const int *pworkerid,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// for direct SCF
const long *petmp_max_nzeri, long *petmp_non_zero_eri,
double etmp_val[], short int etmp_ind4[],
const int *plast_ijcs, const int *plast_klcs,
// density matrix & G-matrix data
const int *pnao, const double Ds[], double G[] );
extern int ofmo_OS_integ_init( const int maxlqn );
extern int ofmo_oneint_gen_init( const int maxlqn );
extern int ofmo_Rys_integ_init( const int maxlqn );
/*// for Fortran code
extern void fmt4_initialize_();
extern void fmt4_gen_initialize_();
extern void fort_const_();*/
#ifdef USE_CUDA
#include "cuda/cudalib.h"
#include "cuda/cuda-integ.h"
#include "cuda/cuda-ifc4c.h"
#include "cuda/cuda-ifc4c-calc.h"
extern int cuda_fmt_initialize();
#endif
/* ====================================================
負荷分散などのための制御変数に関係する関数群
==================================================== */
// to control load-balancing
static int *target_type = NULL;
static size_t *loop_offset = NULL;
static int OFMO_MAX_THREADS = 1;
/* atexit() hook: release the per-thread load-balancing arrays and reset
 * the recorded thread count to its initial value of one. */
static void finalize_ctrl() {
    OFMO_MAX_THREADS = 1;
    Free( loop_offset );
    Free( target_type );
}
/* One-shot setup of the load-balancing control arrays: allocate one
 * target-type slot and one loop-offset slot per OpenMP thread, reset
 * them, and register the cleanup hook.  Subsequent calls are no-ops.
 * Always returns 0. */
static int init_ctrl() {
    static int called = false;
    int maxthreads, id;
    if ( called ) return 0;
    maxthreads = omp_get_max_threads();
    target_type = (int*)malloc( sizeof(int) * maxthreads );
    loop_offset = (size_t*)malloc( sizeof(size_t) * maxthreads );
    OFMO_MAX_THREADS = maxthreads;
    for ( id=0; id<maxthreads; id++ ) {
        target_type[id] = -1;   /* -1 == no target assigned yet */
        loop_offset[id] = 0;
    }
    atexit( finalize_ctrl );
    called = true;
    return 0;
}
/* Return the loop-counter offset recorded for the given thread. */
size_t ofmo_integ_get_loop_offset( const int mythread ) {
    const size_t saved = loop_offset[mythread];
    return saved;
}
/* Record a loop-counter offset for the given thread. */
void ofmo_integ_set_loop_offset( const int mythread,
	const size_t offset ) {
    size_t *slot = &loop_offset[mythread];
    *slot = offset;
}
/* Record the integral target type for the given thread. */
void ofmo_integ_set_target_type( const int mythread,
	const int ttype ) {
    int *slot = &target_type[mythread];
    *slot = ttype;
}
/* Return the integral target type recorded for the given thread
 * (-1 when none has been assigned). */
static int ofmo_integ_get_target_type( const int mythread ) {
    const int ttype = target_type[mythread];
    return ttype;
}
/* One-shot initialization of every molecular-integral module for the
 * given maximum angular momentum.  Sets up the error-function (Boys)
 * tables, the two-electron / interfragment integral modules, the cutoff
 * tables and the Obara-Saika and Rys quadrature code.  Repeated calls
 * are no-ops.  Always returns 0 (exits the process if CUDA table
 * initialization fails).
 * NOTE(review): the `called' guard is not synchronized; presumably this
 * is only invoked before threads are spawned -- confirm. */
int ofmo_integ_init( int maxlqn ) {
    static int called = false;
    if ( called == false ) {
	init_ctrl();	// added for load-balancing
	fmt_initialize( maxlqn );
	fmt_m_init();
#ifdef USE_CUDA
	int ret = cuda_fmt_initialize();
	if (ret<0) exit(1);
#endif
	ofmo_twoint_init();
	ofmo_ifc3c_os_init();
	ofmo_ifc3c_rys_init();
	ofmo_ifc2c_init();
	ofmo_oneint_init();
	ofmo_cutoff_init();
	//
	ofmo_OS_integ_init( maxlqn );
	ofmo_oneint_gen_init( maxlqn );
	//
	ofmo_Rys_integ_init( maxlqn );
	called = true;
    }
    return 0;
}
extern int ofmo_cutoff_xx(
// input arguments
const int *pLa, const int *pLb, const int leading_cs[],
const int shel_tem[], const int shel_atm[], const int shel_add[],
const double atom_x[], const double atom_y[],
const double atom_z[],
const double prim_exp[], const double prim_coe[],
// output arguments
int leading_cs_pair[],
double csp_schwarz[], int csp_ics[], int csp_jcs[],
int csp_leading_ps_pair[],
double psp_zeta[], double psp_dkps[], double psp_xiza[] );
/* top level code for making cutoff table */
/* Dispatch table of Schwarz-cutoff routines indexed by the combined
 * angular-momentum pair type Lab = La*(La+1)/2 + Lb: slot 0 is the
 * specialized (ss) kernel, all higher types use the generic routine.
 * NOTE(review): ofmo_cutoff_make_table() below calls ofmo_cutoff_ss_ /
 * ofmo_cutoff_xx directly and does not use this table -- confirm whether
 * it is still referenced elsewhere before removing it. */
static int (*calc_schwarz[]) (
	const int *pLa, const int *pLb, const int leading_cs[],
	const int shel_tem[], const int shel_atm[], const int shel_add[],
	const double atom_x[], const double atom_y[],
	const double atom_z[],
	const double prim_exp[], const double prim_coe[],
	int leading_cs_pair[],
	double csp_schwarz[], int csp_ics[], int csp_jcs[],
	int csp_leading_ps_pair[],
	double psp_zeta[], double psp_dkps[], double psp_xiza[]) = {
    ofmo_cutoff_ss_, /*ofmo_cutoff_ps_, ofmo_cutoff_pp_,
    ofmo_cutoff_ds_, ofmo_cutoff_dp_, ofmo_cutoff_dd_,*/
    ofmo_cutoff_xx, ofmo_cutoff_xx,
    ofmo_cutoff_xx, ofmo_cutoff_xx, ofmo_cutoff_xx
};
/** Schwarzの不等式を用いるためのカットオフテーブル作成関数
* @ingroup integ-top
*
* Schwarzの不等式を用いたカットオフテーブルを作成する。
* ソート基底関数データを与えると、Schwarzの不等式を用いるための
* カットオフテーブルが計算されて返ってくる。
*
* @attention
* @li 出力用の各種配列は、呼び出し時には確保されている必要がある。
* @li 出力用各種配列(\c csp_schwarz[] や \c csp_ics[] など)が複数
* スレッド間で共有されていなければ、スレッドセーフである。
*
* @param[in] maxlqn 最大軌道量子数
* @param[in] leading_cs[lqn] 軌道量子数 \c lqn の先頭CS番号
* @param[in] shel_tem[ics] CS番号 \c ics のCSの縮約長
* @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_add[ics] CS番号 \c ics のCSに含まれるPSの先頭PS番号
* @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位)
* @param[in] prim_exp[ips] PS番号 \c ips のPSの軌道指数
* @param[in] prim_coe[ips] PS番号 \c ips のPSの規格化定数込みの縮約係数
*
* @param[out] leading_cs_pair[itype] CSペアタイプ番号 \c itype の
* 先頭CSペア番号
* @param[out] csp_schwarz[icsp] CSペア番号 \c icsp のSchwarz積分
* @param[out] csp_ics[icsp] CSペア番号 \c icsp の1つ目のCS番号
* @param[out] csp_jcs[icsp] CSペア番号 \c icsp の2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[out] csp_leading_ps_pair[icsp] CSペア番号 \c icsp に含まれる
* PSペアの先頭PSペア番号
* @param[out] psp_zeta[ipsp] PSペア番号 \c ipsp の軌道指数和
* \f$ \zeta = \zeta_a + \zeta_b \f$
* @param[out] psp_dkps[ipsp] PSペア番号 \c ipsp の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[out] psp_xiza[ipsp] PSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
* @retval 0 正常終了
* @retval -1 異常終了(サポートしていない積分タイプがあったなど)
*
* */
/* Build the Schwarz-inequality cutoff table for all contracted-shell
 * pair types up to maxlqn.  The (ss) pair type has a dedicated kernel;
 * every other combination is handled by the generic routine.  The output
 * arrays must be allocated by the caller.  Always returns 0. */
int ofmo_cutoff_make_table(
	// input arguments
	const int maxlqn, const int leading_cs[],
	const int shel_tem[], const int shel_atm[], const int shel_add[],
	const double atom_x[], const double atom_y[],
	const double atom_z[],
	const double prim_exp[], const double prim_coe[],
	// output arguments
	int leading_cs_pair[],
	double csp_schwarz[], int csp_ics[], int csp_jcs[],
	int csp_leading_ps_pair[],
	double psp_zeta[], double psp_dkps[], double psp_xiza[] ) {
    int La, Lb;
    leading_cs_pair[0]     = 0;
    csp_leading_ps_pair[0] = 0;
    for ( La=0; La<=maxlqn; La++ ) {
	for ( Lb=0; Lb<=La; Lb++ ) {
	    /* only La==Lb==0 is the (ss) pair type (Lab == 0) */
	    if ( (La == 0) && (Lb == 0) ) {
		ofmo_cutoff_ss_(
			&La, &Lb, leading_cs,
			shel_tem, shel_atm, shel_add,
			atom_x, atom_y, atom_z,
			prim_exp, prim_coe,
			leading_cs_pair,
			csp_schwarz, csp_ics, csp_jcs,
			csp_leading_ps_pair,
			psp_zeta, psp_dkps, psp_xiza );
	    } else {
		ofmo_cutoff_xx(
			&La, &Lb, leading_cs,
			shel_tem, shel_atm, shel_add,
			atom_x, atom_y, atom_z,
			prim_exp, prim_coe,
			leading_cs_pair,
			csp_schwarz, csp_ics, csp_jcs,
			csp_leading_ps_pair,
			psp_zeta, psp_dkps, psp_xiza );
	    }
#ifdef SORT_CSP
	    ofmo_cutoff_sort_( La, Lb, leading_cs,
		    shel_tem, shel_atm, shel_add,
		    atom_x, atom_y, atom_z,
		    prim_exp, prim_coe,
		    leading_cs_pair,
		    csp_schwarz, csp_ics, csp_jcs,
		    csp_leading_ps_pair,
		    psp_zeta, psp_dkps, psp_xiza );
#endif
	} // Lb
    } // La
    return 0;
}
/* Sum the first n elements of the integer array a. */
static int isum( const int n, const int a[] ) {
    int idx, total;
    total = 0;
    for ( idx = n-1; idx >= 0; idx-- ) total += a[idx];
    return total;
}
extern int ofmo_oneint_xx(
const int *pnworkers, const int *pworkerid,
const int *pLa, const int *pLb, const int leading_cs[],
const int shel_tem[], const int shel_atm[],
const int shel_add[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[],
const double prim_exp[], const double prim_coe[],
const int *pnat, const int atomic_number[],
double S[], double H[] );
/* Dispatch table of one-electron integral kernels indexed by the pair
 * type Lab = La*(La+1)/2 + Lb (ss, ps, pp, ds, dp, dd).
 * NOTE(review): ofmo_integ_oneint_sorted() below calls ofmo_oneint_ss__ /
 * ofmo_oneint_xx directly and does not use this table -- confirm whether
 * it is still referenced elsewhere. */
static int (*oneint_func[]) (
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb,
	// basis set data for fragment
	const int leading_cs[],
	const int shel_tem[], const int shel_atm[],
	const int shel_add[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[],
	const double prim_exp[], const double prim_coe[],
	const int *pnat, const int atomic_number[],
	double S[], double H[] ) = {
    /*ofmo_oneint_ss_, ofmo_oneint_ps_, ofmo_oneint_pp_,
    ofmo_oneint_ds_, ofmo_oneint_dp_, ofmo_oneint_dd_,*/
    ofmo_oneint_ss__, ofmo_oneint_ps__, ofmo_oneint_pp__,
    /*ofmo_oneint_xx, ofmo_oneint_xx,
    ofmo_oneint_xx, ofmo_oneint_xx, ofmo_oneint_xx,*/
    ofmo_oneint_ds__, ofmo_oneint_dp__, ofmo_oneint_dd__,
};
/** ソートされた一電子積分計算関数
* @ingroup integ-top
*
* ソート基底関数を与えると、一電子積分を計算して
* ソートされた重なり積分、一電子ハミルトン行列を返す。
*
* @attention
* @li 出力用の各種配列は、呼び出し時には確保されている必要がある
* @li スレッド並列時の関数呼び出しは、スレッド並列領域内から行う必要
* がある
* @li 1プロセスで実行する場合には、関数終了時点で(スレッド
* 並列化時には全スレッドが関数から返った時点で)完全な重なり行列や
* 一電子ハミルトン行列が得られる。
* @li 複数プロセスで実行する場合には、関数終了時点では部分の結果しか
* 得られていない。完全な結果を得るためには、\c MPI_Allreduce などの
* 関数を用いたリダクション処理が必要である。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid 各ワーカプロセス(スレッド)のID。
* \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。
* @param[in] maxlqn 最大軌道量子数
* @param[in] leading_cs[lqn] 軌道量子数 \c lqn の先頭CS番号
* @param[in] shel_tem[ics] CS番号 \c ics のCSの縮約長
* @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_add[ics] CS番号 \c ics のCSに含まれるPSの先頭PS番号
* @param[in] shel_ini[ics] CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位)
* @param[in] prim_exp[ips] PS番号 \c ips のPSの軌道指数
* @param[in] prim_coe[ips] PS番号 \c ips のPSの規格化定数込みの縮約係数
*
* @param[in] nat 原子数
* @param[in] atomic_number[iat] 原子の番号 \c iat の原子番号
*
* @param[out] S[] 重なり行列(圧縮"U"形式)。
* @param[out] H[] 一電子ハミルトン行列(圧縮"U"形式)。
*
* @retval 0 正常終了
* @retval -1 異常終了(いま(2011/06/13)のところ考えていない)
* */
/* Compute all sorted one-electron integrals (overlap S and core
 * Hamiltonian H, both in packed "U" form) for the fragment described by
 * the sorted basis data.  Work is split among nworkers workers; with
 * multiple processes each worker accumulates only its share and the
 * caller must reduce (e.g. MPI_Allreduce).  Always returns 0. */
int ofmo_integ_oneint_sorted(
	const int nworkers, const int workerid,
	const int maxlqn, const int leading_cs[],
	const int shel_tem[], const int shel_atm[],
	const int shel_add[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[],
	const double prim_exp[], const double prim_coe[],
	const int nat, const int atomic_number[],
	double S[], double H[]) {
    int La, Lb;
    /* register the total nuclear charge before evaluating integrals */
    ofmo_oneint_set_sum_atomic_numbers( isum( nat, atomic_number ) );
    for ( La=0; La<=maxlqn; La++ ) {
	for ( Lb=0; Lb<=La; Lb++ ) {
	    /* only La==Lb==0 is the (s|s) pair type (Lab == 0) */
	    if ( (La == 0) && (Lb == 0) ) {
		ofmo_oneint_ss__(
			&nworkers, &workerid,
			&La, &Lb, leading_cs,
			shel_tem, shel_atm, shel_add, shel_ini,
			atom_x, atom_y, atom_z, prim_exp, prim_coe,
			&nat, atomic_number, S, H );
	    } else {
		ofmo_oneint_xx(
			&nworkers, &workerid,
			&La, &Lb, leading_cs,
			shel_tem, shel_atm, shel_add, shel_ini,
			atom_x, atom_y, atom_z, prim_exp, prim_coe,
			&nat, atomic_number, S, H );
	    }
	}
    }
    return 0;
}
/* Dispatch table for the buffered two-electron integral (ERI) kernels.
 * Indexed by the combined shell-quartet type
 *     Labcd = Lab*(Lab+1)/2 + Lcd,  with  Lab = La*(La+1)/2 + Lb,
 * as computed in ofmo_integ_twoint_first().  Each entry computes all ERIs
 * of one (La Lb|Lc Ld) type and appends the surviving (screened) values
 * plus their four short indices to the per-thread buffer.
 * Alternative kernel sets (original Fortran-style, generic Obara-Saika,
 * Rys quadrature) are kept below in commented-out form. */
static int (*calc_twoint_buffer[])(
	const int *pnworkers, const int *pworkerid,
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// for partially direct SCF
	const long *pebuf_max_nzeri, long *ebuf_non_zero_eri,
	double ebuf_val[], short int ebuf_ind4[],
	int *last_ijcs, int *last_klcs ) = {
    ofmo_twoint_buffer_ssss__,	// (SS,SS) always uses the same code
    /*// original (OS, individual kernels, d functions in original ordering)
    ofmo_twoint_buffer_psss__, ofmo_twoint_buffer_psps__,
    ofmo_twoint_buffer_ppss__, ofmo_twoint_buffer_ppps__,
    ofmo_twoint_buffer_pppp__, ofmo_twoint_buffer_dsss__,
    ofmo_twoint_buffer_dsps__, ofmo_twoint_buffer_dspp__,
    ofmo_twoint_buffer_dsds__, ofmo_twoint_buffer_dpss__,
    ofmo_twoint_buffer_dpps__, ofmo_twoint_buffer_dppp__,
    ofmo_twoint_buffer_dpds__, ofmo_twoint_buffer_dpdp__,
    ofmo_twoint_buffer_ddss__, ofmo_twoint_buffer_ddps__,
    ofmo_twoint_buffer_ddpp__, ofmo_twoint_buffer_ddds__,
    ofmo_twoint_buffer_dddp__, ofmo_twoint_buffer_dddd__,*/
    /*// Obara-Saika (general formula, C)
    ofmo_twoint_xxxx, ofmo_twoint_xxxx,
    ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx,
    ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx,
    ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx,
    ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx,
    ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx,
    ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx,*/
    // Obara-Saika (individual kernels, C)
    ofmo_twoint_os_psss, ofmo_twoint_os_psps,
    ofmo_twoint_os_ppss, ofmo_twoint_os_ppps, ofmo_twoint_os_pppp,
    ofmo_twoint_os_dsss, ofmo_twoint_os_dsps, ofmo_twoint_os_dspp,
    ofmo_twoint_os_dsds, ofmo_twoint_os_dpss, ofmo_twoint_os_dpps,
    ofmo_twoint_os_dppp, ofmo_twoint_os_dpds, ofmo_twoint_os_dpdp,
    ofmo_twoint_os_ddss, ofmo_twoint_os_ddps, ofmo_twoint_os_ddpp,
    ofmo_twoint_os_ddds, ofmo_twoint_os_dddp, ofmo_twoint_os_dddd,
    /*// Rys quadrature (general formula, C)
    ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,
    ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,
    ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,
    ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,
    ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,
    ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,
    ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,*/
    /* // Rys quadrature (individual kernels, C)
    ofmo_twoint_rys_psss, ofmo_twoint_rys_psps,
    ofmo_twoint_rys_ppss, ofmo_twoint_rys_ppps,
    ofmo_twoint_rys_pppp, ofmo_twoint_rys_dsss,
    ofmo_twoint_rys_dsps, ofmo_twoint_rys_dspp,
    ofmo_twoint_rys_dsds, ofmo_twoint_rys_dpss,
    ofmo_twoint_rys_dpps, ofmo_twoint_rys_dppp,
    ofmo_twoint_rys_dpds, ofmo_twoint_rys_dpdp,
    ofmo_twoint_rys_ddss, ofmo_twoint_rys_ddps,
    ofmo_twoint_rys_ddpp, ofmo_twoint_rys_ddds,
    ofmo_twoint_rys_dddp, ofmo_twoint_rys_dddd,*/
};
/* Dispatch table for the direct two-electron integral kernels used while
 * building the G matrix.  Indexed by the same quartet-type index Labcd as
 * calc_twoint_buffer[].  Each kernel recomputes the ERIs of one
 * (La Lb|Lc Ld) type and contracts them with the square density matrix Ds
 * directly into the per-thread G matrix (partially direct SCF).
 * Alternative kernel sets (generic Obara-Saika, Rys quadrature) are kept
 * below in commented-out form. */
static int (*calc_twoint_direct[])(
	// paralleization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// concerned about buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) = {
    ofmo_twoint_direct_ssss__,
    /*// Obara-Saika (general formula, C)
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,
    ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,*/
    // Obara-Saika (individual kernels, C)
    ofmo_twoint_direct_os_psss, ofmo_twoint_direct_os_psps,
    ofmo_twoint_direct_os_ppss, ofmo_twoint_direct_os_ppps,
    ofmo_twoint_direct_os_pppp, ofmo_twoint_direct_os_dsss,
    ofmo_twoint_direct_os_dsps, ofmo_twoint_direct_os_dspp,
    ofmo_twoint_direct_os_dsds, ofmo_twoint_direct_os_dpss,
    ofmo_twoint_direct_os_dpps, ofmo_twoint_direct_os_dppp,
    ofmo_twoint_direct_os_dpds, ofmo_twoint_direct_os_dpdp,
    ofmo_twoint_direct_os_ddss, ofmo_twoint_direct_os_ddps,
    ofmo_twoint_direct_os_ddpp, ofmo_twoint_direct_os_ddds,
    ofmo_twoint_direct_os_dddp, ofmo_twoint_direct_os_dddd,
    /*// Rys quadrature (general formula, C)
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,
    ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,*/
    /*// Rys quadrature (individual kernels, C)
    ofmo_twoint_direct_rys_psss, ofmo_twoint_direct_rys_psps,
    ofmo_twoint_direct_rys_ppss, ofmo_twoint_direct_rys_ppps,
    ofmo_twoint_direct_rys_pppp, ofmo_twoint_direct_rys_dsss,
    ofmo_twoint_direct_rys_dsps, ofmo_twoint_direct_rys_dspp,
    ofmo_twoint_direct_rys_dsds, ofmo_twoint_direct_rys_dpss,
    ofmo_twoint_direct_rys_dpps, ofmo_twoint_direct_rys_dppp,
    ofmo_twoint_direct_rys_dpds, ofmo_twoint_direct_rys_dpdp,
    ofmo_twoint_direct_rys_ddss, ofmo_twoint_direct_rys_ddps,
    ofmo_twoint_direct_rys_ddpp, ofmo_twoint_direct_rys_ddds,
    ofmo_twoint_direct_rys_dddp, ofmo_twoint_direct_rys_dddd,*/
};
/** @example oneint-serial.c
* 1電子積分関数のシリアル実行時のコード例。
* */
/** @example oneint-mt.c
* 1電子積分関数のスレッド並列実行時のコード例。
*
* 関数は、スレッド並列領域内から呼ばれている。
* 関数が終了して全スレッドで同期がとれた時点で、完全な行列が得られる。
* */
/** @example oneint-mpi.c
* 1電子積分関数のフラットMPI並列時のコード例。
*
* 関数呼び出し後にリダクション処理を行うことで、完全な行列が得られる。
* */
/** @example oneint-hybrid.c
* 1電子積分関数のOpenMPとMPIによるハイブリッド並列化の例。
*
* フラットMPIの場合と同様に、関数呼び出し後にリダクション処理を行う
* ことで、完全な行列が得られる。
*
* */
/** 電子反発積分(二電子積分)計算関数(1)
* @ingroup integ-top
*
* 指定されたサイズのバッファに入るだけの二電子積分を計算、保存する。
* バッファが一杯になったら、それ以降の積分計算は行わない。また、
* 二電子ハミルトン行列(G行列)の計算は行わない。
*
* @attention
* @li G行列生成関数 \c ofmo_integ_gen_gmat を呼び出す前に、一回、
* 呼び出す必要がある。この関数を呼ばずに \c ofmo_integ_gen_gmat 関数
* を呼び出すと、結果がおかしくなる場合がある。
* @li FMO計算のように一回の実行で複数回のSCF計算を行う場合には、
* SCF計算を行う度に、一度、呼び出す必要がある。
* @li OpenMPによるスレッド並列化が行われている。スレッド並列実行を
* 行うためには、この関数をスレッド並列領域内で呼び出す必要がある。
* @li 適切に\c nworkers と \c workerid を設定すれば、MPIと組み合わせた
* ハイブリッド並列化にも対応している。
* @li 二電子積分を保存するためのバッファは、関数内部で確保される。また、
* 確保したバッファは、プログラム終了時に開放される。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid 各ワーカプロセス(スレッド)のID。
* \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。
* @param[in] ebuf_buffer_size_mb 二電子積分の値を保存するための
* バッファサイズ(MB単位)。このバッファには、積分の値だけでなく
* 4つの添字も保存される。
* @param[in] maxlqn 最大軌道量子数
* @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_ini[ics] CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位)
* @param[in] leading_cs_pair[itype] CSペアタイプ番号 \c itype の
* 先頭CSペア番号
* @param[in] csp_schwarz[icsp] CSペア番号 \c icsp のSchwarz積分
* @param[in] csp_ics[icsp] CSペア番号 \c icsp の1つ目のCS番号
* @param[in] csp_jcs[icsp] CSペア番号 \c icsp の2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[in] csp_leading_ps_pair[icsp] CSペア番号 \c icsp に含まれる
* PSペアの先頭PSペア番号
* @param[in] psp_zeta[ipsp] PSペア番号 \c ipsp の軌道指数和
* \f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps[ipsp] PSペア番号 \c ipsp の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza[ipsp] PSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
*
 * @retval 100000 すべての積分がバッファに保存されて終了(in core SCF)
 * @retval Labcd バッファが一杯になった時点の積分タイプ番号
 *     (partially direct SCF)
 * @retval -1 バッファサイズが不正(0以下)の場合
*
* */
/* First pass of the two-electron integral (ERI) calculation.
 *
 * Walks all shell-quartet types (La,Lb,Lc,Ld) in canonical sorted order and
 * stores as many screened ERIs (value + four short indices) as fit into
 * this thread's buffer.  Must be called from inside an OpenMP parallel
 * region; each thread owns its buffer and resume bookkeeping.
 *
 * Returns:
 *   -1     : invalid (non-positive) buffer size
 *   Labcd  : quartet-type index at which the buffer became full
 *            (partially direct SCF; resume info recorded per thread)
 *   100000 : sentinel meaning every integral was stored (in-core SCF)
 *
 * Fix vs. previous revision: removed the unused debug local
 * `char *CS = "spdfg";` (dead variable; also bound a string literal to a
 * non-const char*, which is invalid in modern C++ builds). */
int ofmo_integ_twoint_first(
	// parallelization
	const int nworkers, const int workerid,
	// buffer size
	const size_t ebuf_buffer_size_mb,
	// basis set & cutoff table data
	const int maxlqn, const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[],
	const int leading_cs_pair[], const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[] ) {
    int La, Lb, Lc, Ld, Lab, Lcd, Labcd;
    int mythread = 0, ret_code;
    int last_ijcs, last_klcs;
    long ebuf_non_zero_eri, ebuf_max_nzeri;
    short int *ebuf_ind4;
    double *ebuf_eri;
    // added for load-balancing
    int local_id=workerid;
    size_t offset;
    mythread = omp_get_thread_num();
    // per-thread ERI buffer capacity (entries), owned by the twoint module
    ebuf_max_nzeri = (long)
	ofmo_twoint_set_buffer_size( mythread, ebuf_buffer_size_mb );
    ebuf_ind4 = ofmo_twoint_get_ebuf_ind4( mythread );
    ebuf_eri = ofmo_twoint_get_ebuf_eri( mythread );
    last_ijcs = last_klcs = -1;
    ebuf_non_zero_eri = 0;
    // added for load-balancing
    //offset = ofmo_integ_get_loop_offset( mythread );
    offset = 0;
    // initialize
    ofmo_twoint_set_last_eri_type( mythread, -1 );
    if (ebuf_max_nzeri<=0) return -1;
    for ( La=0; La<=maxlqn; La++ ) {
	for ( Lb=0; Lb<=La; Lb++ ) {
	    Lab = La*(La+1)/2 + Lb;
	    for ( Lc=0; Lc<=La; Lc++ ) {
		for ( Ld=0; Ld<=(Lc==La? Lb : Lc ); Ld++ ) {
		    Lcd = Lc*(Lc+1)/2 + Ld;
		    Labcd = Lab*(Lab+1)/2 + Lcd;
#ifdef USE_CUDA
		    // quartet types assigned to the GPU are skipped here,
		    // but still advance the load-balancing offset
		    if (cuda_use_Device() && cuda_get_optCPU(Labcd)!=0) {
		      offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
		      continue;
		    }
#endif
		    // rotate the worker id with the accumulated offset so
		    // work is spread evenly across workers
		    local_id =
			(int)((offset+(size_t)workerid)%(size_t)nworkers);
		    ret_code = calc_twoint_buffer[Labcd](
			    &nworkers, &local_id,
			    //&nworkers, &workerid,
			    &La, &Lb, &Lc, &Ld,
			    shel_atm, shel_ini, atom_x, atom_y, atom_z,
			    leading_cs_pair, csp_schwarz, csp_ics, csp_jcs,
			    csp_leading_ps_pair,
			    psp_zeta, psp_dkps, psp_xiza,
			    &ebuf_max_nzeri, &ebuf_non_zero_eri,
			    ebuf_eri, ebuf_ind4,
			    &last_ijcs, &last_klcs );
		    if ( ret_code == OFMO_EBUF_FULL ) {
			// buffer exhausted: record where the direct pass
			// (ofmo_integ_gen_gmat) must resume
			ofmo_twoint_set_last_eri_type( mythread, Labcd );
			ofmo_twoint_set_last_ijcs( mythread, last_ijcs );
			ofmo_twoint_set_last_klcs( mythread, last_klcs );
			ofmo_twoint_set_stored_nzeri( mythread,
				(size_t)ebuf_non_zero_eri );
			//ofmo_integ_set_loop_offset( mythread, offset );
			//return 0;
			return Labcd;
		    }
		    // added for load-balancing
		    offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
		}// Ld
	    } // Lc
	} // Lb
    } // La
    // everything fit into the buffer: mark with the 100000 sentinel
    ofmo_twoint_set_last_eri_type( mythread, 100000 );
    ofmo_twoint_set_last_ijcs( mythread, last_ijcs );
    ofmo_twoint_set_last_klcs( mythread, last_klcs );
    ofmo_twoint_set_stored_nzeri( mythread, (size_t)ebuf_non_zero_eri );
    // added for load-balancing
    //ofmo_integ_set_loop_offset( mythread, offset );
    //return 0;
    return 100000;
}
/* Contract a list of stored ERIs with the square density matrix D and
 * accumulate the result into the square Fock-like matrix G (both nao x nao).
 * For each unique integral (ij|kl) the Coulomb term enters with weight 4
 * and the exchange terms with weight x_coef (file-scope HF-exchange scaling
 * used for DFT; presumably 1.0 for pure HF — see ofmo_integ_gen_gmat).
 * eri_ind4[] holds 4 short indices per integral value.  Always returns 0. */
int ofmo_integ_add_fock( const int nao, const size_t nstored_eri,
	const double eri_val[], const short int eri_ind4[],
	const double D[], double G[] ) {
    size_t n;
    const short int *quad = eri_ind4;
    for ( n = 0; n < nstored_eri; n++, quad += 4 ) {
	double val = eri_val[n];
	double wJ  = val * 4.e0;     /* Coulomb weight */
	double wK  = val * x_coef;   /* exchange weight (DFT scaling) */
	int i = (int)quad[0];
	int j = (int)quad[1];
	int k = (int)quad[2];
	int l = (int)quad[3];
	int irow = i*nao;
	int jrow = j*nao;
	int krow = k*nao;
	/* Coulomb contributions */
	G[irow+j] += D[krow+l] * wJ;
	G[krow+l] += D[irow+j] * wJ;
	/* exchange contributions */
	G[irow+k] -= D[jrow+l] * wK;
	G[irow+l] -= D[jrow+k] * wK;
	G[jrow+k] -= D[irow+l] * wK;
	G[jrow+l] -= D[irow+k] * wK;
    }
    return 0;
}
/*
* バッファに保存されている2電子積分を用いてFock行列を計算する
* ただし、正方行列を圧縮形式に折りたたまないと、正確にならない
* 引数の密度行列DとFock行列Gは正方行列として扱う
* スレッド並列部分で呼び出す
* Gは初期化されていることを期待している
* 以下の変数はスレッド毎に異なるはず
* non_zero_eri
* ebuf_eri[]
* ebuf_ind4[]
* G[]
* */
/* Fold the ERIs buffered by this thread (in-core portion) into its local
 * G matrix, treating D and G as square nao x nao matrices.
 * Fetches this thread's buffer pointers and stored-integral count from the
 * twoint module and delegates the contraction to ofmo_integ_add_fock().
 * Expects G to have been zero-initialized by the caller; returns 0. */
static int ofmo_twoint_fock_incore_partial(
	const int mythread, const int nao, const double D[],
	double G[] ) {
    double *vals      = ofmo_twoint_get_ebuf_eri( mythread );
    short int *quads  = ofmo_twoint_get_ebuf_ind4( mythread );
    size_t nints      = ofmo_twoint_get_stored_nzeri( mythread );
    ofmo_integ_add_fock( nao, nints, vals, quads, D, G );
    return 0;
}
/* Expand a packed triangular ("U"-compressed) matrix SRC of order n into
 * the full symmetric square matrix DST (row-major, n*n).  SRC stores the
 * n*(n+1)/2 unique elements in row-wise lower-triangle order; each one is
 * mirrored into both (i,j) and (j,i) of DST. */
static void unpack_matrix( const int n, const double SRC[], double DST[] )
{
    int row, col;
    const double *src = SRC;
    for ( row = 0; row < n; row++ ) {
	for ( col = 0; col <= row; col++ ) {
	    double v = *src++;
	    DST[row*n + col] = v;
	    DST[col*n + row] = v;
	}
    }
}
/** 電子反発積分(二電子積分)計算関数(2)
* @ingroup integ-top
*
* \c ofmo_integ_twoint 関数呼び出しで計算し保存された二電子積分を
* 用いて、二電子ハミルトン行列(G行列)を計算する。バッファに保存
* されていない積分は、計算して、G行列に加算する。
*
* @attention
* @li この関数は、OpenMPを用いたスレッド並列化を行っている。
* スレッド並列実行をするためには、スレッド並列領域内から
* この関数を呼び出す必要がある。
* @li \c nworkers とワーカID \c workerid を適切に設定すれば、OpenMPと
* MPIとのハイブリッド並列実行が可能である。MPI並列時に、完全な
* G行列を得るためには、この関数の終了後に
* \c MPI_Allreduce 関数などを用いたリダクション処理を行う必要がある。
* @li \c ofmo_integ_twoint_first が事前に呼び出されている必要がある。
* そうでない場合には、結果がおかしくなる場合がある。
* @li 得られるG行列は、軌道量子数の大きさで
* ソートされたものである。元の並びのG行列が欲しい場合には、
* 要素の並べ替えが必要である。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid 各ワーカプロセス(スレッド)のID。
* \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。
* @param[in] maxlqn 最大軌道量子数
* @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_ini[ics] CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位)
* @param[in] leading_cs_pair[itype] CSペアタイプ番号 \c itype の
* 先頭CSペア番号
* @param[in] csp_schwarz[icsp] CSペア番号 \c icsp のSchwarz積分
* @param[in] csp_ics[icsp] CSペア番号 \c icsp の1つ目のCS番号
* @param[in] csp_jcs[icsp] CSペア番号 \c icsp の2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[in] csp_leading_ps_pair[icsp] CSペア番号 \c icsp に含まれる
* PSペアの先頭PSペア番号
* @param[in] psp_zeta[ipsp] PSペア番号 \c ipsp の軌道指数和
* \f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps[ipsp] PSペア番号 \c ipsp の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza[ipsp] PSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
* @param[in] nao AO数
* @param[in] D[] 密度行列(圧縮"U"形式)
*
* @param[out] G[] 二電子ハミルトン行列(G行列、圧縮"U"形式)
*
* @retval 0 正常終了(すべての積分が保存されても、バッファサイズの不足で
* 保存されていない積分があっても、正常終了である)
 * @retval -1 異常終了(2011/06/13現在では考えていない)
*
* */
/* Build the two-electron Hamiltonian (G matrix, compressed "U" form) from
 * the density matrix D, combining the ERIs buffered by
 * ofmo_integ_twoint_first() with direct recomputation of the remainder.
 * Must be called from inside an OpenMP parallel region; each thread
 * accumulates into its own square Gtmp, then the thread results are
 * reduced into G under a critical section at the end.
 * NOTE(review): statement order (critical/single/barrier sequencing and
 * the offset bookkeeping) is load-bearing — do not reorder. */
int ofmo_integ_gen_gmat(
	// parallelization
	const int nworkers, const int workerid,
	// basis set & cutoff table data
	const int maxlqn, const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const int leading_cs[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// density matrix data & G-matrix (output)
	const int nao, const double D[], double G[] ) {
	// DFT flag
	//const double cHFx ) {
    int nao2;
    double *D_SQ=NULL;
    int mythread = 0;
    int La, Lb, Lc, Ld, Lab, Lcd, Labcd;
    int last_eri_type, last_ijcs, last_klcs;
    int nnao;
    double *Gtmp;             // this thread's square (nao x nao) partial G
    long nzeri, max_nzeri;
    short *etmp_ind4;
    double *etmp_val;
    // added
    int local_id;
    size_t offset;
    //DFT_Bgn
    //x_coef = cHFx;
    // HF-exchange scaling used by ofmo_integ_add_fock(); fixed at 1.0
    // here (pure HF) until the cHFx parameter is enabled
    x_coef = 1.0;
    //DFT_End
    // debug: shell-letter table, referenced only by commented-out prints
    char *CS = "spdfg";
    // currently unused except in commented-out dispatch conditions below
    int g_last_eri_type = ofmo_twoint_get_global_last_eri_type();
#pragma omp critical
    D_SQ = ofmo_twoint_alloc_square_density( nao );
#pragma omp single
    {
	// one thread expands packed D to square form and clears packed G
	unpack_matrix( nao, D, D_SQ );
	nao2 = nao*(nao+1)/2;
	memset( G, '\0', sizeof(double)*nao2 );
    }
    float *Dcs;
    Dcs = ofmo_twoint_gen_Dcs(maxlqn, nao, leading_cs, D);
#ifdef USE_CUDA
#pragma omp master
    {
	int ret = 0;
	int ncs = leading_cs[maxlqn+1];
	ret = cuda_genGmat_Init(ncs, nao, Dcs, D_SQ, x_coef);
	if (ret<0) exit(1);
    }
#endif
    mythread = omp_get_thread_num();
#pragma omp barrier
    Gtmp = ofmo_twoint_alloc_local_gmat( mythread, nao );
    // quartet type at which this thread's buffer filled in the first pass
    // (-1 = nothing buffered, 100000 = everything buffered)
    last_eri_type = ofmo_twoint_get_last_eri_type( mythread );
    nnao = nao*nao;
    memset( Gtmp, '\0', sizeof(double)*nnao );
    //
    etmp_ind4 = ofmo_twoint_getadd_integ_ind4( mythread );
    etmp_val = ofmo_twoint_getadd_integ_val( mythread );
    max_nzeri = (long)ofmo_twoint_get_max_stored_integ( mythread );
    ofmo_twoint_set_stored_integ( mythread, 0 );
    // added for load-balancing
    //offset = ofmo_integ_get_loop_offset( mythread );
    offset = 0;
#ifndef USE_CUDA
    nzeri = 0;
    for ( La=0; La<=maxlqn; La++ ) {
	for ( Lb=0; Lb<=La; Lb++ ) {
	    Lab = La*(La+1)/2 + Lb;
	    for ( Lc=0; Lc<=La; Lc++ ) {
		for ( Ld=0; Ld<=(Lc==La? Lb : Lc ); Ld++ ) {
		    Lcd = Lc*(Lc+1)/2 + Ld;
		    Labcd = Lab*(Lab+1)/2 + Lcd;
		    //if ( Labcd < last_eri_type ) continue;
		    // types before last_eri_type are already in the buffer;
		    // skip them but keep the load-balancing offset in step
		    if ( Labcd < last_eri_type ) {
		      offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
		      continue;
		    }
		    if ( Labcd == last_eri_type ) {
			// resume mid-type from where the buffer filled
			last_ijcs =
			    ofmo_twoint_get_last_ijcs( mythread );
			last_klcs =
			    ofmo_twoint_get_last_klcs( mythread );
			/*// debug
			printf("#D thd=%d, (%c%c|%c%c) ijcs=%d, klcs=%d\n",
				mythread, CS[La], CS[Lb], CS[Lc], CS[Ld],
				last_ijcs, last_klcs );
			fflush(stdout);*/
		    } else {
			last_ijcs = last_klcs = -1;
		    }
		    local_id =
		      (int)((offset+(size_t)workerid)%(size_t)nworkers);
		    start_w2e();
		    calc_twoint_direct[Labcd](
			    &nworkers, &local_id,
			    //&nworkers, &workerid,
			    &La, &Lb, &Lc, &Ld,
			    shel_atm, shel_ini,
			    atom_x, atom_y, atom_z,
			    leading_cs_pair,
			    csp_schwarz, csp_ics, csp_jcs,
			    csp_leading_ps_pair,
			    psp_zeta, psp_dkps, psp_xiza,
			    &max_nzeri, &nzeri,
			    etmp_val, etmp_ind4,
			    &last_ijcs, &last_klcs,
			    &nao, D_SQ, Gtmp );
		    set_w2e(Labcd);
		    // added for load-balancing
		    offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
		}// Ld
	    } // Lc
	} // Lb
    } // La
    // flush any ERIs still held in the temporary direct-pass buffer
    if ( nzeri > 0 )
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, D_SQ, Gtmp );
    start_w2e();
    // contract the first-pass in-core buffer into Gtmp
    ofmo_twoint_fock_incore_partial( mythread, nao, D_SQ, Gtmp );
    set_w2e(-1);
#else /* USE_CUDA */
    nzeri = 0;
    // idev=1 for GPU, 0 for CPU
    for (int idev=1; idev>=0; idev--) {
      offset = 0;
      for ( La=0; La<=maxlqn; La++ ) {
	for ( Lb=0; Lb<=La; Lb++ ) {
	    Lab = La*(La+1)/2 + Lb;
	    for ( Lc=0; Lc<=La; Lc++ ) {
		for ( Ld=0; Ld<=(Lc==La? Lb : Lc ); Ld++ ) {
		    Lcd = Lc*(Lc+1)/2 + Ld;
		    Labcd = Lab*(Lab+1)/2 + Lcd;
		    // quartet types offloaded to the GPU
                    int gpu = (cuda_use_Device() && cuda_get_optCPU(Labcd)!=0);
		    //if ( Labcd < last_eri_type ) continue;
		    if (!gpu && Labcd < last_eri_type ) {
		      offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
		      continue;
		    }
		    if ( Labcd == last_eri_type ) {
			last_ijcs =
			    ofmo_twoint_get_last_ijcs( mythread );
			last_klcs =
			    ofmo_twoint_get_last_klcs( mythread );
		    } else {
			last_ijcs = last_klcs = -1;
		    }
		    local_id =
		      (int)((offset+(size_t)workerid)%(size_t)nworkers);
		    start_w2e();
		    //if (idev==0 && Labcd<=g_last_eri_type) {
		    if (idev==0 && !gpu) {
		    calc_twoint_direct[Labcd](
			    &nworkers, &local_id,
			    //&nworkers, &workerid,
			    &La, &Lb, &Lc, &Ld,
			    shel_atm, shel_ini,
			    atom_x, atom_y, atom_z,
			    leading_cs_pair,
			    csp_schwarz, csp_ics, csp_jcs,
			    csp_leading_ps_pair,
			    psp_zeta, psp_dkps, psp_xiza,
			    &max_nzeri, &nzeri,
			    etmp_val, etmp_ind4,
			    &last_ijcs, &last_klcs,
			    &nao, D_SQ, Gtmp );
		    //} else if (idev>0 && Labcd>g_last_eri_type) {
		    } else if (idev>0 && gpu) {
		    cuda_calc_twoint_direct(Labcd,
			    nworkers, local_id,
			    //nworkers, workerid,
			    La, Lb, Lc, Ld,
			    shel_atm, shel_ini,
			    atom_x, atom_y, atom_z,
			    leading_cs_pair,
			    csp_schwarz, csp_ics, csp_jcs,
			    csp_leading_ps_pair,
			    psp_zeta, psp_dkps, psp_xiza,
			    max_nzeri, &nzeri,
			    etmp_val, etmp_ind4,
			    last_ijcs, last_klcs,
			    nao, D_SQ, Gtmp );
		    }
		    set_w2e(Labcd);
		    // added for load-balancing
		    offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
		}// Ld
	    } // Lc
	} // Lb
      } // La
    } // idev
    // flush any ERIs still held in the temporary direct-pass buffer
    if ( nzeri > 0 )
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, D_SQ, Gtmp );
    start_w2e();
    // contract the first-pass in-core buffer into Gtmp
    ofmo_twoint_fock_incore_partial( mythread, nao, D_SQ, Gtmp );
    set_w2e(-1);
#pragma omp master
    {
	int ret;
	ret = cuda_genGmat_Add(nao, Gtmp);
	if (ret<0) exit(1);
    }
#endif /* USE_CUDA */
    // threads that have finished fold their square partial result into the
    // shared packed G, one at a time
#pragma omp critical
    {
	int i, j, ij;
	ij = 0;
	for ( i=0; i<nao; i++ ) {
	    for ( j=0; j<i; j++ ) {
		// off-diagonal: sum both halves of the square partial G
		G[ij] += Gtmp[i*nao+j] + Gtmp[j*nao+i];
		ij++;
	    }
	    G[ij] += Gtmp[i*nao+i];
	    ij++;
	}
    }
    return 0;
}
//static int (*calc_twoint_direct[])(
/* Dispatch table for the four-center inter-fragment Coulomb kernels.
 * Indexed by Labcd = Lab*6 + Lcd (see ofmo_integ_ifc4c_sorted_partial),
 * where the fragment pair type Lab runs over (ss,ps,pp,ds,dp,dd) and the
 * monomer pair type Lcd likewise.  Each kernel contracts the four-center
 * integrals with the monomer density D_mon into the fragment potential
 * V_frg.  Original and Rys-quadrature kernel sets are kept commented out. */
static int (*calc_ifc4c[])(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis and cutoff table data for fragment
	const int shel_atm_frg[], const int shel_ini_frg[],
	const double atom_x_frg[], const double atom_y_frg[],
	const double atom_z_frg[], const int leading_cs_pair_frg[],
	const double csp_schwarz_frg[],
	const int csp_ics_frg[], const int csp_jcs_frg[],
	const int csp_leading_ps_pair_frg[],
	const double psp_zeta_frg[], const double psp_dkps_frg[],
	const double psp_xiza_frg[],
	// basis and cutoff table data for monomer
	const int shel_atm_mon[], const int shel_ini_mon[],
	const double atom_x_mon[], const double atom_y_mon[],
	const double atom_z_mon[], const int leading_cs_pair_mon[],
	const double csp_schwarz_mon[],
	const int csp_ics_mon[], const int csp_jcs_mon[],
	const int csp_leading_ps_pair_mon[],
	const double psp_zeta_mon[], const double psp_dkps_mon[],
	const double psp_xiza_mon[],
	// density matrix of monomer
	const double D_mon[],
	// (output) Coulomb potential
	double V_frg[] ) = {
    // original
    /*ofmo_ifc4c_ssss__, ofmo_ifc4c_ssps__, ofmo_ifc4c_sspp__,
    ofmo_ifc4c_ssds__, ofmo_ifc4c_ssdp__, ofmo_ifc4c_ssdd__,
    ofmo_ifc4c_psss__, ofmo_ifc4c_psps__, ofmo_ifc4c_pspp__,
    ofmo_ifc4c_psds__, ofmo_ifc4c_psdp__, ofmo_ifc4c_psdd__,
    ofmo_ifc4c_ppss__, ofmo_ifc4c_ppps__, ofmo_ifc4c_pppp__,
    ofmo_ifc4c_ppds__, ofmo_ifc4c_ppdp__, ofmo_ifc4c_ppdd__,
    ofmo_ifc4c_dsss__, ofmo_ifc4c_dsps__, ofmo_ifc4c_dspp__,
    ofmo_ifc4c_dsds__, ofmo_ifc4c_dsdp__, ofmo_ifc4c_dsdd__,
    ofmo_ifc4c_dpss__, ofmo_ifc4c_dpps__, ofmo_ifc4c_dppp__,
    ofmo_ifc4c_dpds__, ofmo_ifc4c_dpdp__, ofmo_ifc4c_dpdd__,
    ofmo_ifc4c_ddss__, ofmo_ifc4c_ddps__, ofmo_ifc4c_ddpp__,
    ofmo_ifc4c_ddds__, ofmo_ifc4c_dddp__, ofmo_ifc4c_dddd__,*/
    // OS
    ofmo_ifc4c_os_ssss, ofmo_ifc4c_os_ssps, ofmo_ifc4c_os_sspp,
    ofmo_ifc4c_os_ssds, ofmo_ifc4c_os_ssdp, ofmo_ifc4c_os_ssdd,
    ofmo_ifc4c_os_psss, ofmo_ifc4c_os_psps, ofmo_ifc4c_os_pspp,
    ofmo_ifc4c_os_psds, ofmo_ifc4c_os_psdp, ofmo_ifc4c_os_psdd,
    ofmo_ifc4c_os_ppss, ofmo_ifc4c_os_ppps, ofmo_ifc4c_os_pppp,
    ofmo_ifc4c_os_ppds, ofmo_ifc4c_os_ppdp, ofmo_ifc4c_os_ppdd,
    ofmo_ifc4c_os_dsss, ofmo_ifc4c_os_dsps, ofmo_ifc4c_os_dspp,
    ofmo_ifc4c_os_dsds, ofmo_ifc4c_os_dsdp, ofmo_ifc4c_os_dsdd,
    ofmo_ifc4c_os_dpss, ofmo_ifc4c_os_dpps, ofmo_ifc4c_os_dppp,
    ofmo_ifc4c_os_dpds, ofmo_ifc4c_os_dpdp, ofmo_ifc4c_os_dpdd,
    ofmo_ifc4c_os_ddss, ofmo_ifc4c_os_ddps, ofmo_ifc4c_os_ddpp,
    ofmo_ifc4c_os_ddds, ofmo_ifc4c_os_dddp, ofmo_ifc4c_os_dddd,
    // Rys
    /*ofmo_ifc4c_rys_ssss, ofmo_ifc4c_rys_ssps, ofmo_ifc4c_rys_sspp,
    ofmo_ifc4c_rys_ssds, ofmo_ifc4c_rys_ssdp, ofmo_ifc4c_rys_ssdd,
    ofmo_ifc4c_rys_psss, ofmo_ifc4c_rys_psps, ofmo_ifc4c_rys_pspp,
    ofmo_ifc4c_rys_psds, ofmo_ifc4c_rys_psdp, ofmo_ifc4c_rys_psdd,
    ofmo_ifc4c_rys_ppss, ofmo_ifc4c_rys_ppps, ofmo_ifc4c_rys_pppp,
    ofmo_ifc4c_rys_ppds, ofmo_ifc4c_rys_ppdp, ofmo_ifc4c_rys_ppdd,
    ofmo_ifc4c_rys_dsss, ofmo_ifc4c_rys_dsps, ofmo_ifc4c_rys_dspp,
    ofmo_ifc4c_rys_dsds, ofmo_ifc4c_rys_dsdp, ofmo_ifc4c_rys_dsdd,
    ofmo_ifc4c_rys_dpss, ofmo_ifc4c_rys_dpps, ofmo_ifc4c_rys_dppp,
    ofmo_ifc4c_rys_dpds, ofmo_ifc4c_rys_dpdp, ofmo_ifc4c_rys_dpdd,
    ofmo_ifc4c_rys_ddss, ofmo_ifc4c_rys_ddps, ofmo_ifc4c_rys_ddpp,
    ofmo_ifc4c_rys_ddds, ofmo_ifc4c_rys_dddp, ofmo_ifc4c_rys_dddd,*/
};
/** 4中心クーロン相互作用項の計算を行う関数
* @ingroup integ-top
*
* FMO計算に現れる、2つのモノマー間の4中心クーロン相互作用項を
* 計算する。
* 4中心クーロン積分を計算して、積分と与えられた密度行列を元に、
* 4中心クーロン相互作用項を求める。
*
* @attention
* @li OpenMPを用いたスレッド並列化を行っている。スレッド並列時には、
* スレッド並列領域内でこの関数を呼び出す必要がある。
* @li \c nworkers とワーカID \c workerid を適切に設定すれば、OpenMPと
* MPIとのハイブリッド並列実行が可能である。MPI並列時に、完全な
* クーロン項を得るためには、この関数の終了後に
* \c MPI_Allreduce 関数などを用いたリダクション処理を行う必要がある。
* @li 得られるクーロン項 \c V_frg[] は、軌道量子数の大きさで
* ソートされたものである。元の並びのクーロン項が欲しい場合には、
* 要素の並べ替えが必要である。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid ワーカID。
* \f$ 0\le \tt{workerid} < \tt{nworkers} \f$
*
* @param[in] maxlqn 最大軌道量子数
* @param[in] shel_atm_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_ini_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のz座標(au単位)
* @param[in] leading_cs_pair_frg[itype] 対象フラグメントの、
* CSペアタイプ番号 \c itype の先頭CSペア番号
* @param[in] csp_schwarz_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp のSchwarz積分
* @param[in] csp_ics_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp の1つ目のCS番号
* @param[in] csp_jcs_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp の2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[in] csp_leading_ps_pair_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp に含まれるPSペアの先頭PSペア番号
* @param[in] psp_zeta_frg[ipsp] 対象フラグメントの、
* PSペア番号 \c ipsp の軌道指数和
* \f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps_frg[ipsp] 対象フラグメントの、
* PSペア番号 \c ipsp の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza_frg[ipsp] 対象フラグメントの、
* PSペア番号 \c ipsp の、
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
* @param[in] shel_atm_mon[ics] 相手モノマーの、
* CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_ini_mon[ics] 相手モノマーの、
* CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のz座標(au単位)
* @param[in] leading_cs_pair_mon[itype] 相手モノマーの、
* CSペアタイプ番号 \c itype の先頭CSペア番号
* @param[in] csp_schwarz_mon[icsp] 相手モノマーの、
* CSペア番号 \c icsp のSchwarz積分
* @param[in] csp_ics_mon[icsp] 相手モノマーの、
* CSペア番号 \c icsp の1つ目のCS番号
* @param[in] csp_jcs_mon[icsp] 相手モノマーの、
* CSペア番号 \c icsp の2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
 * @param[in] csp_leading_ps_pair_mon[icsp] 相手モノマーの、
* CSペア番号 \c icsp に含まれるPSペアの先頭PSペア番号
* @param[in] psp_zeta_mon[ipsp] 相手モノマーの、
* PSペア番号 \c ipsp の軌道指数和
* \f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps_mon[ipsp] 相手モノマーの、
* PSペア番号 \c ipsp の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza_mon[ipsp] 相手モノマーの、
* PSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
* @param[in] nao_mon 相手モノマーのAO数
* @param[in] D_mon[] 相手モノマーの密度行列(圧縮"U"形式)
*
* @param[out] V_frg[] 対象フラグメントにおける
* 相手モノマーとの間の4中心クーロン相互作用項
* (G行列、圧縮"U"形式)。この配列は、スレッドごとに別領域
* を与える必要がある。
*
* @retval 0 正常終了(すべての積分が保存されても、バッファサイズの不足で
* 保存されていない積分があっても、正常終了である)
* @retval -1 異常終了(2011/06/14現在では考えていない)
*
* */
/* Compute the four-center inter-fragment Coulomb term: contracts the
 * four-center integrals between the target fragment and the partner
 * monomer with the monomer density D_mon, accumulating into V_frg
 * (compressed "U" form, per-thread array).  Must be called from inside an
 * OpenMP parallel region; MPI callers reduce V_frg afterwards. */
int ofmo_integ_ifc4c_sorted_partial(
	// parallelization
	const int nworkers, const int workerid,
	// basis and cutoff table data for fragment
	const int maxlqn,
	const int shel_atm_frg[], const int shel_ini_frg[],
	const double atom_x_frg[], const double atom_y_frg[],
	const double atom_z_frg[], const int leading_cs_pair_frg[],
	const double csp_schwarz_frg[],
	const int csp_ics_frg[], const int csp_jcs_frg[],
	const int csp_leading_ps_pair_frg[],
	const double psp_zeta_frg[], const double psp_dkps_frg[],
	const double psp_xiza_frg[],
	// basis and cutoff table data for monomer
	const int shel_atm_mon[], const int shel_ini_mon[],
	const double atom_x_mon[], const double atom_y_mon[],
	const double atom_z_mon[], const int leading_cs_pair_mon[],
	const double csp_schwarz_mon[],
	const int csp_ics_mon[], const int csp_jcs_mon[],
	const int csp_leading_ps_pair_mon[],
	const double psp_zeta_mon[], const double psp_dkps_mon[],
	const double psp_xiza_mon[],
	//
	const int leading_cs_mon[],
	// density matrix of monomer
	const int nao_mon, const double D_mon[],
	// (output) Coulomb potential
	double V_frg[] ) {
    int La, Lb, Lc, Ld, Lab, Lcd, Labcd;
    // added for load-balancing
    int offset=0, local_id=workerid, mythread;
    // pos/ma/mb belong to the (disabled) dynamic load-balancing path and
    // are currently unused — kept for when that path is re-enabled
    int type, pos;
    int ma[] = {0, 1, 1, 2, 2, 2,};
    int mb[] = {0, 0, 1, 0, 1, 2,};
    // added for load-balancing
    mythread = omp_get_thread_num();
    //type = ofmo_integ_get_target_type( mythread );
    type = -1;	// no dynamic load balancing
#pragma omp master
    TLOG_LOG_IN(5);
    float *Dcs;
    Dcs = ofmo_twoint_gen_Dcs(maxlqn, nao_mon, leading_cs_mon, D_mon);
#ifdef USE_CUDA
#pragma omp master
    {
	int ret = 0;
	int ncs_mon = leading_cs_mon[maxlqn+1];
	ret = cuda_ifc4c_SetDcs(ncs_mon, Dcs);
	if (ret<0) exit(1);
	ret = cuda_ifc4c_calc_Init();
	if (ret<0) exit(1);
    }
#endif
#ifndef USE_CUDA
    // NOTE(review): the fetched loop offset is immediately overwritten with
    // 0 (static round-robin) — intentional while persistent load-balancing
    // state is disabled
    offset = ofmo_integ_get_loop_offset( mythread );
    offset = 0;
    local_id = (offset+workerid)%nworkers;
    for ( La=0; La<=maxlqn; La++ ) {
	for ( Lb=0; Lb<=La; Lb++ ) {
	    Lab = La*(La+1)/2 + Lb;
	    for ( Lc=0; Lc<=maxlqn; Lc++ ) {
		for ( Ld=0; Ld<=Lc; Ld++ ) {
		    Lcd = Lc*(Lc+1)/2 + Ld;
		    //Labcd = Lab*maxlqn2 + Lcd;
		    // 6 = number of monomer pair types for maxlqn == 2
		    Labcd = Lab*6 + Lcd;
		    calc_ifc4c[Labcd](
			    &nworkers, &local_id,
			    &La, &Lb, &Lc, &Ld,
			    shel_atm_frg, shel_ini_frg,
			    atom_x_frg, atom_y_frg, atom_z_frg,
			    leading_cs_pair_frg,
			    csp_schwarz_frg, csp_ics_frg, csp_jcs_frg,
			    csp_leading_ps_pair_frg,
			    psp_zeta_frg, psp_dkps_frg, psp_xiza_frg,
			    shel_atm_mon, shel_ini_mon,
			    atom_x_mon, atom_y_mon, atom_z_mon,
			    leading_cs_pair_mon,
			    csp_schwarz_mon, csp_ics_mon, csp_jcs_mon,
			    csp_leading_ps_pair_mon,
			    psp_zeta_mon, psp_dkps_mon, psp_xiza_mon,
			    D_mon, V_frg );
		    // added for load-balancing
		    offset +=
			(leading_cs_pair_frg[Lab+1]-leading_cs_pair_frg[Lab]);
		    local_id = (offset+workerid)%nworkers;
		}
	    }
	}
    }
#else /* USE_CUDA */
    // same note as above: offset is reset to 0 on purpose
    offset = ofmo_integ_get_loop_offset( mythread );
    offset = 0;
    local_id = (offset+workerid)%nworkers;
    // idev=1 for GPU, 0 for CPU
    for (int idev=1; idev>=0; idev--) {
    for ( La=0; La<=maxlqn; La++ ) {
	for ( Lb=0; Lb<=La; Lb++ ) {
	    Lab = La*(La+1)/2 + Lb;
	    for ( Lc=0; Lc<=maxlqn; Lc++ ) {
		for ( Ld=0; Ld<=Lc; Ld++ ) {
		    Lcd = Lc*(Lc+1)/2 + Ld;
		    //Labcd = Lab*maxlqn2 + Lcd;
		    Labcd = Lab*6 + Lcd;
		    cuda_ifc4c_calc(idev,
			    &nworkers, &local_id,
			    &La, &Lb, &Lc, &Ld,
			    shel_atm_frg, shel_ini_frg,
			    atom_x_frg, atom_y_frg, atom_z_frg,
			    leading_cs_pair_frg,
			    csp_schwarz_frg, csp_ics_frg, csp_jcs_frg,
			    csp_leading_ps_pair_frg,
			    psp_zeta_frg, psp_dkps_frg, psp_xiza_frg,
			    shel_atm_mon, shel_ini_mon,
			    atom_x_mon, atom_y_mon, atom_z_mon,
			    leading_cs_pair_mon,
			    csp_schwarz_mon, csp_ics_mon, csp_jcs_mon,
			    csp_leading_ps_pair_mon,
			    psp_zeta_mon, psp_dkps_mon, psp_xiza_mon,
			    D_mon, V_frg );
		    // added for load-balancing
		    offset +=
			(leading_cs_pair_frg[Lab+1]-leading_cs_pair_frg[Lab]);
		    local_id = (offset+workerid)%nworkers;
		}
	    }
	}
    }
    }
#endif /* USE_CUDA */
    // added for load-balancing
    ofmo_integ_set_loop_offset( mythread, offset );
#pragma omp master
    TLOG_LOG_OUT(5);
    return 0;
}
/* Dispatch table for the three-center inter-fragment Coulomb kernels.
 * Unlike calc_ifc4c, the monomer side enters through its AO populations
 * (ao_pop_mon) rather than a density matrix, so only diagonal monomer
 * types (ss,pp,dd) appear: 6 fragment pair types x 3 monomer types.
 * The Rys-quadrature kernel set is kept commented out. */
static int (*calc_ifc3c[]) (
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc,
	// basis and cutoff table data for fragment
	const int shel_atm_frg[], const int shel_ini_frg[],
	const double atom_x_frg[], const double atom_y_frg[],
	const double atom_z_frg[], const int leading_cs_pair_frg[],
	//const double csp_schwarz_frg[],
	const int csp_ics_frg[], const int csp_jcs_frg[],
	const int csp_leading_ps_pair_frg[],
	const double psp_zeta_frg[], const double psp_dkps_frg[],
	const double psp_xiza_frg[],
	// basis set data for monomer
	const int leading_cs_mon[],
	const int shel_tem_mon[], const int shel_atm_mon[],
	const int shel_add_mon[], const int shel_ini_mon[],
	const double atom_x_mon[], const double atom_y_mon[],
	const double atom_z_mon[],
	const double prim_exp_mon[], const double prim_coe_mon[],
	// monomer AO population
	const double ao_pop_mon[],
	// (output) Coulomb potential
	double V_frg[] ) = {
    // OS
    ofmo_ifc3c_os_ssss, ofmo_ifc3c_os_sspp, ofmo_ifc3c_os_ssdd,
    ofmo_ifc3c_os_psss, ofmo_ifc3c_os_pspp, ofmo_ifc3c_os_psdd,
    ofmo_ifc3c_os_ppss, ofmo_ifc3c_os_pppp, ofmo_ifc3c_os_ppdd,
    ofmo_ifc3c_os_dsss, ofmo_ifc3c_os_dspp, ofmo_ifc3c_os_dsdd,
    ofmo_ifc3c_os_dpss, ofmo_ifc3c_os_dppp, ofmo_ifc3c_os_dpdd,
    ofmo_ifc3c_os_ddss, ofmo_ifc3c_os_ddpp, ofmo_ifc3c_os_dddd,
    // Rys
    /*ofmo_ifc3c_rys_ssss, ofmo_ifc3c_rys_sspp, ofmo_ifc3c_rys_ssdd,
    ofmo_ifc3c_rys_psss, ofmo_ifc3c_rys_pspp, ofmo_ifc3c_rys_psdd,
    ofmo_ifc3c_rys_ppss, ofmo_ifc3c_rys_pppp, ofmo_ifc3c_rys_ppdd,
    ofmo_ifc3c_rys_dsss, ofmo_ifc3c_rys_dspp, ofmo_ifc3c_rys_dsdd,
    ofmo_ifc3c_rys_dpss, ofmo_ifc3c_rys_dppp, ofmo_ifc3c_rys_dpdd,
    ofmo_ifc3c_rys_ddss, ofmo_ifc3c_rys_ddpp, ofmo_ifc3c_rys_dddd,*/
};
/** 3中心クーロン相互作用項の計算を行う関数
* @ingroup integ-top
*
* FMO計算に現れる、2つのモノマー間の3中心クーロン相互作用項を
* 計算する。
* 3中心クーロン積分を計算して、積分と与えられた密度行列を元に、
* 3中心クーロン相互作用項を求める。
*
* @attention
* @li この関数はOpenMPを用いたスレッド並列化が行われている。スレッド並列
* 実行のためには、スレッド並列領域内からこの関数を呼び出す
* 必要がある。
* @li \c nworkers と \c workerid を適切に設定すると、OpenMPとMPIの
* ハイブリッド並列実行が可能である。MPI並列を利用する際には、
* 関数終了後に、\c MPI_Allreduce 関数などを用いたリダクション処理
* を行うことで、完全なクーロン項が得られる。
* @li 得られるクーロン項 \c V_frg[] は、軌道量子数の大きさで
* ソートされたものである。元の並びのクーロン項が欲しい場合には、
* 要素の並べ替えが必要である。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid ワーカID。
* \f$ 0\le \tt{workerid} < \tt{nworkers} \f$
*
* @param[in] maxlqn 最大軌道量子数
* @param[in] shel_atm_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_ini_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のz座標(au単位)
* @param[in] leading_cs_pair_frg[itype] 対象フラグメントの、
* CSペアタイプ番号 \c itype の先頭CSペア番号
* @param[in] csp_schwarz_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp のSchwarz積分
* @param[in] csp_ics_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp の1つ目のCS番号
* @param[in] csp_jcs_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp の2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[in] csp_leading_ps_pair_frg[icsp] 対象フラグメントの、
* CSペア番号 \c icsp に含まれるPSペアの先頭PSペア番号
* @param[in] psp_zeta_frg[ipsp] 対象フラグメントの、
* PSペア番号 \c ipsp の軌道指数和
* \f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps_frg[ipsp] 対象フラグメントの、
* PSペア番号 \c ipsp の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza_frg[ipsp] 対象フラグメントの、
* PSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
* @param[in] leading_cs_mon[lqn] 相手モノマーの、
* 軌道量子数 \c lqn の先頭CS番号
* @param[in] shel_tem_mon[ics] 相手モノマーの、CS番号 \c ics のCSの縮約長
* @param[in] shel_atm_mon[ics] 相手モノマーの、
* CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_add_mon[ics] 相手モノマーの、CS番号 \c ics のCSに属する
* PSの先頭PS番号
* @param[in] shel_ini_mon[ics] 相手モノマーの、CS番号 \c ics のCSに
* 含まれるAOの先頭AO番号
* @param[in] atom_x_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のz座標(au単位)
* @param[in] prim_exp_mon[ips] 相手モノマーの、PS番号 \c ips のPSの
* 軌道指数
* @param[in] prim_coe_mon[ips] 相手モノマーの、PS番号 \c ips のPSの
* 規格化定数込みの縮約係数
*
* @param[in] ao_pop_mon[] 相手モノマーの、AO population
*
* @param[out] V_frg[] 対象フラグメントにおける
* 相手モノマーとの間の3中心クーロン相互作用項
* (G行列、圧縮"U"形式)。この配列は、スレッドごとに別領域
* を与える必要がある。
*
* @retval 0 正常終了(すべての積分が保存されても、バッファサイズの不足で
* 保存されていない積分があっても、正常終了である)
* @retval -1 異常終了(2011/06/14現在では考えていない)
*
* */
/* Driver for the 3-center Coulomb term (see Japanese doxygen above).
 * Loops over all (La,Lb,Lc) shell-type triplets with La >= Lb and
 * dispatches to the kernel in calc_ifc3c[3*Lab+Lc]. Carries a
 * per-thread loop offset across calls (ofmo_integ_get/set_loop_offset)
 * so that work cycles evenly over the nworkers workers.
 * Cleanup: removed unused locals (type, pos, ma[], mb[]), the dead
 * commented-out ofmo_integ_get_target_type() call, and the redundant
 * initializer of local_id (it is always recomputed from offset). */
int ofmo_integ_ifc3c_sorted_partial(
        // parallelization
        const int nworkers, const int workerid,
        // basis and cutoff table data for fragment
        const int maxlqn,
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        //const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis set data for monomer
        const int leading_cs_mon[],
        const int shel_tem_mon[], const int shel_atm_mon[],
        const int shel_add_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const double prim_exp_mon[], const double prim_coe_mon[],
        // monomer AO population
        const double ao_pop_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int La, Lb, Lc, Lab, Labc;
    // load-balancing state carried between calls on the same thread
    int offset, local_id, mythread;
    mythread = omp_get_thread_num();
    // restore the loop offset saved by the previous call on this thread
    offset = ofmo_integ_get_loop_offset( mythread );
    local_id = (offset+workerid)%nworkers;
    for ( La=0; La<=maxlqn; La++ ) {
        for ( Lb=0; Lb<=La; Lb++ ) {
            Lab = La*(La+1)/2 + Lb;     // canonical shell-pair type
            for ( Lc=0; Lc<=maxlqn; Lc++ ) {
                // table index: 3 monomer shell types (s,p,d) per pair type
                Labc = 3*Lab + Lc;
                calc_ifc3c[Labc](
                        &nworkers, &local_id,
                        &La, &Lb, &Lc,
                        shel_atm_frg, shel_ini_frg,
                        atom_x_frg, atom_y_frg, atom_z_frg,
                        leading_cs_pair_frg,
                        csp_ics_frg, csp_jcs_frg,
                        csp_leading_ps_pair_frg,
                        psp_zeta_frg, psp_dkps_frg, psp_xiza_frg,
                        leading_cs_mon,
                        shel_tem_mon, shel_atm_mon, shel_add_mon,
                        shel_ini_mon, atom_x_mon, atom_y_mon,
                        atom_z_mon,
                        prim_exp_mon, prim_coe_mon,
                        ao_pop_mon, V_frg );
                // advance by the number of CS pairs of this pair type so
                // the next dispatch starts at a rotated worker id
                offset += (leading_cs_pair_frg[Lab+1]
                        - leading_cs_pair_frg[Lab]);
                local_id = (offset+workerid)%nworkers;
            }
        }
    }
    // persist the offset for the next call on this thread
    ofmo_integ_set_loop_offset( mythread, offset );
    return 0;
}
/* Dispatch table of 2-center Coulomb integral kernels.
 * Indexed by the canonical shell-pair type Lab = La*(La+1)/2 + Lb
 * (ss,ps,pp,ds,dp,dd -> 0..5). All entries share one signature. */
static int (*calc_ifc2c[]) (
// parallelization
const int *nworkers, const int *workerid,
// integral type data
const int *pLa, const int *pLb,
// basis set data for fragment
const int leading_cs_frg[],
const int shel_tem_frg[], const int shel_atm_frg[],
const int shel_add_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[],
const double prim_exp_frg[], const double prim_coe_frg[],
// atomic charge and atomic coordinate data for monomer
const int *nat_mon, const double atom_x_mon[],
const double atom_y_mon[], const double atom_z_mon[],
const double atm_pop_mon[],
// output
double V_frg[] ) = {
ofmo_ifc2c_ss__, ofmo_ifc2c_ps__, ofmo_ifc2c_pp__,
ofmo_ifc2c_ds__, ofmo_ifc2c_dp__, ofmo_ifc2c_dd__,
};
/** 2中心クーロン相互作用項の計算を行う関数
* @ingroup integ-top
*
* FMO計算で現れる2中心クーロン相互作用項を計算する関数。
* 計算対象のフラグメントのソート基底関数と、相手モノマーのatomic
* populationを与えると、2中心クーロン作用項が計算される。
*
* @attention
* @li スレッド並列実行を行う場合には、スレッド並列領域内から
* この関数を呼び出す必要がある。
* @li \c nworkers と \c workerid を適切に設定すると、OpenMPとMPIの
* ハイブリッド並列実行が可能である。ただし、MPI並列利用時には、
* 関数終了後に、\c MPI_Allreduce などを用いたリダクション処理を
* 行うことで、完全なクーロン項が得られる
* @li 得られるクーロン項 \c V_frg[] は、軌道量子数の大きさで
* ソートされたものである。元の並びのクーロン項が欲しい場合には、
* 要素の並べ替えが必要である。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid 各ワーカプロセス(スレッド)のID。
* \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。
* @param[in] maxlqn 最大軌道量子数
* @param[in] leading_cs_frg[lqn] 対象フラグメントの、
* 軌道量子数 \c lqn の先頭CS番号
* @param[in] shel_tem_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSの縮約長
* @param[in] shel_atm_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_add_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSに含まれるPSの先頭PS番号
* @param[in] shel_ini_frg[ics] 対象フラグメントの、
* CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z_frg[iat] 対象フラグメントの、
* 原子の番号 \c iat のz座標(au単位)
* @param[in] prim_exp_frg[ips] 対象フラグメントの、
* PS番号 \c ips のPSの軌道指数
* @param[in] prim_coe_frg[ips] 対象フラグメントの、
* PS番号 \c ips のPSの規格化定数込みの縮約係数
*
* @param[in] nat_mon 相手モノマーの、原子数
* @param[in] atom_x_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のz座標(au単位)
* @param[in] atm_pop_mon[iat] 相手モノマーの、
* 原子の番号 \c iat のatomic population(原子番号ではなく原子電荷分布)
*
* @param[out] V_frg[] 対象フラグメントにおける
* 相手モノマーとの間の2中心クーロン相互作用項
* (G行列、圧縮"U"形式)。この配列は、同一プロセス内のスレッド
* 間で共有である。
*
* @retval 0 正常終了
* @retval -1 異常終了(いま(2011/06/13)のところ考えていない)
*
* */
/* Driver for the 2-center Coulomb term (see Japanese doxygen above).
 * Loops over all (La,Lb) shell-type pairs with La >= Lb and dispatches
 * to the kernel in calc_ifc2c[Lab]. Work splitting among workers is
 * handled inside the kernels via (nworkers, workerid).
 * Cleanup: removed unused locals (type, mythread, ma[], mb[]), the
 * side-effect-free omp_get_thread_num() call whose result was only
 * consumed by the unused `type`, and the dead commented-out
 * ofmo_integ_get_target_type() call. */
int ofmo_integ_ifc2c_sorted_partial(
        // parallelization
        const int nworkers, const int workerid,
        // input data of fragment
        const int maxlqn, const int leading_cs_frg[],
        const int shel_tem_frg[], const int shel_atm_frg[],
        const int shel_add_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[],
        const double prim_exp_frg[], const double prim_coe_frg[],
        // input data of counter monomer
        const int nat_mon, const double atom_x_mon[],
        const double atom_y_mon[], const double atom_z_mon[],
        const double atm_pop_mon[],
        // output data
        double V_frg[] ) {
    int La, Lb, Lab;
    for ( La=0; La<=maxlqn; La++ ) {
        for ( Lb=0; Lb<=La; Lb++ ) {
            Lab = La*(La+1)/2 + Lb;     // canonical shell-pair type
            calc_ifc2c[Lab](
                    &nworkers, &workerid,
                    &La, &Lb, leading_cs_frg,
                    shel_tem_frg, shel_atm_frg, shel_add_frg,
                    shel_ini_frg,
                    atom_x_frg, atom_y_frg, atom_z_frg,
                    prim_exp_frg, prim_coe_frg,
                    &nat_mon, atom_x_mon, atom_y_mon, atom_z_mon,
                    atm_pop_mon, V_frg );
        }
    }
    return 0;
}
|
8683.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization: fill the four 3mm input matrices with the
 * deterministic PolyBench reference pattern so output is checkable. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int row, col;
  /* A: ni x nk */
  for (row = 0; row < ni; row++)
    for (col = 0; col < nk; col++)
      A[row][col] = ((DATA_TYPE) row*col) / ni;
  /* B: nk x nj */
  for (row = 0; row < nk; row++)
    for (col = 0; col < nj; col++)
      B[row][col] = ((DATA_TYPE) row*(col+1)) / nj;
  /* C: nj x nm */
  for (row = 0; row < nj; row++)
    for (col = 0; col < nm; col++)
      C[row][col] = ((DATA_TYPE) row*(col+3)) / nl;
  /* D: nm x nl */
  for (row = 0; row < nm; row++)
    for (col = 0; col < nl; col++)
      D[row][col] = ((DATA_TYPE) row*(col+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
                 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int r, c;
  for (r = 0; r < ni; r++) {
    for (c = 0; c < nl; c++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[r][c]);
      /* newline every 20 elements (r*ni+c indexing matches the
         PolyBench reference implementation) */
      if ((r * ni + c) % 20 == 0)
        fprintf (stderr, "\n");
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return. Computes G = (A*B)*(C*D) via the
   intermediates E = A*B and F = C*D; two threads share each product. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
                DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
                DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
                DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
#pragma scop
  /* loop indices are declared at loop scope, so no private() clause
     is needed on the parallel region */
#pragma omp parallel num_threads(2)
  {
    /* E := A*B */
#pragma omp for schedule(dynamic, 8)
    for (int i = 0; i < _PB_NI; i++)
      for (int j = 0; j < _PB_NJ; j++)
      {
        E[i][j] = 0;
        for (int k = 0; k < _PB_NK; ++k)
          E[i][j] += A[i][k] * B[k][j];
      }
    /* F := C*D */
#pragma omp for schedule(dynamic, 8)
    for (int i = 0; i < _PB_NJ; i++)
      for (int j = 0; j < _PB_NL; j++)
      {
        F[i][j] = 0;
        for (int k = 0; k < _PB_NM; ++k)
          F[i][j] += C[i][k] * D[k][j];
      }
    /* G := E*F (the implicit barrier after each omp-for guarantees E
       and F are complete before this product starts) */
#pragma omp for schedule(dynamic, 8)
    for (int i = 0; i < _PB_NI; i++)
      for (int j = 0; j < _PB_NL; j++)
      {
        G[i][j] = 0;
        for (int k = 0; k < _PB_NJ; ++k)
          G[i][j] += E[i][k] * F[k][j];
      }
  }
#pragma endscop
}
/* Benchmark entry point: allocate the seven matrices, initialize the
 * inputs, time kernel_3mm, and print G to keep it live (prevents the
 * compiler from eliminating the computation). */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). E, F, G are outputs and need no init. */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
metric.c | /*---------------------------------------------------------------------------------
METRIC.C
-Helper functions for metric tensors
-Compute 4x4 matrix minor, adjoint, determinant and inverse
-Compute connection coefficients
-Raise and lower rank-1 tensors
-Take dot product of a contravariant and covariant rank-1 tensor
---------------------------------------------------------------------------------*/
#include "decs.h"
double MINOR(double m[16], int r0, int r1, int r2, int c0, int c1, int c2);
void adjoint(double m[16], double adjOut[16]);
double determinant(double m[16]);
/* Fill gcon with the inverse of gcov and return sqrt(|det gcov|)
 * (the metric determinant factor). */
inline double gcon_func(double gcov[NDIM][NDIM], double gcon[NDIM][NDIM])
{
double gdet = invert(&gcov[0][0],&gcon[0][0]);
return sqrt(fabs(gdet));
}
/* Copy the covariant metric at zone (i,j), location loc, out of grid
 * storage into a local 4x4 array (DLOOP2 iterates mu,nu over NDIM). */
inline void get_gcov(struct GridGeom *G, int i, int j, int loc, double gcov[NDIM][NDIM]) {
DLOOP2 gcov[mu][nu] = G->gcov[loc][mu][nu][j][i];
}
/* Copy the contravariant metric at zone (i,j), location loc, out of
 * grid storage into a local 4x4 array. */
inline void get_gcon(struct GridGeom *G, int i, int j, int loc, double gcon[NDIM][NDIM])
{
DLOOP2 gcon[mu][nu] = G->gcon[loc][mu][nu][j][i];
}
// Calculate connection coefficients Gamma^lam_{nu mu} at zone (i,j)
// and store them in G->conn[lam][nu][mu][j][i]. Three passes:
//  1. central-difference the covariant metric (step 2*DELTA) along
//     each coordinate mu, storing g_{lam nu,mu} temporarily in G->conn
//  2. combine into the lowered symbol
//     Gamma_{lam nu mu} = (g_{nu lam,mu} + g_{mu lam,nu} - g_{mu nu,lam})/2
//  3. raise the first index with gcon at CENT
inline void conn_func(struct GridGeom *G, int i, int j)
{
double tmp[NDIM][NDIM][NDIM];
double X[NDIM], Xh[NDIM], Xl[NDIM];
double gh[NDIM][NDIM];
double gl[NDIM][NDIM];
coord(i, j, CENT, X);
// pass 1: numerical derivatives of gcov, one coordinate at a time
for (int mu = 0; mu < NDIM; mu++) {
for (int kap = 0; kap < NDIM; kap++) {
Xh[kap] = X[kap];
}
for (int kap = 0; kap < NDIM; kap++) {
Xl[kap] = X[kap];
}
Xh[mu] += DELTA;
Xl[mu] -= DELTA;
gcov_func(Xh, gh);
gcov_func(Xl, gl);
for (int lam = 0; lam < NDIM; lam++) {
for (int nu = 0; nu < NDIM; nu++) {
G->conn[lam][nu][mu][j][i] = (gh[lam][nu] - gl[lam][nu])/(Xh[mu] -
Xl[mu]);
}
}
}
// Rearrange to find \Gamma_{lam nu mu}
for (int lam = 0; lam < NDIM; lam++) {
for (int nu = 0; nu < NDIM; nu++) {
for (int mu = 0; mu < NDIM; mu++) {
tmp[lam][nu][mu] = 0.5 * (G->conn[nu][lam][mu][j][i] +
G->conn[mu][lam][nu][j][i] -
G->conn[mu][nu][lam][j][i]);
}
}
}
// now mu nu kap
// Raise index to get \Gamma^lam_{nu mu}
for (int lam = 0; lam < NDIM; lam++) {
for (int nu = 0; nu < NDIM; nu++) {
for (int mu = 0; mu < NDIM; mu++) {
G->conn[lam][nu][mu][j][i] = 0.;
for (int kap = 0; kap < NDIM; kap++)
G->conn[lam][nu][mu][j][i] += G->gcon[CENT][lam][kap][j][i]*
tmp[kap][nu][mu];
}
}
}
}
// Lower a contravariant rank-1 tensor to a covariant one at a single
// zone (i,j): vcov_mu = g_{mu nu} vcon^nu.
// NOTE(review): vcov[mu][j][i] is zeroed and then accumulated in
// place, so passing the same array as vcon and vcov would read
// already-overwritten values -- presumably callers never alias; confirm.
inline void lower_grid(GridVector vcon, GridVector vcov, struct GridGeom *G, int i,
int j, int loc)
{
for (int mu = 0; mu < NDIM; mu++) {
vcov[mu][j][i] = 0.;
for (int nu = 0; nu < NDIM; nu++) {
vcov[mu][j][i] += G->gcov[loc][mu][nu][j][i]*vcon[nu][j][i];
}
}
}
// Lower the grid of contravariant rank-1 tensors to covariant ones:
// vcov_mu = g_{mu nu} vcon^nu over the zone range [jstart..jstop] x
// [istart..istop].
// Bug fix: the original second loop used `collapse(4)` over (mu, nu,
// j, i), so iterations differing only in nu ran concurrently and
// performed unsynchronized `+=` into the same vcov[mu][j][i] -- a data
// race. The nu accumulation is now kept sequential inside each
// (mu, j, i) iteration; only (mu, j, i) are collapsed and each output
// element is written by exactly one thread.
void lower_grid_vec(GridVector vcon, GridVector vcov, struct GridGeom *G, int jstart, int jstop, int istart, int istop, int loc)
{
#pragma omp parallel for simd collapse(3)
  DLOOP1 {
    ZSLOOP(jstart, jstop, istart, istop) {
      vcov[mu][j][i] = 0.;
      for (int nu = 0; nu < NDIM; nu++)
        vcov[mu][j][i] += G->gcov[loc][mu][nu][j][i]*vcon[nu][j][i];
    }
  }
}
// Raise a covariant rank-1 tensor to a contravariant one at a single
// zone (i,j): vcon^mu = g^{mu nu} vcov_nu. Same in-place accumulation
// (and therefore the same no-aliasing assumption) as lower_grid above.
inline void raise_grid(GridVector vcov, GridVector vcon, struct GridGeom *G, int i, int j, int loc)
{
for (int mu = 0; mu < NDIM; mu++) {
vcon[mu][j][i] = 0.;
for (int nu = 0; nu < NDIM; nu++) {
vcon[mu][j][i] += G->gcon[loc][mu][nu][j][i]*vcov[nu][j][i];
}
}
}
// Contraction of a contravariant with a covariant rank-1 tensor:
// returns sum over mu of vcon^mu * vcov_mu.
inline double dot(double vcon[NDIM], double vcov[NDIM])
{
  double sum = 0.;
  for (int mu = 0; mu < NDIM; ++mu)
    sum += vcon[mu]*vcov[mu];
  return sum;
}
// Minor of a 4x4 matrix
inline double MINOR(double m[16], int r0, int r1, int r2, int c0, int c1, int c2)
{
return m[4*r0+c0]*(m[4*r1+c1]*m[4*r2+c2] - m[4*r2+c1]*m[4*r1+c2]) -
m[4*r0+c1]*(m[4*r1+c0]*m[4*r2+c2] - m[4*r2+c0]*m[4*r1+c2]) +
m[4*r0+c2]*(m[4*r1+c0]*m[4*r2+c1] - m[4*r2+c0]*m[4*r1+c1]);
}
// Adjugate (classical adjoint) of a 4x4 row-major matrix: adjOut is
// the transposed cofactor matrix, so m * adjOut = det(m) * I. The
// alternating signs follow the (-1)^(row+col) cofactor pattern.
inline void adjoint(double m[16], double adjOut[16])
{
adjOut[ 0] = MINOR(m,1,2,3,1,2,3);
adjOut[ 1] = -MINOR(m,0,2,3,1,2,3);
adjOut[ 2] = MINOR(m,0,1,3,1,2,3);
adjOut[ 3] = -MINOR(m,0,1,2,1,2,3);
adjOut[ 4] = -MINOR(m,1,2,3,0,2,3);
adjOut[ 5] = MINOR(m,0,2,3,0,2,3);
adjOut[ 6] = -MINOR(m,0,1,3,0,2,3);
adjOut[ 7] = MINOR(m,0,1,2,0,2,3);
adjOut[ 8] = MINOR(m,1,2,3,0,1,3);
adjOut[ 9] = -MINOR(m,0,2,3,0,1,3);
adjOut[10] = MINOR(m,0,1,3,0,1,3);
adjOut[11] = -MINOR(m,0,1,2,0,1,3);
adjOut[12] = -MINOR(m,1,2,3,0,1,2);
adjOut[13] = MINOR(m,0,2,3,0,1,2);
adjOut[14] = -MINOR(m,0,1,3,0,1,2);
adjOut[15] = MINOR(m,0,1,2,0,1,2);
}
// Determinant of a 4x4 row-major matrix via cofactor expansion along
// the first row.
inline double determinant(double m[16])
{
  double det;
  det  = m[0]*MINOR(m,1,2,3,1,2,3);
  det -= m[1]*MINOR(m,1,2,3,0,2,3);
  det += m[2]*MINOR(m,1,2,3,0,1,3);
  det -= m[3]*MINOR(m,1,2,3,0,1,2);
  return det;
}
// Invert a 4x4 row-major matrix via the adjugate; invOut receives the
// inverse and the determinant is returned.
// NOTE(review): no guard for det == 0 -- a singular input fills invOut
// with inf/nan, matching the original behavior; callers rely on the
// returned det instead.
inline double invert(double *m, double *invOut)
{
  adjoint(m, invOut);
  const double det = determinant(m);
  const double scale = 1. / det;
  for (int k = 0; k < 16; ++k)
    invOut[k] *= scale;
  return det;
}
|
threads.c | #include <stdio.h>
#include <omp.h>
int main()
{
  /* Run a single-thread parallel region on the default target device
     and print the OpenMP thread id from inside it. */
#pragma omp target parallel num_threads(1)
  {
    printf ("Thread: %d\n", omp_get_thread_num());
  }
  return 0;
}
|
main.c | /*BHEADER****************************************************************
* (c) 2007 The Regents of the University of California *
* *
* See the file COPYRIGHT_and_DISCLAIMER for a complete copyright *
* notice and disclaimer. *
* *
*EHEADER****************************************************************/
//--------------
// A micro kernel
//--------------
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"
#include "headers.h"
//
const int testIter = /*50000;*/1000;
double totalWallTime = 0.0;
//
void test_Matvec();
void test_Relax();
void test_Axpy();
//
/* Benchmark driver: runs the Matvec, Relax and Axpy micro-kernels in
 * sequence, printing each kernel's accumulated wall time (the kernels
 * add their timed section into the global totalWallTime, which is
 * reset before each one) and the overall elapsed time. */
int main(int argc, char *argv[])
{
double t0 = 0.0,
t1 = 0.0,
del_wtime = 0.0;
int max_num_threads;
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// CORAL AMGmk Benchmark Version 1.0 \n");
printf("// \n");
printf("//------------ \n");
/* pin the run to one thread; the master-only read below then reports
 * the team size actually in effect */
omp_set_num_threads(1);
#pragma omp parallel
#pragma omp master
max_num_threads = omp_get_num_threads();
printf("\nmax_num_threads = %d \n\n",max_num_threads );
printf("\n testIter = %d \n\n", testIter );
t0 = omp_get_wtime();
// Matvec
totalWallTime = 0.0;
test_Matvec();
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// MATVEC\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
// Relax
totalWallTime = 0.0;
test_Relax();
//__WHATIF__BEGIN__
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// Relax\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
// Axpy
totalWallTime = 0.0;
test_Axpy();
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// Axpy\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
t1 = omp_get_wtime();;
del_wtime = t1 - t0;
printf("\nTotal Wall time = %f seconds. \n", del_wtime);
//__WHATIF__END__
return 0;
}
/* Time testIter sparse matrix-vector products y = A*x on a 50^3
 * 7-point Laplacian with x == 1, adding the timed section to
 * totalWallTime, then compare y against the reference vector `sol`
 * produced by the generator and report the max abs difference. */
void test_Matvec()
{
double t0 = 0.0,
t1 = 0.0;
hypre_CSRMatrix *A;
hypre_Vector *x, *y, *sol;
int nx, ny, nz, i;
double *values;
double *y_data, *sol_data;
double error, diff;
nx = 50; /* size per proc nx*ny*nz */
ny = 50;
nz = 50;
/* stencil weights: 6 on the diagonal, -1 per neighbor direction */
values = hypre_CTAlloc(double, 4);
values[0] = 6;
values[1] = -1;
values[2] = -1;
values[3] = -1;
A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
hypre_SeqVectorSetConstantValues(x,1);
hypre_SeqVectorSetConstantValues(y,0);
t0 = omp_get_wtime();
for (i=0; i<testIter; ++i)
hypre_CSRMatrixMatvec(1,A,x,0,y);
t1 = omp_get_wtime() ;
totalWallTime += t1 - t0;
/* verification: max |y - sol| over all grid points */
/* NOTE(review): fabs is used but <math.h> is not among this file's
 * visible includes -- presumably pulled in via headers.h; confirm */
y_data = hypre_VectorData(y);
sol_data = hypre_VectorData(sol);
error = 0;
for (i=0; i < nx*ny*nz; i++)
{
diff = fabs(y_data[i]-sol_data[i]);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Matvec: error: %e\n", error);
hypre_TFree(values);
hypre_CSRMatrixDestroy(A);
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
hypre_SeqVectorDestroy(sol);
}
/* Time testIter sequential relaxation sweeps on the same 50^3
 * Laplacian system (right-hand side `sol`, iterate `x` starting at 1),
 * adding the timed section to totalWallTime, then report how far the
 * final iterate is from the constant-1 solution. */
void test_Relax()
{
double t0 = 0.0,
t1 = 0.0;
hypre_CSRMatrix *A;
hypre_Vector *x, *y, *sol;
int nx, ny, nz, i;
double *values;
double *x_data;
double diff, error;
nx = 50; /* size per proc nx*ny*nz */
ny = 50;
nz = 50;
/* stencil weights: 6 on the diagonal, -1 per neighbor direction */
values = hypre_CTAlloc(double, 4);
values[0] = 6;
values[1] = -1;
values[2] = -1;
values[3] = -1;
A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
hypre_SeqVectorSetConstantValues(x,1);
t0 = omp_get_wtime();
for (i=0; i<testIter; ++i)
hypre_BoomerAMGSeqRelax(A, sol, x);
t1 = omp_get_wtime();
totalWallTime += t1 - t0;
/* verification: max |x - 1| over all grid points */
x_data = hypre_VectorData(x);
error = 0;
for (i=0; i < nx*ny*nz; i++)
{
diff = fabs(x_data[i]-1);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Relax: error: %e\n", error);
hypre_TFree(values);
hypre_CSRMatrixDestroy(A);
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
hypre_SeqVectorDestroy(sol);
}
/* Time testIter axpy operations y += 0.5*x on vectors of 125000
 * doubles (x == y == 1 initially), verify the result against the
 * closed form y = 1 + 0.5*testIter, and add the timed section to
 * totalWallTime. */
void test_Axpy()
{
  hypre_Vector *x, *y;
  double tstart, tend;
  double alpha = 0.5;
  double maxdiff, d;
  double *ydata;
  int n, it;
  n = 125000; /* size per proc */
  x = hypre_SeqVectorCreate(n);
  y = hypre_SeqVectorCreate(n);
  hypre_SeqVectorInitialize(x);
  hypre_SeqVectorInitialize(y);
  hypre_SeqVectorSetConstantValues(x,1);
  hypre_SeqVectorSetConstantValues(y,1);
  tstart = omp_get_wtime();
  for (it = 0; it < testIter; ++it)
    hypre_SeqVectorAxpy(alpha, x, y);
  tend = omp_get_wtime();
  /* verification: every entry should equal 1 + 0.5*testIter */
  ydata = hypre_VectorData(y);
  maxdiff = 0;
  for (it = 0; it < n; ++it)
  {
    d = fabs(ydata[it]-1-0.5*(double)testIter);
    if (d > maxdiff) maxdiff = d;
  }
  if (maxdiff > 0) printf(" \n Axpy: error: %e\n", maxdiff);
  totalWallTime += tend - tstart;
  hypre_SeqVectorDestroy(x);
  hypre_SeqVectorDestroy(y);
}
|
topotherm.c | /*
* Saturation function over ice and water
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <errno.h>
#include <omp.h>
#include "envphys_c.h"
#include "envphys.h"
// stephman boltzman constant
#define STEF_BOLTZ 5.6697e-8
extern int errno;
/* Compute incoming thermal (longwave) radiation for each of the ngrid
 * pixels from air temperature, dew point, elevation and sky-view
 * factor, writing W/m^2-style irradiance into thermal[]. Pixels are
 * processed independently across nthreads OpenMP threads.
 * NOTE(review): exit(-1) on bad input is called from inside the
 * parallel region, terminating the whole process -- confirm intended. */
void topotherm(
int ngrid, /* number of grid points */
double *ta, /* air temperature */
double *tw, /* dew point temperature */
double *z, /* elevation */
double *skvfac, /* sky view factor */
int nthreads, /* number of threads for parrallel processing */
double *thermal /* thermal radiation (return) */
)
{
int samp;
double ta_p, tw_p, z_p, skvfac_p; // pixel values
double ea; /* vapor pressure */
double emiss; /* atmos. emiss. */
double T0; /* Sea Level ta */
double lw_in; /* lw irradiance */
double press; /* air pressure */
omp_set_dynamic(0); // Explicitly disable dynamic teams
omp_set_num_threads(nthreads); // Use N threads for all consecutive parallel regions
#pragma omp parallel shared(ngrid, ta, tw, z, skvfac) private(samp, ta_p, tw_p, z_p, skvfac_p, ea, emiss, T0, press, lw_in)
{
#pragma omp for
for (samp=0; samp < ngrid; samp++) {
ta_p = ta[samp];
tw_p = tw[samp];
z_p = z[samp];
skvfac_p = skvfac[samp];
/* convert ta and tw from C to K */
ta_p += FREEZE;
tw_p += FREEZE;
if(ta_p < 0 || tw_p < 0){
printf("ta or tw < 0 at pixel %i", samp);
exit(-1);
}
/* calculate theoretical sea level */
/* atmospheric emissivity */
/* from reference level ta, tw, and z */
/* dew point cannot exceed air temperature (saturation cap) */
if(tw_p > ta_p) {
tw_p = ta_p;
}
ea = sati(tw_p);
emiss = brutsaert(ta_p,
STD_LAPSE_M, ea,
z_p, SEA_LEVEL);
/* calculate sea level air temp */
T0 = ta_p - (z_p * STD_LAPSE_M);
/* adjust emiss for elev, terrain */
/* veg, and cloud shading */
press = HYSTAT(SEA_LEVEL, T0,
STD_LAPSE, (z_p/1000.),
GRAVITY, MOL_AIR);
/* elevation correction */
emiss *= press/SEA_LEVEL;
/* terrain factor correction */
emiss = (emiss * skvfac_p) + (1.0 - skvfac_p);
/* check for emissivity > 1.0 */
if (emiss > 1.0)
emiss = 1.0;
/* calculate incoming lw rad: emiss * sigma * T^4 */
lw_in = emiss * STEF_BOLTZ *ta_p*ta_p*ta_p*ta_p;
/* set output band */
thermal[samp] = lw_in;
}
}
}
/*
 * Saturation vapor pressure over water (Pa), Goff-Gratch formulation.
 * tk is the air temperature in kelvin; exits the process on a
 * non-positive input.
 */
double
satw(
double tk) /* air temperature (K) */
{
double ln10;
double t1, t2, t3, t4, t5;
double psat;
if (tk <= 0.) {
printf("tk < 0 satw");
exit(-1);
}
errno = 0;
ln10 = log(1.e1);
/* log10 of the saturation pressure, built up term by term */
t1 = -7.90298*(BOIL/tk-1.);
t2 = 5.02808*log(BOIL/tk)/ln10;
t3 = 1.3816e-7*(pow(1.e1,1.1344e1*(1.-tk/BOIL))-1.);
t4 = 8.1328e-3*(pow(1.e1,-3.49149*(BOIL/tk-1.))-1.);
t5 = log(SEA_LEVEL)/ln10;
psat = pow(1.e1, t1 + t2 - t3 + t4 + t5);
if (errno) {
perror("satw: bad return from log or pow");
}
return(psat);
}
/*
 * Saturation vapor pressure over ice (Pa); above freezing it defers
 * to satw() (saturation over water). tk is the air temperature in
 * kelvin; exits the process on a non-positive input.
 */
double
sati(
double tk) /* air temperature (K) */
{
double l10;
double x;
if (tk <= 0.) {
/* bug fix: the message previously said "satw", hiding which
 * function actually rejected the input */
printf("tk < 0 sati");
exit(-1);
}
/* above freezing, saturation is over water */
if (tk > FREEZE) {
x = satw(tk);
return(x);
}
errno = 0;
l10 = log(1.e1);
/* Goff-Gratch formulation over ice; pow() yields mb, converted to
 * Pa by the 1.e2 factor on return */
x = pow(1.e1,-9.09718*((FREEZE/tk)-1.) - 3.56654*log(FREEZE/tk)/l10 +
8.76793e-1*(1.-(tk/FREEZE)) + log(6.1071)/l10);
if (errno) {
perror("sati: bad return from log or pow");
}
return(x*1.e2);
}
/*
 * calculates atmospheric emissivity using a modified form of the equations by W. Brutsaert
 * Inputs: air temperature ta (K), lapse rate lmba (deg/m), vapor
 * pressure ea (Pa), elevation z, air pressure pa (Pa). Returns the
 * emissivity clamped to at most 1.0.
 */
double
brutsaert(
double ta, /* air temp (K) */
double lmba, /* temperature lapse rate (deg/m) */
double ea, /* vapor pressure (Pa) */
double z, /* elevation (z) */
double pa) /* air pressure (Pa) */
{
double t_prime;
double rh;
double e_prime;
double air_emiss;
/* temperature extrapolated down the lapse rate */
t_prime = ta - (lmba * z);
/* relative humidity from the saturation pressure at ta, capped at 1 */
rh = ea / sati(ta);
if (rh > 1.0) {
rh = 1.0;
}
/* vapor pressure at t_prime; the /100.0 converts Pa to mb for the
 * Brutsaert fit below (the un-divided variant is kept commented) */
e_prime = (rh * sati(t_prime))/100.0;
/* e_prime = rh * sati(t_prime); */
air_emiss = (1.24*pow((e_prime/t_prime), 1./7.))*pa/SEA_LEVEL;
/* "if" statement below is new */
if (air_emiss > 1.0) {
air_emiss = 1.0;
}
return(air_emiss);
}
|
lu2lib.c | //
// lu2lib.c
//
// J. Makino
// Time-stamp: <11/06/20 17:50:32 makino>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <timerlib.h>
#ifndef NOBLAS
#ifdef MKL
#include <mkl_cblas.h>
#else
#include <cblas.h>
#endif
#endif
#ifdef USEGDR
#include "gdrdgemm.h"
#endif
#define FTYPE double
#include <emmintrin.h>
typedef double v2df __attribute__((vector_size(16)));
typedef union {v2df v; double s[2];}v2u;
#ifndef USEGDR
/* No-op stub used when built without GDR support (USEGDR undefined):
 * accepting and ignoring the board id keeps callers unconditional. */
void gdrsetboardid(int boardid)
{}
#endif
/* Reference dense matrix multiply on the host: c = a * b for square
 * n x n matrices (triple loop, no blocking; used for checking the
 * optimized kernels). c must not alias a or b. */
void matmul2_host(int n,
                  FTYPE a[n][n],
                  FTYPE b[n][n],
                  FTYPE c[n][n])
{
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            c[row][col] = 0.0e0;
            for (int t = 0; t < n; ++t)
                c[row][col] += a[row][t]*b[t][col];
        }
    }
}
// variant 0, simplest version: c -= a*b for an m x kk times kk x n
// product, where n1/n2/n3 are the leading dimensions of a/b/c.
void matmul_for_small_nk_0(int n1, double a[][n1],
                           int n2, double b[][n2],
                           int n3, double c[][n3],
                           int m,
                           int kk,
                           int n)
{
    for (int col = 0; col < n; ++col)
        for (int row = 0; row < m; ++row)
            for (int t = 0; t < kk; ++t)
                c[row][col] -= a[row][t]*b[t][col];
}
// variant 1: transpose B into a stack copy so the inner dot product
// walks both operands with unit stride, then c[i][j] -= dot(row, row).
void matmul_for_small_nk_1(int n1, double a[][n1],
                           int n2, double b[][n2],
                           int n3, double c[][n3],
                           int m,
                           int kk,
                           int n)
{
    double bt[n][kk];   // B transposed (VLA)
    for (int col = 0; col < n; ++col)
        for (int row = 0; row < kk; ++row)
            bt[col][row] = b[row][col];
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            register double acc = 0.0;
            for (int k = 0; k < kk; ++k) {
                acc += a[i][k]*bt[j][k];
            }
            c[i][j] -= acc;
        }
    }
}
// hand-unroll innermost loop
// variant 2: like variant 1 (transposed B copy) with the k loop
// unrolled by 8.
// NOTE(review): requires kk % 8 == 0 or the unrolled reads run past
// the row -- confirm callers guarantee this.
void matmul_for_small_nk_2(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
double bcopy[n][kk];
for(j=0;j<n;j++)
for(k=0;k<kk;k++)
bcopy[j][k] = b[k][j];
for(i=0;i<m;i++){
double *ap=a[i];
for(j=0;j<n;j++){
double *bp = bcopy[j];
double tmp=0.0;
for(k=0;k<kk;k+=8)
tmp += ap[k]*bp[k]
+ ap[k+1]*bp[k+1]
+ ap[k+2]*bp[k+2]
+ ap[k+3]*bp[k+3]
+ ap[k+4]*bp[k+4]
+ ap[k+5]*bp[k+5]
+ ap[k+6]*bp[k+6]
+ ap[k+7]*bp[k+7];
c[i][j]-=tmp;
}
}
}
// hand-unroll mid-loop
// variant 3: k loop unrolled by 8 and j loop unrolled by 4 (four
// independent accumulators per A row).
// NOTE(review): requires kk % 8 == 0 and n % 4 == 0 -- confirm callers
// guarantee this.
void matmul_for_small_nk_3(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
double bcopy[n][kk];
for(j=0;j<n;j++)
for(k=0;k<kk;k++)
bcopy[j][k] = b[k][j];
for(i=0;i<m;i++){
double *ap=a[i];
for(j=0;j<n;j+=4){
double *bp = bcopy[j];
double *bpp = bcopy[j+1];
double *bp2 = bcopy[j+2];
double *bp3 = bcopy[j+3];
double tmp=0.0;
double tmp1=0.0;
double tmp2=0.0;
double tmp3=0.0;
for(k=0;k<kk;k+=8){
tmp += ap[k]*bp[k]
+ ap[k+1]*bp[k+1]
+ ap[k+2]*bp[k+2]
+ ap[k+3]*bp[k+3]
+ ap[k+4]*bp[k+4]
+ ap[k+5]*bp[k+5]
+ ap[k+6]*bp[k+6]
+ ap[k+7]*bp[k+7];
tmp1 += ap[k]*bpp[k]
+ ap[k+1]*bpp[k+1]
+ ap[k+2]*bpp[k+2]
+ ap[k+3]*bpp[k+3]
+ ap[k+4]*bpp[k+4]
+ ap[k+5]*bpp[k+5]
+ ap[k+6]*bpp[k+6]
+ ap[k+7]*bpp[k+7];
tmp2 += ap[k]*bp2[k]
+ ap[k+1]*bp2[k+1]
+ ap[k+2]*bp2[k+2]
+ ap[k+3]*bp2[k+3]
+ ap[k+4]*bp2[k+4]
+ ap[k+5]*bp2[k+5]
+ ap[k+6]*bp2[k+6]
+ ap[k+7]*bp2[k+7];
tmp3 += ap[k]*bp3[k]
+ ap[k+1]*bp3[k+1]
+ ap[k+2]*bp3[k+2]
+ ap[k+3]*bp3[k+3]
+ ap[k+4]*bp3[k+4]
+ ap[k+5]*bp3[k+5]
+ ap[k+6]*bp3[k+6]
+ ap[k+7]*bp3[k+7];
}
c[i][j]-=tmp;
c[i][j+1]-=tmp1;
c[i][j+2]-=tmp2;
c[i][j+3]-=tmp3;
}
}
}
// hand-unroll mid-loop by 2
// variant 4: k loop unrolled by 8 and j loop unrolled by 2 (two
// independent accumulators per A row).
// NOTE(review): requires kk % 8 == 0 and n % 2 == 0 -- confirm callers
// guarantee this.
void matmul_for_small_nk_4(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
double bcopy[n][kk];
for(j=0;j<n;j++)
for(k=0;k<kk;k++)
bcopy[j][k] = b[k][j];
for(i=0;i<m;i++){
double *ap=a[i];
for(j=0;j<n;j+=2){
double *bp = bcopy[j];
double *bpp = bcopy[j+1];
double tmp=0.0;
double tmp1=0.0;
for(k=0;k<kk;k+=8){
tmp += ap[k]*bp[k]
+ ap[k+1]*bp[k+1]
+ ap[k+2]*bp[k+2]
+ ap[k+3]*bp[k+3]
+ ap[k+4]*bp[k+4]
+ ap[k+5]*bp[k+5]
+ ap[k+6]*bp[k+6]
+ ap[k+7]*bp[k+7];
tmp1 += ap[k]*bpp[k]
+ ap[k+1]*bpp[k+1]
+ ap[k+2]*bpp[k+2]
+ ap[k+3]*bpp[k+3]
+ ap[k+4]*bpp[k+4]
+ ap[k+5]*bpp[k+5]
+ ap[k+6]*bpp[k+6]
+ ap[k+7]*bpp[k+7];
}
c[i][j]-=tmp;
c[i][j+1]-=tmp1;
}
}
}
// use sse2 for dot product
void matmul_for_small_nk_5(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
int nh = n/2;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
double acopyd[kk];
for(j=0;j<nh;j++)
for(k=0;k<kk;k++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
double *ap=a[i];
double *acp = (double*) acopy;
register v2df tmp= (v2df){0.0,0.0};
v2df * cp = (v2df*) (&(c[i][0]));
for(k=0;k<kk;k+=4){
__builtin_prefetch((double*)a[i+4]+k,0);
}
for(j=0;j<n;j+=4){
__builtin_prefetch(c[i+4]+j,0);
}
for(k=0;k<kk;k+=2){
// v2df aa = *((v2df*)(ap+k));
// acopy[k]=__builtin_ia32_shufpd(aa,aa,0x0);
// acopy[k+1]= __builtin_ia32_shufpd(aa,aa,0x5);
acp[k*2]=acp[k*2+1]=ap[k];
acp[k*2+2]=acp[k*2+3]=ap[k+1];
}
for(j=0;j<nh;j++){
tmp = (v2df){0.0,0.0};
v2df * bp = bcopy2[j];
for(k=0;k<kk;k+=2){
tmp += acopy[k]*bp[k]
+acopy[k+1]*bp[k+1]
#if 0
+acopy[k+2]*bp[k+2]
+acopy[k+3]*bp[k+3]
+acopy[k+4]*bp[k+4]
+acopy[k+5]*bp[k+5]
+acopy[k+6]*bp[k+6]
+acopy[k+7]*bp[k+7]
#endif
;
}
cp[j] -= tmp;
}
}
}
// use sse2 for dot product
// variant 6: like variant 5 but processes four A/C rows per outer
// iteration. Assumes m % 4 == 0, n and kk even, rows of c 16-byte
// aligned.
// Fixes vs. the original:
//  - the C-row prefetches were issued inside the k loop using `j`
//    before j had been assigned in that iteration (read of an
//    uninitialized/stale variable, undefined behavior); they now run
//    in their own j loop exactly as in variant 5 above
//  - removed the unused VLA `double bcopy[n][kk]` (stack waste) and
//    the unused `acp`/`acp2` aliases that only served commented-out
//    code, plus the dead #if 0 block
// NOTE(review): prefetches still address rows a[i+4..7] / c[i+4..7]
// beyond the last block; prefetch does not fault, but the pointer
// arithmetic is technically out of bounds -- confirm intent.
void matmul_for_small_nk_6(int n1, double a[][n1],
                           int n2, double b[][n2],
                           int n3, double c[][n3],
                           int m,
                           int kk,
                           int n)
{
    int i,j,k;
    int nh = n/2;
    v2df bcopy2[nh][kk];        // B repacked as column pairs
    v2df acopy[kk];             // rows i..i+3 of A, lane-duplicated
    v2df acopy2[kk];
    v2df acopy3[kk];
    v2df acopy4[kk];
    double *acp3 = (double*) acopy3;
    double *acp4 = (double*) acopy4;
    for(j=0;j<nh;j++)
        for(k=0;k<kk;k++)
            bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    for(i=0;i<m;i+=4){
        double *ap=a[i];
        double *ap2=a[i+1];
        double *ap3=a[i+2];
        double *ap4=a[i+3];
        register v2df tmp, tmp2, tmp3, tmp4;
        v2df * cp = (v2df*) (&(c[i][0]));
        v2df * cp2 = (v2df*) (&(c[i+1][0]));
        v2df * cp3 = (v2df*) (&(c[i+2][0]));
        v2df * cp4 = (v2df*) (&(c[i+3][0]));
        // prefetch the next block of A rows
        for(k=0;k<kk;k+=4){
            __builtin_prefetch((double*)a[i+4]+k,0);
            __builtin_prefetch((double*)a[i+5]+k,0);
            __builtin_prefetch((double*)a[i+6]+k,0);
            __builtin_prefetch((double*)a[i+7]+k,0);
        }
        // prefetch the next block of C rows (was inside the k loop
        // with an uninitialized j)
        for(j=0;j<n;j+=4){
            __builtin_prefetch(c[i+4]+j,0);
            __builtin_prefetch(c[i+5]+j,0);
            __builtin_prefetch(c[i+6]+j,0);
            __builtin_prefetch(c[i+7]+j,0);
        }
        // rows i and i+1: lane-duplicate via shufpd
        for(k=0;k<kk;k+=2){
            v2df * aa = (v2df*)(ap+k);
            acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
            acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
            aa = (v2df*)(ap2+k);
            acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
            acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
        }
        // rows i+2 and i+3: lane-duplicate via scalar stores
        for(k=0;k<kk;k+=2){
            acp3[k*2]=acp3[k*2+1]=ap3[k];
            acp3[k*2+2]=acp3[k*2+3]=ap3[k+1];
            acp4[k*2]=acp4[k*2+1]=ap4[k];
            acp4[k*2+2]=acp4[k*2+3]=ap4[k+1];
        }
        for(j=0;j<nh;j++){
            tmp = tmp2= tmp3= tmp4= (v2df){0.0,0.0};
            v2df * bp = bcopy2[j];
            for(k=0;k<kk;k+=2){
                tmp += acopy[k]*bp[k]
                    +acopy[k+1]*bp[k+1];
                tmp2 += acopy2[k]*bp[k]
                    +acopy2[k+1]*bp[k+1];
                tmp3 += acopy3[k]*bp[k]
                    +acopy3[k+1]*bp[k+1];
                tmp4 += acopy4[k]*bp[k]
                    +acopy4[k+1]*bp[k+1];
            }
            cp[j] -= tmp;
            cp2[j] -= tmp2;
            cp3[j] -= tmp3;
            cp4[j] -= tmp4;
        }
    }
}
/*
 * matmul_for_small_nk7: c -= a*b, processing two rows of a/c per outer
 * iteration.  b is repacked as bcopy2[j][k] = {b[k][2j], b[k][2j+1]} and
 * each a element is broadcast into both v2df lanes, so one vector
 * multiply-add yields two adjacent c columns.  The dot-product loop is
 * unrolled 8 deep, so kk must be a multiple of 8; m and n must be even.
 * NOTE(review): despite the similar name this is distinct from
 * matmul_for_small_nk_7 (defined later in this file) -- confirm which
 * one callers intend.
 */
void matmul_for_small_nk7(int n1, double a[][n1],
		      int n2, double b[][n2],
		      int n3, double c[][n3],
		      int m,
		      int kk,
		      int n)
{
    int i,j;
    int nh = n/2;
    register int k;
    /* bcopy, acp, acp2 and the *count timing accumulators are unused in
     * this variant (leftovers from the commented-out alternatives below) */
    double bcopy[n][kk];
    v2df bcopy2[nh][kk];
    v2df acopy[kk];
    v2df acopy2[kk];
    double *acp = (double*) acopy;
    double *acp2 = (double*) acopy2;
    unsigned long bpcount, apcount, dotcount;
    bpcount= apcount= dotcount=0;
//    BEGIN_TSC;
    /* repack b into per-column-pair vectors */
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
//    END_TSC(bpcount);
    for(i=0;i<m;i+=2){
//	BEGIN_TSC;
	double *ap=a[i];
	double *ap2=a[i+1];
	register v2df tmp, tmp2;
	v2df * cp = (v2df*) (&(c[i][0]));
	v2df * cp2 = (v2df*) (&(c[i+1][0]));
	/* broadcast rows i and i+1 of a; shufpd 0x0 duplicates the low
	 * lane, 0xff the high lane */
	for(k=0;k<kk;k+=2){
	    v2df * aa = (v2df*)(ap+k);
	    __builtin_prefetch((double*)a[i+4]+k,0);
	    __builtin_prefetch((double*)a[i+5]+k,0);
	    acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
	    acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
	    aa = (v2df*)(ap2+k);
	    acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
	    acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
//	    acp[k*2]=acp[k*2+1]=ap[k];
//	    acp[k*2+2]=acp[k*2+3]=ap[k+1];
//	    acp2[k*2]=acp2[k*2+1]=ap2[k];
//	    acp2[k*2+2]=acp2[k*2+3]=ap2[k+1];
	}
//	END_TSC(apcount);
//	BEGIN_TSC;
	for(j=0;j<nh;j++){
	    tmp = tmp2= (v2df){0.0,0.0};
	    v2df ctmp= cp[j];
	    v2df ctmp2 = cp2[j] ;
	    v2df * bp = bcopy2[j];
	    __builtin_prefetch(c[i+4]+j,0);
	    __builtin_prefetch(c[i+5]+j,0);
	    /* 8-way unrolled dot product over k for both rows */
	    for(k=0;k<kk;k+=8){
		int k2 = k+4;
		v2df *avp = acopy+k;
		v2df *avp2 = acopy2+k;
		v2df *bvp = bp+k;
		tmp += avp[0]*bvp[0];
		tmp2 += avp2[0]*bvp[0];
		tmp +=avp[1]*bvp[1];
		tmp2+=avp2[1]*bvp[1];
		tmp +=avp[2]*bvp[2];
		tmp2+=avp2[2]*bvp[2];
		tmp +=avp[3]*bvp[3];
		tmp2+=avp2[3]*bvp[3];
		tmp += avp[4]*bvp[4];
		tmp2 += avp2[4]*bvp[4];
		tmp +=avp[5]*bvp[5];
		tmp2+=avp2[5]*bvp[5];
		tmp +=avp[6]*bvp[6];
		tmp2+=avp2[6]*bvp[6];
		tmp +=avp[7]*bvp[7];
		tmp2+=avp2[7]*bvp[7];
	    }
#if 0
	    for(k=0;k<kk;k+=8){
		int k2 = k+4;
		tmp += acopy[k]*bp[k];
		tmp2 += acopy2[k]*bp[k];
		tmp +=acopy[k+1]*bp[k+1];
		tmp2+=acopy2[k+1]*bp[k+1];
		tmp +=acopy[k+2]*bp[k+2];
		tmp2+=acopy2[k+2]*bp[k+2];
		tmp +=acopy[k+3]*bp[k+3];
		tmp2+=acopy2[k+3]*bp[k+3];
		tmp += acopy[k2]*bp[k2];
		tmp2 += acopy2[k2]*bp[k2];
		tmp +=acopy[k2+1]*bp[k2+1];
		tmp2+=acopy2[k2+1]*bp[k2+1];
		tmp +=acopy[k2+2]*bp[k2+2];
		tmp2+=acopy2[k2+2]*bp[k2+2];
		tmp +=acopy[k2+3]*bp[k2+3];
		tmp2+=acopy2[k2+3]*bp[k2+3];
	    }
#endif
#if 0
	    for(k=0;k<kk;k+=2){
		tmp += acopy[k]*bp[k];
		tmp 		__builtin_prefetch(c[i+4+(j&1)]+j,0);
+=acopy[k+1]*bp[k+1];
		tmp2 += acopy2[k]*bp[k];
		tmp2+=acopy2[k+1]*bp[k+1];
	    }
#endif
	    cp[j] = ctmp -tmp;
	    cp2[j] = ctmp2 -tmp2;
	}
//	END_TSC(dotcount);
    }
//    printf("m, kk, n = %d %d %d  counts = %g %g %g\n", m,kk,n,
//	   (double)bpcount, (double)apcount, (double)dotcount);
}
// XMM registers
// String names of the 16 SSE registers, consumed by the asm() helper
// macros below via string concatenation.
#define X0 "%xmm0"
#define X1 "%xmm1"
#define X2 "%xmm2"
#define X3 "%xmm3"
#define X4 "%xmm4"
#define X5 "%xmm5"
#define X6 "%xmm6"
#define X7 "%xmm7"
#define X8 "%xmm8"
#define X9 "%xmm9"
#define X10 "%xmm10"
#define X11 "%xmm11"
#define X12 "%xmm12"
#define X13 "%xmm13"
#define X14 "%xmm14"
#define X15 "%xmm15"
// One-instruction SSE2 wrappers.  LOADPD/STORPD move 16 aligned bytes
// between memory and a named register; MOVNTPD is the non-temporal
// (cache-bypassing) store; BCAST0/BCAST1 use shufpd to duplicate the
// low (imm 0x00) or high (imm 0xff) lane across both lanes.
// NOTE(review): these asm statements name xmm registers explicitly but
// declare no clobbers or register operands, so correctness depends on
// the compiler not allocating xmm0-xmm15 across them -- confirm the
// build flags used for this file make that safe.
#define LOADPD(mem, reg) asm("movapd %0, %"reg::"m"(mem));
#define STORPD(reg, mem) asm("movapd %"reg " , %0"::"m"(mem));
#define MOVNTPD(reg, mem) asm("movntpd %"reg " , %0"::"m"(mem));
#define MOVAPD(src, dst) asm("movapd " src "," dst);
#define MOVQ(src, dst) asm("movq " src "," dst);
#define BCAST0(reg) asm("shufpd $0x00, " reg "," reg);
#define BCAST1(reg) asm("shufpd $0xff, " reg "," reg);
#define MULPD(src, dst) asm("mulpd " src "," dst);
#define ADDPD(src, dst) asm("addpd " src "," dst);
#define SUBPD(src, dst) asm("subpd " src "," dst);
/*
 * matmul_for_nk8_0: c -= a*b for fixed inner dimension kk == 8, written
 * with the explicit-register asm macros above.  b is repacked so that
 * bcopy2[j][k] = {b[k][2j], b[k][2j+1]}.  Per row of a/c, four c column
 * pairs (8 columns) are accumulated in X12-X15; the k loop over the 8
 * a elements is fully unrolled in four 2-element stanzas.  Requires
 * n % 8 == 0 and 16-byte-aligned c rows (movapd).
 */
void matmul_for_nk8_0(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j;
    int kk = 8;
    int nh = n/2;
    register int k;
    /* bcopy is unused; only bcopy2 is referenced below */
    double bcopy[n][kk];
    v2df bcopy2[nh][kk];
#define PREFETCHL 32
    /* warm the cache for the first rows of a and c */
    for(i=0;i<PREFETCHL;i++){
	__builtin_prefetch((double*)a[i],0,0);
	__builtin_prefetch(c[i+8],1,0);
    }
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    for(i=0;i<m;i++){
	//	BEGIN_TSC;
	//	v2df acopy[8];
	v2df *ap = (v2df*) a[i];
	v2df * cp = (v2df*) (&(c[i][0]));
	__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
	int k;
	for(j=0;j<nh;j+=4){
	    __builtin_prefetch(c[i+PREFETCHL]+j,1,3);
	    v2df * bvp0 = bcopy2[j];
	    v2df * bvp1 = bcopy2[j+1];
	    v2df * bvp2 = bcopy2[j+2];
	    v2df * bvp3 = bcopy2[j+3];
	    /* X12..X15 accumulate the four c column pairs */
	    LOADPD(cp[j],X12);
	    LOADPD(cp[j+1],X13);
	    LOADPD(cp[j+2],X14);
	    LOADPD(cp[j+3],X15);
	    /* stanza k = 0,1: broadcast a pair into X0/X1, multiply the
	     * four b pairs, subtract from the accumulators */
	    LOADPD(ap[0],X0);
	    LOADPD(bvp0[0],X4);
	    LOADPD(bvp1[0],X5);
	    LOADPD(bvp2[0],X6);
	    LOADPD(bvp3[0],X7);
	    LOADPD(bvp0[1],X8);
	    LOADPD(bvp1[1],X9);
	    LOADPD(bvp2[1],X10);
	    LOADPD(bvp3[1],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 2,3 */
	    LOADPD(ap[1],X0);
	    LOADPD(bvp0[2],X4);
	    LOADPD(bvp1[2],X5);
	    LOADPD(bvp2[2],X6);
	    LOADPD(bvp3[2],X7);
	    LOADPD(bvp0[3],X8);
	    LOADPD(bvp1[3],X9);
	    LOADPD(bvp2[3],X10);
	    LOADPD(bvp3[3],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 4,5 */
	    LOADPD(ap[2],X0);
	    LOADPD(bvp0[4],X4);
	    LOADPD(bvp1[4],X5);
	    LOADPD(bvp2[4],X6);
	    LOADPD(bvp3[4],X7);
	    LOADPD(bvp0[5],X8);
	    LOADPD(bvp1[5],X9);
	    LOADPD(bvp2[5],X10);
	    LOADPD(bvp3[5],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 6,7 */
	    LOADPD(ap[3],X0);
	    LOADPD(bvp0[6],X4);
	    LOADPD(bvp1[6],X5);
	    LOADPD(bvp2[6],X6);
	    LOADPD(bvp3[6],X7);
	    LOADPD(bvp0[7],X8);
	    LOADPD(bvp1[7],X9);
	    LOADPD(bvp2[7],X10);
	    LOADPD(bvp3[7],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* write the four updated c column pairs back */
	    STORPD(X12,cp[j+0]);
	    STORPD(X13,cp[j+1]);
	    STORPD(X14,cp[j+2]);
	    STORPD(X15,cp[j+3]);
	}
    }
}
/*
 * matmul_for_nk16_0a: c -= a*b for fixed kk == 16, asm-macro version that
 * processes TWO rows of a/c (i, i+1) and two c column pairs (j, j+1) per
 * inner block; accumulators are X12/X13 (row i) and X14/X15 (row i+1).
 * Requires m even, n % 4 == 0, 16-byte-aligned rows.
 */
void matmul_for_nk16_0a(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j;
    int kk = 16;
    int nh = n/2;
    register int k;
    v2df bcopy2[nh][kk];   /* bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 32
    /* warm the cache for the first rows of a and c */
    for(i=0;i<PREFETCHL;i++){
	__builtin_prefetch((double*)a[i],0,0);
	__builtin_prefetch((double*)a[i]+8,0,0);
	__builtin_prefetch(c[i+8],1,0);
	__builtin_prefetch(c[i+8]+8,1,0);
    }
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    for(i=0;i<m;i+=2){
	//	BEGIN_TSC;
	v2df *ap = (v2df*) a[i];
	v2df * cp = (v2df*) (&(c[i][0]));
	v2df *app = (v2df*) a[i+1];
	v2df * cpp = (v2df*) (&(c[i+1][0]));
	__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL+1],0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL+1]+8,0,0);
	int k;
	for(j=0;j<nh;j+=2){
	    __builtin_prefetch(c[i+PREFETCHL]+j,1,0);
	    v2df * bvp0 = bcopy2[j];
	    v2df * bvp1 = bcopy2[j+1];
	    LOADPD(cp[j],X12);
	    LOADPD(cp[j+1],X13);
	    LOADPD(cpp[j],X14);
	    LOADPD(cpp[j+1],X15);
	    /* each iteration consumes one v2df (two k values) from both
	     * a rows: X0/X1 hold row i broadcasts, X2/X3 row i+1 */
	    for(k=0;k<8;k++){
		LOADPD(ap[k],X0);
		LOADPD(app[k],X2);
		MOVAPD(X0,X1);
		BCAST0(X0);
		BCAST1(X1);
		MOVAPD(X2,X3);
		BCAST0(X2);
		BCAST1(X3);
		/* b pairs are copied (X6,X7,X10,X11) before the row-i
		 * multiply destroys them, then reused for row i+1 */
		LOADPD(bvp0[k*2],X4);
		MOVAPD(X4,X6);
		MULPD(X0,X4);
		SUBPD(X4,X12);
		LOADPD(bvp1[k*2],X5);
		MOVAPD(X5,X7);
		MULPD(X0,X5);
		SUBPD(X5,X13);
		LOADPD(bvp0[k*2+1],X8);
		MOVAPD(X8,X10);
		MULPD(X1,X8);
		SUBPD(X8,X12);
		LOADPD(bvp1[k*2+1],X9);
		MOVAPD(X9,X11);
		MULPD(X1,X9);
		SUBPD(X9,X13);
		MULPD(X2,X6);
		SUBPD(X6,X14);
		MULPD(X2,X7);
		SUBPD(X7,X15);
		MULPD(X3,X10);
		SUBPD(X10,X14);
		MULPD(X3,X11);
		SUBPD(X11,X15);
	    }
	    STORPD(X12,cp[j+0]);
	    STORPD(X13,cp[j+1]);
	    STORPD(X14,cpp[j+0]);
	    STORPD(X15,cpp[j+1]);
	}
    }
}
/*
 * matmul_for_nk16_0c: c -= a*b for fixed kk == 16, asm-macro version.
 * One row of a/c at a time; four c column pairs (8 columns) accumulate in
 * X12-X15 while the k loop runs over the 8 v2df (16 double) a elements.
 * Requires n % 8 == 0 and 16-byte-aligned rows.
 */
void matmul_for_nk16_0c(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j;
    int kk = 16;
    int nh = n/2;
    register int k;
    v2df bcopy2[nh][kk];   /* bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 16
    /* warm the cache for the first rows of a and c */
    for(i=0;i<PREFETCHL;i++){
	__builtin_prefetch((double*)a[i],0,0);
	__builtin_prefetch((double*)a[i]+8,0,0);
	__builtin_prefetch(c[i+8],1,0);
	__builtin_prefetch(c[i+8]+8,1,0);
    }
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    for(i=0;i<m;i++){
	//	BEGIN_TSC;
	v2df *ap = (v2df*) a[i];
	v2df * cp = (v2df*) (&(c[i][0]));
	__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
	int k;
	for(j=0;j<nh;j+=4){
	    __builtin_prefetch(c[i+PREFETCHL]+j,1,0);
	    v2df * bvp0 = bcopy2[j];
	    v2df * bvp1 = bcopy2[j+1];
	    v2df * bvp2 = bcopy2[j+2];
	    v2df * bvp3 = bcopy2[j+3];
	    LOADPD(cp[j],X12);
	    LOADPD(cp[j+1],X13);
	    LOADPD(cp[j+2],X14);
	    LOADPD(cp[j+3],X15);
	    /* per iteration: broadcast a[i][2k] / a[i][2k+1] into X0/X1,
	     * multiply four b pairs each, subtract from accumulators */
	    for(k=0;k<8;k++){
		LOADPD(ap[k],X0);
		LOADPD(bvp0[k*2],X4);
		LOADPD(bvp1[k*2],X5);
		LOADPD(bvp2[k*2],X6);
		LOADPD(bvp3[k*2],X7);
		MOVAPD(X0,X1);
		BCAST0(X0);
		BCAST1(X1);
		LOADPD(bvp0[k*2+1],X8);
		LOADPD(bvp1[k*2+1],X9);
		LOADPD(bvp2[k*2+1],X10);
		LOADPD(bvp3[k*2+1],X11);
		MULPD(X0,X4);
		MULPD(X0,X5);
		MULPD(X0,X6);
		MULPD(X0,X7);
		MULPD(X1,X8);
		MULPD(X1,X9);
		MULPD(X1,X10);
		MULPD(X1,X11);
		SUBPD(X4,X12);
		SUBPD(X5,X13);
		SUBPD(X6,X14);
		SUBPD(X7,X15);
		SUBPD(X8,X12);
		SUBPD(X9,X13);
		SUBPD(X10,X14);
		SUBPD(X11,X15);
	    }
	    STORPD(X12,cp[j+0]);
	    STORPD(X13,cp[j+1]);
	    STORPD(X14,cp[j+2]);
	    STORPD(X15,cp[j+3]);
	}
    }
}
/*
 * matmul_for_nk32_0: c -= a*b for fixed kk == 32; identical structure to
 * matmul_for_nk16_0c but the inner k loop covers 16 v2df (32 doubles)
 * and the prefetch distance is shorter (rows are longer).
 * Requires n % 8 == 0 and 16-byte-aligned rows.
 */
void matmul_for_nk32_0(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j;
    int kk = 32;
    int nh = n/2;
    register int k;
    v2df bcopy2[nh][kk];   /* bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 8
    /* warm the cache for the first rows of a and c */
    for(i=0;i<PREFETCHL;i++){
	__builtin_prefetch((double*)a[i],0,0);
	__builtin_prefetch((double*)a[i]+8,0,0);
	__builtin_prefetch((double*)a[i]+16,0,0);
	__builtin_prefetch((double*)a[i]+24,0,0);
	__builtin_prefetch(c[i+8],1,0);
	__builtin_prefetch(c[i+8]+8,1,0);
	__builtin_prefetch(c[i+8]+16,1,0);
	__builtin_prefetch(c[i+8]+24,1,0);
    }
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    for(i=0;i<m;i++){
	//	BEGIN_TSC;
	v2df *ap = (v2df*) a[i];
	v2df * cp = (v2df*) (&(c[i][0]));
	__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL]+16,0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL]+24,0,0);
	int k;
	for(j=0;j<nh;j+=4){
	    __builtin_prefetch(c[i+PREFETCHL]+j,1,0);
	    v2df * bvp0 = bcopy2[j];
	    v2df * bvp1 = bcopy2[j+1];
	    v2df * bvp2 = bcopy2[j+2];
	    v2df * bvp3 = bcopy2[j+3];
	    LOADPD(cp[j],X12);
	    LOADPD(cp[j+1],X13);
	    LOADPD(cp[j+2],X14);
	    LOADPD(cp[j+3],X15);
	    for(k=0;k<16;k++){
		LOADPD(ap[k],X0);
		LOADPD(bvp0[k*2],X4);
		LOADPD(bvp1[k*2],X5);
		LOADPD(bvp2[k*2],X6);
		LOADPD(bvp3[k*2],X7);
		MOVAPD(X0,X1);
		BCAST0(X0);
		BCAST1(X1);
		MULPD(X0,X4);
		MULPD(X0,X5);
		MULPD(X0,X6);
		MULPD(X0,X7);
		LOADPD(bvp0[k*2+1],X8);
		LOADPD(bvp1[k*2+1],X9);
		LOADPD(bvp2[k*2+1],X10);
		LOADPD(bvp3[k*2+1],X11);
		MULPD(X1,X8);
		MULPD(X1,X9);
		MULPD(X1,X10);
		MULPD(X1,X11);
		SUBPD(X4,X12);
		SUBPD(X5,X13);
		SUBPD(X6,X14);
		SUBPD(X7,X15);
		SUBPD(X8,X12);
		SUBPD(X9,X13);
		SUBPD(X10,X14);
		SUBPD(X11,X15);
	    }
	    STORPD(X12,cp[j+0]);
	    STORPD(X13,cp[j+1]);
	    STORPD(X14,cp[j+2]);
	    STORPD(X15,cp[j+3]);
	}
    }
}
/*
 * matmul_for_nk16_0b: c -= a*b for fixed kk == 16; same computation as
 * matmul_for_nk16_0c but with the inner k loop fully unrolled into eight
 * explicit stanzas (ap[0]..ap[7], b pair indices 0..15).  X12-X15
 * accumulate four c column pairs.  Requires n % 8 == 0 and 16-byte
 * aligned rows.
 */
void matmul_for_nk16_0b(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j;
    int kk = 16;
    int nh = n/2;
    register int k;
    v2df bcopy2[nh][kk];   /* bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 16
    /* warm the cache for the first rows of a and c */
    for(i=0;i<PREFETCHL;i++){
	__builtin_prefetch((double*)a[i],0,0);
	__builtin_prefetch((double*)a[i]+8,0,0);
	__builtin_prefetch(c[i+8],1,0);
	__builtin_prefetch(c[i+8]+8,1,0);
    }
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    for(i=0;i<m;i++){
	//	BEGIN_TSC;
	v2df *ap = (v2df*) a[i];
	v2df * cp = (v2df*) (&(c[i][0]));
	__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
	__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
	int k;
	for(j=0;j<nh;j+=4){
	    __builtin_prefetch(c[i+PREFETCHL]+j,1,0);
	    v2df * bvp0 = bcopy2[j];
	    v2df * bvp1 = bcopy2[j+1];
	    v2df * bvp2 = bcopy2[j+2];
	    v2df * bvp3 = bcopy2[j+3];
	    LOADPD(cp[j],X12);
	    LOADPD(cp[j+1],X13);
	    LOADPD(cp[j+2],X14);
	    LOADPD(cp[j+3],X15);
	    /* stanza k = 0,1 */
	    LOADPD(ap[0],X0);
	    LOADPD(bvp0[0],X4);
	    LOADPD(bvp1[0],X5);
	    LOADPD(bvp2[0],X6);
	    LOADPD(bvp3[0],X7);
	    LOADPD(bvp0[1],X8);
	    LOADPD(bvp1[1],X9);
	    LOADPD(bvp2[1],X10);
	    LOADPD(bvp3[1],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 2,3 */
	    LOADPD(ap[1],X0);
	    LOADPD(bvp0[2],X4);
	    LOADPD(bvp1[2],X5);
	    LOADPD(bvp2[2],X6);
	    LOADPD(bvp3[2],X7);
	    LOADPD(bvp0[3],X8);
	    LOADPD(bvp1[3],X9);
	    LOADPD(bvp2[3],X10);
	    LOADPD(bvp3[3],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 4,5 */
	    LOADPD(ap[2],X0);
	    LOADPD(bvp0[4],X4);
	    LOADPD(bvp1[4],X5);
	    LOADPD(bvp2[4],X6);
	    LOADPD(bvp3[4],X7);
	    LOADPD(bvp0[5],X8);
	    LOADPD(bvp1[5],X9);
	    LOADPD(bvp2[5],X10);
	    LOADPD(bvp3[5],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 6,7 */
	    LOADPD(ap[3],X0);
	    LOADPD(bvp0[6],X4);
	    LOADPD(bvp1[6],X5);
	    LOADPD(bvp2[6],X6);
	    LOADPD(bvp3[6],X7);
	    LOADPD(bvp0[7],X8);
	    LOADPD(bvp1[7],X9);
	    LOADPD(bvp2[7],X10);
	    LOADPD(bvp3[7],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 8,9 */
	    LOADPD(ap[4],X0);
	    LOADPD(bvp0[8],X4);
	    LOADPD(bvp1[8],X5);
	    LOADPD(bvp2[8],X6);
	    LOADPD(bvp3[8],X7);
	    LOADPD(bvp0[9],X8);
	    LOADPD(bvp1[9],X9);
	    LOADPD(bvp2[9],X10);
	    LOADPD(bvp3[9],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 10,11 */
	    LOADPD(ap[5],X0);
	    LOADPD(bvp0[10],X4);
	    LOADPD(bvp1[10],X5);
	    LOADPD(bvp2[10],X6);
	    LOADPD(bvp3[10],X7);
	    LOADPD(bvp0[11],X8);
	    LOADPD(bvp1[11],X9);
	    LOADPD(bvp2[11],X10);
	    LOADPD(bvp3[11],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 12,13 */
	    LOADPD(ap[6],X0);
	    LOADPD(bvp0[12],X4);
	    LOADPD(bvp1[12],X5);
	    LOADPD(bvp2[12],X6);
	    LOADPD(bvp3[12],X7);
	    LOADPD(bvp0[13],X8);
	    LOADPD(bvp1[13],X9);
	    LOADPD(bvp2[13],X10);
	    LOADPD(bvp3[13],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    /* stanza k = 14,15 */
	    LOADPD(ap[7],X0);
	    LOADPD(bvp0[14],X4);
	    LOADPD(bvp1[14],X5);
	    LOADPD(bvp2[14],X6);
	    LOADPD(bvp3[14],X7);
	    LOADPD(bvp0[15],X8);
	    LOADPD(bvp1[15],X9);
	    LOADPD(bvp2[15],X10);
	    LOADPD(bvp3[15],X11);
	    MOVAPD(X0,X1);
	    BCAST0(X0);
	    BCAST1(X1);
	    MULPD(X0,X4);
	    MULPD(X0,X5);
	    MULPD(X0,X6);
	    MULPD(X0,X7);
	    MULPD(X1,X8);
	    MULPD(X1,X9);
	    MULPD(X1,X10);
	    MULPD(X1,X11);
	    SUBPD(X4,X12);
	    SUBPD(X5,X13);
	    SUBPD(X6,X14);
	    SUBPD(X7,X15);
	    SUBPD(X8,X12);
	    SUBPD(X9,X13);
	    SUBPD(X10,X14);
	    SUBPD(X11,X15);
	    STORPD(X12,cp[j+0]);
	    STORPD(X13,cp[j+1]);
	    STORPD(X14,cp[j+2]);
	    STORPD(X15,cp[j+3]);
	}
    }
}
/*
 * matmul_for_nk8_0d: c -= a*b for fixed kk == 8.  The eight elements of
 * the current a row are broadcast into X0-X7 once per row; per c column
 * pair an 8-term dot product is computed with a pairwise ADDPD tree and
 * subtracted from c.  Requires n even and 16-byte-aligned rows.
 */
void matmul_for_nk8_0d(int n1, double a[][n1],
		  int n2, double b[][n2],
		  int n3, double c[][n3],
		  int m,
		  int n)
{
    int i,j;
    int kk = 8;
    int nh = n/2;
    register int k;
    /* bcopy, acopy, acopy2, awork, awork2, acp, acp2, the *count
     * accumulators and zero below are unused leftovers in this variant */
    double bcopy[n][kk];
    v2df bcopy2[nh][kk];
    v2df acopy[kk];
    v2df acopy2[kk];
    v2df awork[4];
    v2df awork2[4];
    double *acp = (double*) acopy;
    double *acp2 = (double*) acopy2;
    unsigned long bpcount, apcount, dotcount;
    bpcount= apcount= dotcount=0;
    /* repack b: bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    for(i=0;i<m;i++){
	//	BEGIN_TSC;
	double *ap=a[i];
	register v2df tmp, tmp2;
	v2df * cp = (v2df*) (&(c[i][0]));
	v2df * aa = (v2df*)(ap);
	__builtin_prefetch((double*)a[i+8],0,0);
	/* broadcast a[i][0..7] into both lanes, then pin into X0..X7 */
	v2df acopy0=(v2df){a[i][0], a[i][0]};
	v2df acopy1=(v2df){a[i][1], a[i][1]};
	v2df acopy2=(v2df){a[i][2], a[i][2]};
	v2df acopy3=(v2df){a[i][3], a[i][3]};
	v2df acopy4=(v2df){a[i][4], a[i][4]};
	v2df acopy5=(v2df){a[i][5], a[i][5]};
	v2df acopy6=(v2df){a[i][6], a[i][6]};
	v2df acopy7=(v2df){a[i][7], a[i][7]};
	v2df zero=(v2df){0.0, 0.0};
	LOADPD(acopy0,X0);
	LOADPD(acopy1,X1);
	LOADPD(acopy2,X2);
	LOADPD(acopy3,X3);
	LOADPD(acopy4,X4);
	LOADPD(acopy5,X5);
	LOADPD(acopy6,X6);
	LOADPD(acopy7,X7);
	for(j=0;j<nh;j++){
	    __builtin_prefetch(c[i+8]+j,1,0);
	    v2df * bvp = bcopy2[j];
	    /* X14 holds c pair; products accumulate pairwise into X8 */
	    LOADPD(cp[j],X14);
	    LOADPD(bvp[0],X8);
	    LOADPD(bvp[1],X9);
	    MULPD(X0,X8);
	    MULPD(X1,X9);
	    LOADPD(bvp[2],X10);
	    LOADPD(bvp[3],X11);
	    ADDPD(X9,X8);
	    MULPD(X2,X10);
	    MULPD(X3,X11);
	    ADDPD(X11,X10);
	    LOADPD(bvp[4],X9);
	    LOADPD(bvp[5],X11);
	    LOADPD(bvp[6],X12);
	    LOADPD(bvp[7],X13);
	    MULPD(X4,X9);
	    MULPD(X5,X11);
	    ADDPD(X10,X8);
	    ADDPD(X11,X9);
	    MULPD(X6,X12);
	    MULPD(X7,X13);
	    ADDPD(X13,X12);
	    ADDPD(X9,X8);
	    ADDPD(X12,X8);
	    SUBPD(X8,X14);
	    STORPD(X14,cp[j]);
	}
    }
}
void matmul_for_nk8_0c(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df awork[4];
v2df awork2[4];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
// BEGIN_TSC;
double *ap=a[i];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * aa = (v2df*)(ap);
__builtin_prefetch((double*)a[i+8],0,0);
register v2df acopy0=(v2df){a[i][0], a[i][0]};
register v2df acopy1=(v2df){a[i][1], a[i][1]};
register v2df acopy2=(v2df){a[i][2], a[i][2]};
register v2df acopy3=(v2df){a[i][3], a[i][3]};
register v2df acopy4=(v2df){a[i][4], a[i][4]};
register v2df acopy5=(v2df){a[i][5], a[i][5]};
register v2df acopy6=(v2df){a[i][6], a[i][6]};
register v2df acopy7=(v2df){a[i][7], a[i][7]};
for(j=0;j<nh;j++){
tmp = (v2df){0.0,0.0};
v2df ctmp= cp[j];
v2df * bp = bcopy2[j];
__builtin_prefetch(c[i+4]+j,1,0);
v2df *bvp = bp;
tmp += acopy0*bvp[0];
tmp +=acopy1*bvp[1];
tmp +=acopy2*bvp[2];
tmp +=acopy3*bvp[3];
tmp +=acopy4*bvp[4];
tmp +=acopy5*bvp[5];
tmp +=acopy6*bvp[6];
tmp +=acopy7*bvp[7];
cp[j] = ctmp -tmp;
}
}
// printf("m, kk, n = %d %d %d counts = %g %g %g\n", m,kk,n,
// (double)bpcount, (double)apcount, (double)dotcount);
}
void matmul_for_nk8_0b(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df awork[4];
v2df awork2[4];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
// BEGIN_TSC;
double *ap=a[i];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * aa = (v2df*)(ap);
__builtin_prefetch((double*)a[i+8],0,0);
acopy[0]=(v2df){a[i][0], a[i][0]};
acopy[1]=(v2df){a[i][1], a[i][1]};
acopy[2]=(v2df){a[i][2], a[i][2]};
acopy[3]=(v2df){a[i][3], a[i][3]};
acopy[4]=(v2df){a[i][4], a[i][4]};
acopy[5]=(v2df){a[i][5], a[i][5]};
acopy[6]=(v2df){a[i][6], a[i][6]};
acopy[7]=(v2df){a[i][7], a[i][7]};
for(j=0;j<nh;j++){
tmp = tmp2= (v2df){0.0,0.0};
v2df ctmp= cp[j];
v2df * bp = bcopy2[j];
__builtin_prefetch(c[i+4]+j,1,0);
v2df *avp = acopy;
v2df *bvp = bp;
tmp += avp[0]*bvp[0];
tmp +=avp[1]*bvp[1];
tmp +=avp[2]*bvp[2];
tmp +=avp[3]*bvp[3];
tmp += avp[4]*bvp[4];
tmp +=avp[5]*bvp[5];
tmp +=avp[6]*bvp[6];
tmp +=avp[7]*bvp[7];
cp[j] = ctmp -tmp;
}
}
// printf("m, kk, n = %d %d %d counts = %g %g %g\n", m,kk,n,
// (double)bpcount, (double)apcount, (double)dotcount);
}
void matmul_for_nk8_0a(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df awork[4];
v2df awork2[4];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i+=2){
// BEGIN_TSC;
double *ap=a[i];
double *ap2=a[i+1];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * cp2 = (v2df*) (&(c[i+1][0]));
v2df * aa = (v2df*)(ap);
__builtin_prefetch((double*)a[i+8],0,0);
__builtin_prefetch((double*)a[i+9],0,0);
acopy[0]=(v2df){a[i][0], a[i][0]};
acopy[1]=(v2df){a[i][1], a[i][1]};
acopy[2]=(v2df){a[i][2], a[i][2]};
acopy[3]=(v2df){a[i][3], a[i][3]};
acopy[4]=(v2df){a[i][4], a[i][4]};
acopy[5]=(v2df){a[i][5], a[i][5]};
acopy[6]=(v2df){a[i][6], a[i][6]};
acopy[7]=(v2df){a[i][7], a[i][7]};
aa = (v2df*)(ap2);
acopy2[0]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
acopy2[2]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[3]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
acopy2[4]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[5]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
acopy2[6]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[7]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
for(j=0;j<nh;j++){
tmp = tmp2= (v2df){0.0,0.0};
v2df ctmp= cp[j];
v2df ctmp2 = cp2[j] ;
v2df * bp = bcopy2[j];
__builtin_prefetch(c[i+4]+j,1,0);
__builtin_prefetch(c[i+5]+j,1,0);
v2df *avp = acopy;
v2df *avp2 = acopy2;
v2df *bvp = bp;
tmp += avp[0]*bvp[0];
tmp2 += avp2[0]*bvp[0];
tmp +=avp[1]*bvp[1];
tmp2+=avp2[1]*bvp[1];
tmp +=avp[2]*bvp[2];
tmp2+=avp2[2]*bvp[2];
tmp +=avp[3]*bvp[3];
tmp2+=avp2[3]*bvp[3];
tmp += avp[4]*bvp[4];
tmp2 += avp2[4]*bvp[4];
tmp +=avp[5]*bvp[5];
tmp2+=avp2[5]*bvp[5];
tmp +=avp[6]*bvp[6];
tmp2+=avp2[6]*bvp[6];
tmp +=avp[7]*bvp[7];
tmp2+=avp2[7]*bvp[7];
cp[j] = ctmp -tmp;
cp2[j] = ctmp2 -tmp2;
}
}
// printf("m, kk, n = %d %d %d counts = %g %g %g\n", m,kk,n,
// (double)bpcount, (double)apcount, (double)dotcount);
}
/*
 * matmul_for_nk8_1: c -= a*b for kk == 8 using horizontal dot products.
 * bcopy[j][k] packs {b[2k][j], b[2k+1][j]} so each vector multiply-add
 * accumulates two k terms per lane; the two lanes are summed at the end
 * (tmp.s[0] + tmp.s[1]).  Works on an 8x8 tile of a per outer iteration,
 * so m must be a multiple of 8 and n even.
 */
void matmul_for_nk8_1(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j,k;
    const int kk = 8;
    const int kh = kk/2;
    v2df bcopy[n][kh];    /* column j of b, two consecutive k values per v2df */
    v2df acopy[kk][kh];   /* 8x8 tile of a, one row per acopy[k] */
    for(j=0;j<n;j++)
	for(k=0;k<kh;k++)
	    bcopy[j][k] = (v2df){b[k*2][j],b[k*2+1][j]};
    for(i=0;i<m;i+=kk){
	for(k=0;k<kk;k++){
	    v2df *ak = (v2df*)(a[i+k]);
	    /* was "acopy+k": same address but wrong pointer type
	     * (v2df (*)[kh] assigned to v2df *) */
	    v2df * awp = acopy[k];
	    awp[0]=ak[0];
	    awp[1]=ak[1];
	    awp[2]=ak[2];
	    awp[3]=ak[3];
	}
	for(k=0;k<kk;k++){
	    v2u tmp, tmp1;
	    v2df * ap = acopy[k];
	    for(j=0;j<n;j+=2){
		tmp.v = ap[0]*bcopy[j][0]
		    + ap[1]*bcopy[j][1]
		    + ap[2]*bcopy[j][2]
		    + ap[3]*bcopy[j][3];
		tmp1.v = ap[0]*bcopy[j+1][0]
		    + ap[1]*bcopy[j+1][1]
		    + ap[2]*bcopy[j+1][2]
		    + ap[3]*bcopy[j+1][3];
		/* horizontal sum of both lanes completes each dot product */
		c[k+i][j] -= tmp.s[0]+tmp.s[1];
		c[k+i][j+1] -= tmp1.s[0]+tmp1.s[1];
	    }
	}
    }
}
/*
 * matmul_for_nk8_2: c -= a*b for kk == 8, operating on an 8-row tile of
 * a and c at a time.  a and c tiles are staged into local buffers, the
 * update is computed there, and the c tile is written back with
 * non-temporal (cache-bypassing) stores.
 * NOTE(review): the "copy a" loop below runs j over n but writes
 * acopy2[j] whose first dimension is kk == 8 -- this only stays in
 * bounds (and copies the whole tile) when n == kk.  Confirm callers
 * always pass n == 8.
 */
void matmul_for_nk8_2(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j,k;
    const int kk = 8;
    const int kh = kk/2;
    int nh = n/2;
    v2df bcopy[nh][kk];    /* bcopy[j][k] = {b[k][2j], b[k][2j+1]} */
    v2df acopy[kk][kh];    /* staged 8-row tile of a */
    v2df ccopy[kk][kh];    /* staged 8-row tile of c */
    v2df acopy2[kk][kk];   /* a elements broadcast into both lanes */
    unsigned long bpcount, apcount, dotcount;
    bpcount= apcount= dotcount=0;
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy[j][k] = *((v2df*)(b[k]+j+j));
    //    printf("copy b end\n");
    for(i=0;i<m;i+=kk){
	/* stage the next 8 rows of a and c, prefetching 8 rows ahead */
	for(k=0;k<kk;k++){
	    __builtin_prefetch(a+i+k+8,0,0);
	    __builtin_prefetch(c+i+k+8,1,0);
	    v2df *ak = (v2df*)(a[i+k]);
	    v2df * awp = (v2df*)(acopy+k);
	    v2df *ck = (v2df*)(c[i+k]);
	    v2df * cwp = (v2df*)(ccopy+k);
	    awp[0]=ak[0];
	    awp[1]=ak[1];
	    awp[2]=ak[2];
	    awp[3]=ak[3];
	    cwp[0]=ck[0];
	    cwp[1]=ck[1];
	    cwp[2]=ck[2];
	    cwp[3]=ck[3];
	}
	/* broadcast each staged a element into both v2df lanes */
	for (j=0;j<n;j++){
	    double * ap = (double*)( acopy+j);
	    for (k=0;k<kk;k++){
		acopy2[j][k]=(v2df){ap[k],ap[k]};
	    }
	}
	//	printf("copy a end\n");
	for(k=0;k<kk;k++){
	    v2df * cp = (v2df*) ccopy[k];
	    v2df * ap = acopy2[k];
	    for(j=0;j<nh;j++){
		v2df * bp = bcopy[j];
		cp[j] -= ap[0]*bp[0]
		    + ap[1]*bp[1]
		    + ap[2]*bp[2]
		    + ap[3]*bp[3]
		    + ap[4]*bp[4]
		    + ap[5]*bp[5]
		    + ap[6]*bp[6]
		    + ap[7]*bp[7];
	    }
	}
	/* write the updated c tile back, bypassing the cache */
	for(k=0;k<kk;k++){
	    v2df *ck = (v2df*)(c[i+k]);
	    v2df * cwp = (v2df*)(ccopy+k);
#if 0
	    ck[0] = cwp[0];
	    ck[1] = cwp[1];
	    ck[2] = cwp[2];
	    ck[3] = cwp[3];
#endif
	    __builtin_ia32_movntpd((double*)(ck),cwp[0]);
	    __builtin_ia32_movntpd((double*)(ck+1),cwp[1]);
	    __builtin_ia32_movntpd((double*)(ck+2),cwp[2]);
	    __builtin_ia32_movntpd((double*)(ck+3),cwp[3]);
	}
	//	printf("calc c end\n");
    }
    //    printf("m, kk, n = %d %d %d  counts = %g %g %g\n", m,kk,n,
    //	   (double)bpcount, (double)apcount, (double)dotcount);
}
/*
 * matmul_for_nk8: OpenMP-parallel c -= a*b for kk == 8.  The i loop over
 * 8-row tiles is split across threads; each thread broadcasts its a tile
 * into a private acopy2 and updates c in place.  Wrapped in BEGIN_TSC /
 * END_TSC cycle-counting macros (defined elsewhere in this file).
 * NOTE(review): the "copy a" loop runs j over n but indexes a[i+j] and
 * acopy2[j] (first dimension kk == 8) -- in bounds and complete only
 * when n == kk.  Confirm callers always pass n == 8.
 */
void matmul_for_nk8(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i,j,k;
    const int kk = 8;
    const int kh = kk/2;
    int nh = n/2;
    v2df bcopy[nh][kk];    /* bcopy[j][k] = {b[k][2j], b[k][2j+1]} */
    v2df acopy2[kk][kk];   /* per-thread broadcast a tile (private below) */
    //    unsigned long bpcount, apcount, dotcount;
    //    bpcount= apcount= dotcount=0;
    BEGIN_TSC;
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy[j][k] = *((v2df*)(b[k]+j+j));
    //    END_TSC(bpcount);
    //    printf("copy b end\n");
#pragma omp parallel for private(i,j,k,acopy2) schedule(static)
    for(i=0;i<m;i+=kk){
	//	BEGIN_TSC;
	/* prefetch the tile two tiles ahead */
	for(k=0;k<kk;k++){
	    __builtin_prefetch(a+i+k+16,0,0);
	    __builtin_prefetch(c+i+k+16,1,0);
	}
	/* broadcast each a element of the tile into both v2df lanes */
	for (j=0;j<n;j++){
	    double * ap = (double*)( a[i+j]);
	    for (k=0;k<kk;k++){
		acopy2[j][k]=(v2df){ap[k],ap[k]};
	    }
	}
	//	END_TSC(apcount);
	//	printf("copy a end\n");
	//	BEGIN_TSC;
	for(k=0;k<kk;k++){
	    v2df * cp = (v2df*) (c[i+k]);
	    v2df * ap = acopy2[k];
	    for(j=0;j<nh;j++){
		v2df * bp = bcopy[j];
		cp[j] -= ap[0]*bp[0] + ap[1]*bp[1]
		    + ap[2]*bp[2] + ap[3]*bp[3]
		    + ap[4]*bp[4] + ap[5]*bp[5]
		    + ap[6]*bp[6] + ap[7]*bp[7];
	    }
	}
	//	printf("calc c end\n");
	//	END_TSC(dotcount);
    }
    //    printf("m, kk, n = %d %d %d  counts = %g %g %g\n", m,kk,n,
    //	   (double)bpcount, (double)apcount, (double)dotcount);
    END_TSC(t,10);
}
/*
 * matmul_for_nk8_3: split the m rows into four chunks and run the
 * kk == 8 kernel matmul_for_nk8_0 on each chunk in parallel.
 */
void matmul_for_nk8_3(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int chunk;
    /* rows per chunk: 8 * ceil(m/32), so four chunks always cover m rows */
    int rows = ((m+31)/32)*8;
#pragma omp parallel for private(chunk) schedule(static)
    for(chunk=0;chunk<4;chunk++){
	int lo = chunk*rows;
	int hi = lo+rows;
	if (hi > m) hi = m;
	//	fprintf(stderr, "m, i, ifirst, iend = %d %d %d %d\n", m, chunk, lo, hi);
	if (lo < m){
	    matmul_for_nk8_0(n1, a[lo], n2, b, n3, c[lo], hi-lo, n);
	}
    }
}
/*
 * matmul_for_nk16_0: kk == 16 update built from two kk == 8 passes over
 * matmul_for_nk8_0 (first with rows 0..7 of b, then rows 8..15 and the
 * a columns shifted by 8), in row blocks of up to 64.  mm is trimmed
 * inside the loop on the final partial block.
 */
void matmul_for_nk16_0(int n1, double a[][n1],
		 int n2, double b[][n2],
		 int n3, double c[][n3],
		 int m,
		 int n)
{
    int i;
    int mm=64;
    for(i=0;i<m;i+=mm){
	if (i+mm >m) mm=m-i;
	matmul_for_nk8_0(n1, (double(*)[]) (a[i]), n2, b,
			 n3, (double(*)[]) (c[i]), mm, 16);
	matmul_for_nk8_0(n1, (double(*)[]) (&a[i][8]), n2,(double(*)[])(b[8]),
			 n3, (double(*)[]) (c[i]), mm, 16);
    }
}
/*
 * matmul_for_nk16: entry point for the kk == 16 kernels.  Small row
 * counts go straight to the serial kernel; larger ones are split
 * row-wise into four chunks processed by parallel OpenMP threads.
 */
void matmul_for_nk16(int n1, double a[][n1],
		int n2, double b[][n2],
		int n3, double c[][n3],
		int m,
		int n)
{
    if (m < 64){
	matmul_for_nk16_0c(n1, a, n2, b, n3, c, m, n);
	return;
    }
    int chunk;
    /* rows per chunk: 16 * ceil(m/64); four chunks always cover m rows */
    int rows = ((m+63)/64)*16;
#pragma omp parallel for private(chunk) schedule(static)
    for(chunk=0;chunk<4;chunk++){
	int lo = chunk*rows;
	int hi = lo+rows;
	if (hi > m) hi = m;
	//	fprintf(stderr, "m, i, ifirst, iend = %d %d %d %d\n", m, chunk, lo, hi);
	if (lo < m){
	    matmul_for_nk16_0c(n1, a[lo], n2, b, n3, c[lo], hi-lo, n);
	}
    }
}
/*
 * matmul_for_nk32: split the m rows into four chunks and run the
 * kk == 32 kernel matmul_for_nk32_0 on each chunk in parallel.
 */
void matmul_for_nk32(int n1, double a[][n1],
		int n2, double b[][n2],
		int n3, double c[][n3],
		int m,
		int n)
{
    int chunk;
    /* rows per chunk: 32 * ceil(m/128); four chunks always cover m rows */
    int rows = ((m+127)/128)*32;
#pragma omp parallel for private(chunk) schedule(static)
    for(chunk=0;chunk<4;chunk++){
	int lo = chunk*rows;
	int hi = lo+rows;
	if (hi > m) hi = m;
	//	fprintf(stderr, "m, i, ifirst, iend = %d %d %d %d\n", m, chunk, lo, hi);
	if (lo < m){
	    matmul_for_nk32_0(n1, a[lo], n2, b, n3, c[lo], hi-lo, n);
	}
    }
}
/*
 * matmul_for_small_nk_7: c -= a*b for general kk (multiple of 8, since
 * the dot-product loop is unrolled 8 deep), two rows of a/c per
 * iteration; kk == 8 is dispatched to the specialized matmul_for_nk8.
 * m and n must be even.  Wrapped in BEGIN_TSC / END_TSC cycle counters.
 */
void matmul_for_small_nk_7(int n1, double a[][n1],
		      int n2, double b[][n2],
		      int n3, double c[][n3],
		      int m,
		      int kk,
		      int n)
{
    int i,j;
    int nh = n/2;
    register int k;
    /* bcopy, acp, acp2 and the *count accumulators are unused leftovers */
    double bcopy[n][kk];
    v2df bcopy2[nh][kk];
    v2df acopy[kk];
    v2df acopy2[kk];
    double *acp = (double*) acopy;
    double *acp2 = (double*) acopy2;
    unsigned long bpcount, apcount, dotcount;
    if (kk == 8){
	matmul_for_nk8(n1, a, n2, b, n3, c, m, n);
	return;
    }
    BEGIN_TSC;
    bpcount= apcount= dotcount=0;
//    BEGIN_TSC;
    /* repack b: bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
    for(k=0;k<kk;k++)
	for(j=0;j<nh;j++)
	    bcopy2[j][k] = *((v2df*)(b[k]+j+j));
//    END_TSC(bpcount);
    for(i=0;i<m;i+=2){
//	BEGIN_TSC;
	double *ap=a[i];
	double *ap2=a[i+1];
	register v2df tmp, tmp2;
	v2df * cp = (v2df*) (&(c[i][0]));
	v2df * cp2 = (v2df*) (&(c[i+1][0]));
	/* broadcast rows i and i+1 of a via shufpd (0x0 = low lane,
	 * 0xff = high lane) */
	for(k=0;k<kk;k+=2){
	    v2df * aa = (v2df*)(ap+k);
	    __builtin_prefetch((double*)a[i+4]+k,0);
	    __builtin_prefetch((double*)a[i+5]+k,0);
	    acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
	    acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
	    aa = (v2df*)(ap2+k);
	    acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
	    acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
//	    acp[k*2]=acp[k*2+1]=ap[k];
//	    acp[k*2+2]=acp[k*2+3]=ap[k+1];
//	    acp2[k*2]=acp2[k*2+1]=ap2[k];
//	    acp2[k*2+2]=acp2[k*2+3]=ap2[k+1];
	}
//	END_TSC(apcount);
//	BEGIN_TSC;
	for(j=0;j<nh;j++){
	    tmp = tmp2= (v2df){0.0,0.0};
	    v2df ctmp= cp[j];
	    v2df ctmp2 = cp2[j] ;
	    v2df * bp = bcopy2[j];
	    __builtin_prefetch(c[i+4]+j,0);
	    __builtin_prefetch(c[i+5]+j,0);
	    /* 8-way unrolled dot product for both rows */
	    for(k=0;k<kk;k+=8){
		int k2 = k+4;
		v2df *avp = acopy+k;
		v2df *avp2 = acopy2+k;
		v2df *bvp = bp+k;
		tmp += avp[0]*bvp[0];
		tmp2 += avp2[0]*bvp[0];
		tmp +=avp[1]*bvp[1];
		tmp2+=avp2[1]*bvp[1];
		tmp +=avp[2]*bvp[2];
		tmp2+=avp2[2]*bvp[2];
		tmp +=avp[3]*bvp[3];
		tmp2+=avp2[3]*bvp[3];
		tmp += avp[4]*bvp[4];
		tmp2 += avp2[4]*bvp[4];
		tmp +=avp[5]*bvp[5];
		tmp2+=avp2[5]*bvp[5];
		tmp +=avp[6]*bvp[6];
		tmp2+=avp2[6]*bvp[6];
		tmp +=avp[7]*bvp[7];
		tmp2+=avp2[7]*bvp[7];
	    }
	    cp[j] = ctmp -tmp;
	    cp2[j] = ctmp2 -tmp2;
	}
//	END_TSC(dotcount);
    }
    //    printf("m, kk, n = %d %d %d  counts = %g %g %g\n", m,kk,n,
    //	   (double)bpcount, (double)apcount, (double)dotcount);
    END_TSC(t,11);
}
/*
 * OpenMP-parallel variant of the small-kk panel update C -= A * B.
 * Dispatches kk == 8/16/32 to dedicated kernels, otherwise runs the generic
 * SSE2 path with the row loop parallelized across threads.
 *
 * Same parameter conventions as matmul_for_small_nk_7: n1/n2/n3 are the VLA
 * row dimensions, m rows (assumed even), kk inner dimension (multiple of 8),
 * n columns (assumed even).
 */
void matmul_for_small_nk(int n1, double a[][n1],
                         int n2, double b[][n2],
                         int n3, double c[][n3],
                         int m,
                         int kk,
                         int n)
{
    int i,ii;                     /* NOTE(review): ii is unused */
    int nh = n/2;                 /* column count in 2-wide vector units */
    register int k;
    /* NOTE(review): bcopy, acp, acp2 appear unused in the live path (kept
       from a commented-out scalar variant in the sibling function). */
    double bcopy[n][kk];
    v2df bcopy2[nh][kk];          /* packed B: bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
    v2df acopy[kk];               /* per-thread broadcast copy of A row i   */
    v2df acopy2[kk];              /* per-thread broadcast copy of A row i+1 */
    double *acp = (double*) acopy;
    double *acp2 = (double*) acopy2;
    if (kk == 8){
        matmul_for_nk8_3(n1, a, n2, b, n3, c, m, n);
        return;
    }
    if (kk == 16){
        matmul_for_nk16(n1, a, n2, b, n3, c, m, n);
        return;
    }
    if (kk == 32){
        matmul_for_nk32(n1, a, n2, b, n3, c, m, n);
        return;
    }
    BEGIN_TSC;
    /* pack B once, shared read-only by all threads */
    for(k=0;k<kk;k++){
        int j;
        for(j=0;j<nh;j++)
            bcopy2[j][k] = *((v2df*)(b[k]+j+j));
    }
    /* acopy/acopy2 are privatized so each thread broadcasts its own rows */
#pragma omp parallel for private(i,k,acopy,acopy2) schedule(static)
    for(i=0;i<m;i+=2){
        int j;
        double *ap=a[i];
        double *ap2=a[i+1];
        register v2df tmp, tmp2;
        v2df * cp = (v2df*) (&(c[i][0]));
        v2df * cp2 = (v2df*) (&(c[i+1][0]));
        /* splat each scalar of rows i and i+1 into both lanes of a v2df */
        for(k=0;k<kk;k+=2){
            v2df * aa = (v2df*)(ap+k);
            acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
            acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
            aa = (v2df*)(ap2+k);
            acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
            acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
        }
        /* NOTE(review): prefetches reach rows i+4..i+21 — beyond the panel
           for the last iterations; presumably harmless, confirm. */
        __builtin_prefetch(a[i+4],0,3);
        __builtin_prefetch(c[i+4],1);
        __builtin_prefetch(a[i+5],0,3);
        __builtin_prefetch(c[i+5],1);
        __builtin_prefetch(a[i+20],0,3);
        __builtin_prefetch(c[i+20],1);
        __builtin_prefetch(a[i+21],0,3);
        __builtin_prefetch(c[i+21],1);
        for(j=0;j<nh;j++){
            tmp = tmp2= (v2df){0.0,0.0};
            v2df ctmp= cp[j];
            v2df ctmp2 = cp2[j] ;
            v2df * bp = bcopy2[j];
            /* dot product over k, hand-unrolled by 8 */
            for(k=0;k<kk;k+=8){
                int k2 = k+4;     /* NOTE(review): unused leftover */
                v2df *avp = acopy+k;
                v2df *avp2 = acopy2+k;
                v2df *bvp = bp+k;
                tmp += avp[0]*bvp[0];
                tmp2 += avp2[0]*bvp[0];
                tmp +=avp[1]*bvp[1];
                tmp2+=avp2[1]*bvp[1];
                tmp +=avp[2]*bvp[2];
                tmp2+=avp2[2]*bvp[2];
                tmp +=avp[3]*bvp[3];
                tmp2+=avp2[3]*bvp[3];
                tmp += avp[4]*bvp[4];
                tmp2 += avp2[4]*bvp[4];
                tmp +=avp[5]*bvp[5];
                tmp2+=avp2[5]*bvp[5];
                tmp +=avp[6]*bvp[6];
                tmp2+=avp2[6]*bvp[6];
                tmp +=avp[7]*bvp[7];
                tmp2+=avp2[7]*bvp[7];
            }
            /* C = C - A*B (alpha = -1, beta = 1) */
            cp[j] = ctmp -tmp;
            cp2[j] = ctmp2 -tmp2;
        }
    }
    END_TSC(t,11);
}
/*
 * DGEMM front end: C = alpha*A*B + beta*C (row-major, no transposes).
 * Routes the call to the GDR accelerator (when built with USEGDR and the
 * problem is large), to the hand-written small-kk kernel (k <= 16 with
 * alpha == -1, beta == 1, i.e. a trailing-panel update), or to CBLAS.
 * Also records per-k-size timing/TSC statistics.
 *
 * na/nb/nc are the leading dimensions of a, b, c respectively.
 */
void mydgemm(int m,
             int n,
             int k,
             double alpha,
             double * a,
             int na,
             double * b,
             int nb,
             double beta,
             double * c,
             int nc)
{
    double t0, t1, t2;            /* NOTE(review): t2 is unused */
    if (k>= 512){
        get_cputime(&t0,&t1);     /* wall-clock start for large problems only */
    }
    BEGIN_TSC;
    BEGIN_TIMER(timer);
#ifdef USEGDR
    /* large problems go to the GDR board; threshold chosen empirically */
    if ((k>512) || ((k==512) && ((n>=1024)||(m>=1024)))){
        // if (k>=2048){
        mygdrdgemm(m, n, k, alpha, a, na, b, nb, beta, c, nc);
    }else{
        /* the custom kernel only implements C = C - A*B */
        if ((k<=16) && (alpha == -1.0) && (beta == 1.0)){
            /* NOTE(review): passes flat double* where the kernel declares
               VLA-typed parameters — relies on the compiler accepting the
               implicit conversion; confirm build flags. */
            matmul_for_small_nk(na, a, nb, b, nc, c, m, n, k);
        }else{
            cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
                         m,n, k, alpha, a, na, b, nb, beta, c, nc);
        }
    }
#else
    cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
                 m,n, k, alpha, a, na, b, nb, beta, c, nc);
#endif
    // cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
    //              m,n, k, alpha, a, na, b, nb, beta, c, nc);
    /* bucket the timing stats by k so the different LU panel sizes are
       reported separately */
    if (k==2048){
        END_TIMER(timer,31,((double)(m))*n*k*2);
        END_TSC(t,14);
    }else if (k==1024){
        END_TIMER(timer,32,((double)(m))*n*k*2);
        END_TSC(t,15);
    }else if (k==512){
        END_TIMER(timer,33,((double)(m))*n*k*2);
        END_TSC(t,17);
    }else{
        END_TIMER(timer,34,((double)(m))*n*k*2);
        END_TSC(t,18);
    }
    if (k>= 512){
        get_cputime(&t0,&t1);
        dprintf(10,"dgemm M=%d N=%d K=%d time=%10.4g %g Gflops\n",
                m,n,k,t0, ((double)m)*n*k*2/t0/1e9);
    }
}
/*
 * Recover the GDR accelerator after a fault: restart it, clear the work
 * buffers, and run a dummy DGEMM to re-establish a known state.
 * No-op when built without USEGDR.
 *
 * m  : row dimension of a;  nb : block size (must be >= 2048)
 * n  : number of rows of awork used for the dummy multiply
 */
void reset_gdr(int m, double a[][m], int nb, double awork[][nb], int n)
{
#ifdef USEGDR
    double aw2[nb][nb];           /* scratch operand for the dummy multiply (VLA on stack) */
    if (nb < 2048){
        fprintf(stderr,"reset_gdr nb = %d <2048 not supported\n", nb);
        exit(-1);
    }
    gdr_check_and_restart(a, awork, aw2);
    int i,j;
    dprintf(9,"reset_gdr clear awork\n");
    /* zero awork (column-major-ish traversal: j rows, i columns) */
    for (i=0;i<nb;i++){
        for (j=0;j<n;j++){
            awork[j][i]=0;
        }
    }
    dprintf(9,"reset_gdr clear aw2\n");
    for (i=0;i<nb;i++){
        for (j=0;j<nb;j++){
            aw2[j][i]=0;
        }
    }
    dprintf(9,"reset_gdr try_dgemm\n");
    gdrsetforceswapab();
    /* NOTE(review): the same all-zero product is computed twice — first via
       CBLAS, then via mydgemm over the identical operands (beta = 0 both
       times, so the second overwrites the first). Presumably a deliberate
       warm-up/sanity sequence for the restarted board — confirm. */
    cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
                 n,nb, nb, 1.0, awork, nb, aw2, nb, 0.0, a, m);
    mydgemm(n,nb,nb,1.0,awork,nb,aw2,nb,0.0,a,m);
#endif
}
#ifndef USEGDR
/* No-op stubs so callers link when the GDR accelerator support is compiled
   out. The empty () parameter lists (not (void)) deliberately leave the
   argument lists unspecified so existing call sites with arguments still
   compile. */
void gdrsetforceswapab(){}
void gdrresetforceswapab(){}
void gdrsetskipsendjmat(){};
void gdrresetskipsendjmat(){}
void gdrsetnboards(){}
void set_matmul_msg_level(int level){}
void gdrdgemm_set_stress_factor(int x){}
#endif
|
pi_final.c | /*
*
* And we've come full circle. The serial pi-program modified by 2-ish lines to become parallel
*
* Author: Matt Cufari
* Version 1.0.0
* Date Created Jan 4 2021
* Date Last Modified Jan 4 2021
*
*/
#include <omp.h>
#include <stdio.h>
static int num_steps = 1000000;
double step;
int main(){
    /* Midpoint-rule integration of 4/(1+x^2) over [0,1]; the integral is pi.
       The loop-local x replaces the explicit private(x) clause. */
    int i;
    double sum = 0.0;
    step = 1.0 / (double)(num_steps);
#pragma omp parallel for reduction(+:sum)
    for (i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;      /* midpoint of slice i */
        sum += 4.0 / (1.0 + x * x);
    }
    double pi = sum * step;
    printf("The value of pi is: %f\n", pi);
}
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Naive 3x3 stride-1 int8 convolution: accumulates int32 sums into top_blob,
// one output channel per OpenMP task, summing over all input channels.
static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (output, input) channel pair
        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);

            // three consecutive input rows under the 3x3 window
            const signed char *row0 = img0;
            const signed char *row1 = img0 + w;
            const signed char *row2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    int acc = 0;

                    acc += (int)row0[0] * kernel0[0];
                    acc += (int)row0[1] * kernel0[1];
                    acc += (int)row0[2] * kernel0[2];
                    acc += (int)row1[0] * kernel0[3];
                    acc += (int)row1[1] * kernel0[4];
                    acc += (int)row1[2] * kernel0[5];
                    acc += (int)row2[0] * kernel0[6];
                    acc += (int)row2[1] * kernel0[7];
                    acc += (int)row2[2] * kernel0[8];

                    *outptr0 += acc;
                    outptr0++;

                    row0++;
                    row1++;
                    row2++;
                }

                // skip the 2-column overhang so each row pointer starts the next input row
                row0 += 2;
                row1 += 2;
                row2 += 2;
            }

            kernel0 += 9;
        }
    }
}
// Winograd F(2,3) kernel transform: U = G * g * G^T for every
// (output channel, input channel) 3x3 kernel, stored as 4x4 shorts.
// G is scaled by 2 so the arithmetic stays integral (compensated by >> 2
// in the output transform).
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4*4, inch, outch, 2ul);

    // G (times 2)
    const short ktm[4][3] = {
        { 2, 0, 0},
        { 1, 1, 1},
        { 1, -1, 1},
        { 0, 0, 2}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* k9 = (const signed char*)kernel + (p*inch + q) * 9;
            short* dst = kernel_tm.channel(p).row<short>(q);

            // gk = G * g  (rows of g are k9[0..2], k9[3..5], k9[6..8])
            short gk[4][3];
            for (int i=0; i<4; i++)
            {
                gk[i][0] = (short)k9[0] * ktm[i][0] + k9[1] * ktm[i][1] + k9[2] * ktm[i][2];
                gk[i][1] = (short)k9[3] * ktm[i][0] + k9[4] * ktm[i][1] + k9[5] * ktm[i][2];
                gk[i][2] = (short)k9[6] * ktm[i][0] + k9[7] * ktm[i][1] + k9[8] * ktm[i][2];
            }

            // U = gk * G^T, laid out row-major as dst[j*4 + i]
            for (int j=0; j<4; j++)
            {
                for (int i=0; i<4; i++)
                {
                    dst[j*4 + i] = gk[j][0] * ktm[i][0] + gk[j][1] * ktm[i][1] + gk[j][2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(2,3) int8 3x3 stride-1 convolution.
// Pipeline: pad input -> transform 4x4 input tiles (B^T d B) -> elementwise
// dot with pre-transformed kernels (see the matching transform_kernel
// function) -> inverse-transform to 2x2 output tiles (A^T . A) -> crop pad.
// All tile arithmetic is integral; the final >> 2 compensates the 2x-scaled G.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 1) / 2 * 2;       // round output up to a multiple of 2
    outh = (outh + 1) / 2 * 2;
    w = outw + 2;                    // each 2x2 output tile reads a 4x4 input tile
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        // one 16-short transformed tile per (tile, input channel)
        bottom_blob_tm.create(4*4, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f, 0.0f, -1.0f, 0.0f},
        //     {0.0f, 1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 0.00f, 1.0f}
        // };
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // four input rows covered by this row of tiles (stride 2)
                const signed char* r0 = img + w * j * 2;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[4],d1[4],d2[4],d3[4];
                    short w0[4],w1[4],w2[4],w3[4];
                    short t0[4],t1[4],t2[4],t3[4];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
                    }
                    // U = B_t * d_t  (second application completes B^T d B)
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n   ] = d0[n];
                        out_tm0[n+ 4] = d1[n];
                        out_tm0[n+ 8] = d2[n];
                        out_tm0[n+12] = d3[n];
                    }
                    r0 += 2;   // next tile starts 2 columns over
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;

                    out_tm0 += 16;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        // main loop handles output channels 4 at a time; tail handled below
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p+1);
            Mat out2_tm = top_blob_tm.channel(p+2);
            Mat out3_tm = top_blob_tm.channel(p+3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p+1);
            const Mat kernel2_tm = kernel_tm.channel(p+2);
            const Mat kernel3_tm = kernel_tm.channel(p+3);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);
                int* output1_tm = out1_tm.row<int>(i);
                int* output2_tm = out2_tm.row<int>(i);
                int* output3_tm = out3_tm.row<int>(i);

                int sum0[16] = {0};
                int sum1[16] = {0};
                int sum2[16] = {0};
                int sum3[16] = {0};

                int q = 0;
                // sum over input channels, 4 at a time; kernel rows for
                // consecutive input channels are 16 shorts apart, hence the
                // k += 16 / k -= 16*3 pointer dance
                for (; q+3<inch; q+=4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r3[n] * k0[n];
                        k0 -= 16 * 3;

                        sum1[n] += (int)r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r3[n] * k1[n];
                        k1 -= 16 * 3;

                        sum2[n] += (int)r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r3[n] * k2[n];
                        k2 -= 16 * 3;

                        sum3[n] += (int)r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }
                // leftover input channels
                for (; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum1[n] += (int)r0[n] * k1[n];
                        sum2[n] += (int)r0[n] * k2[n];
                        sum3[n] += (int)r0[n] * k3[n];
                    }
                }

                for (int n=0; n<16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
            }
        }

        // remaining output channels (outch not a multiple of 4)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[16] = {0};

                int q = 0;
                for (; q+3<inch; q+=4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel0_tm.row<short>(q+1);
                    const short* k2 = kernel0_tm.row<short>(q+2);
                    const short* k3 = kernel0_tm.row<short>(q+3);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum0[n] += (int)r1[n] * k1[n];
                        sum0[n] += (int)r2[n] * k2[n];
                        sum0[n] += (int)r3[n] * k3[n];
                    }
                }
                for (; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n=0; n<16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j=0; j<nColBlocks; j++)
            {
                // each tile row produces 2 output rows
                int* outRow0 = out.row<int>(j*2);
                int* outRow1 = out.row<int>(j*2+1);

                for(int i=0; i<nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j*nRowBlocks + i);

                    int s0[4],s1[4],s2[4],s3[4];
                    int w0[4],w1[4];
                    int d0[2],d1[2],d2[2],d3[2];
                    int o0[2],o1[2];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 4];
                        s2[n] = out_tile[n+ 8];
                        s3[n] = out_tile[n+12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0];
                        d1[0] = w0[1]; d1[1] = w1[1];
                        d2[0] = w0[2]; d2[1] = w1[2];
                        d3[0] = w0[3]; d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n];
                        o1[n] = d1[n] - d2[n] + d3[n];
                    }
                    // save to top blob tm,why right 2,because the G' = G*2
                    outRow0[0] = o0[0] >> 2;
                    outRow0[1] = o0[1] >> 2;
                    outRow1[0] = o1[0] >> 2;
                    outRow1[1] = o1[1] >> 2;

                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Winograd F(4,3) kernel transform: U = G * g * G^T for every
// (output channel, input channel) 3x3 kernel, stored as 6x6 shorts.
// G is the float matrix below scaled by 24 to stay integral; the output
// transform divides by 576 (= 24 * 24) to compensate.
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(6*6, inch, outch, 2ul);

    // G
    // const float ktm[6][3] = {
    //     {  1.0f/4,   0.0f,    0.0f},
    //     { -1.0f/6, -1.0f/6, -1.0f/6},
    //     { -1.0f/6,  1.0f/6, -1.0f/6},
    //     { 1.0f/24, 1.0f/12,  1.0f/6},
    //     { 1.0f/24,-1.0f/12,  1.0f/6},
    //     {    0.0f,    0.0f,    1.0f}
    // };
    const short ktm[6][3] = {
        { 6, 0, 0},
        { -4, -4, -4},
        { -4, 4, -4},
        { 1, 2, 4},
        { 1, -2, 4},
        { 0, 0, 24}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* k9 = (const signed char*)kernel + (p*inch + q) * 9;
            short* dst = kernel_tm.channel(p).row<short>(q);

            // gk = G * g  (rows of g are k9[0..2], k9[3..5], k9[6..8])
            short gk[6][3];
            for (int i=0; i<6; i++)
            {
                gk[i][0] = k9[0] * ktm[i][0] + k9[1] * ktm[i][1] + k9[2] * ktm[i][2];
                gk[i][1] = k9[3] * ktm[i][0] + k9[4] * ktm[i][1] + k9[5] * ktm[i][2];
                gk[i][2] = k9[6] * ktm[i][0] + k9[7] * ktm[i][1] + k9[8] * ktm[i][2];
            }

            // U = gk * G^T, laid out row-major as dst[j*6 + i]
            for (int j=0; j<6; j++)
            {
                for (int i=0; i<6; i++)
                {
                    dst[j*6 + i] = gk[j][0] * ktm[i][0] + gk[j][1] * ktm[i][1] + gk[j][2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(4,3) int8 3x3 stride-1 convolution.
// Pipeline: pad input -> transform 6x6 input tiles (B^T d B) -> elementwise
// dot with pre-transformed kernels -> inverse-transform to 4x4 output tiles
// (A^T . A) -> crop pad. The /576 in the output transform undoes the
// 24x-scaled integral G used by the kernel transform.
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;       // round output up to a multiple of 4
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;                    // each 4x4 output tile reads a 6x6 input tile
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        // one 36-short transformed tile per (tile, input channel)
        bottom_blob_tm.create(6*6, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // six input rows covered by this row of tiles (stride 4)
                const signed char* r0 = img + w * j * 4;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;
                const signed char* r4 = r3 + w;
                const signed char* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
                    short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
                    short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] =  4*d0[n] - 5*d2[n] + d4[n];
                        w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
                        w2[n] =  4*d1[n] - 4*d2[n] - d3[n] + d4[n];
                        w3[n] = -2*d1[n] -   d2[n] + 2*d3[n] + d4[n];
                        w4[n] =  2*d1[n] -   d2[n] - 2*d3[n] + d4[n];
                        w5[n] =  4*d1[n] - 5*d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
                        t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
                        t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
                    }
                    // d = B_t * d_t  (second application completes B^T d B)
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4*t0[n] - 5*t2[n] + t4[n];
                        d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
                        d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n];
                        d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
                        d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n];
                        d5[n] = 4*t1[n] - 5*t3[n] + t5[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 6; n++)
                    {
                        out_tm0[n   ] = d0[n];
                        out_tm0[n+ 6] = d1[n];
                        out_tm0[n+12] = d2[n];
                        out_tm0[n+18] = d3[n];
                        out_tm0[n+24] = d4[n];
                        out_tm0[n+30] = d5[n];
                    }
                    r0 += 4;   // next tile starts 4 columns over
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;

                    out_tm0 += 36;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);

        // per tile: 36-element elementwise products accumulated over inch
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[36] = {0};

                for (int q=0; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n=0; n<36; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n=0; n<36; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j=0; j<nColBlocks; j++)
            {
                // each tile row produces 4 output rows
                int* outRow0 = out.row<int>(j*4);
                int* outRow1 = out.row<int>(j*4+1);
                int* outRow2 = out.row<int>(j*4+2);
                int* outRow3 = out.row<int>(j*4+3);

                for(int i=0; i<nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j*nRowBlocks + i);

                    int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
                    int w0[6],w1[6],w2[6],w3[6];
                    int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
                    int o0[4],o1[4],o2[4],o3[4];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 6];
                        s2[n] = out_tile[n+12];
                        s3[n] = out_tile[n+18];
                        s4[n] = out_tile[n+24];
                        s5[n] = out_tile[n+30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] +   s3[n] +   s4[n];
                        w1[n] =         s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
                        w2[n] =         s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
                        w3[n] =         s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
                        d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                        d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                        d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                        d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                        d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] +   d3[n] +   d4[n];
                        o1[n] =         d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
                        o2[n] =         d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
                        o3[n] =         d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
                    }
                    // save to top blob tm (divide by 576 = 24^2, the G scaling)
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] / 576;
                        outRow1[n] = o1[n] / 576;
                        outRow2[n] = o2[n] / 576;
                        outRow3[n] = o3[n] / 576;
                    }

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Naive 3x3 stride-2 int8 convolution: accumulates int32 sums into top_blob,
// one output channel per OpenMP task, summing over all input channels.
static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // after one output row the cursors sit 2*outw columns in; this hop lands
    // them at the start of the next input row pair (down two rows)
    const int tailstep = w - 2 * outw + w;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (output, input) channel pair
        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);

            // three consecutive input rows under the 3x3 window
            const signed char *row0 = img0;
            const signed char *row1 = img0 + w;
            const signed char *row2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    int acc = 0;

                    acc += (int)row0[0] * kernel0[0];
                    acc += (int)row0[1] * kernel0[1];
                    acc += (int)row0[2] * kernel0[2];
                    acc += (int)row1[0] * kernel0[3];
                    acc += (int)row1[1] * kernel0[4];
                    acc += (int)row1[2] * kernel0[5];
                    acc += (int)row2[0] * kernel0[6];
                    acc += (int)row2[1] * kernel0[7];
                    acc += (int)row2[2] * kernel0[8];

                    *outptr0 += acc;
                    outptr0++;

                    // horizontal stride 2
                    row0 += 2;
                    row1 += 2;
                    row2 += 2;
                }

                row0 += tailstep;
                row1 += tailstep;
                row2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}
// 3x3 stride-1 int8 convolution with dequantization, routed through the
// generic im2col + int8 GEMM path.
static void conv3x3s1_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    const int kernel_w = 3, kernel_h = 3;
    const int stride_w = 1, stride_h = 1;

    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}
// 3x3 stride-2 int8 convolution with dequantization, routed through the
// generic im2col + int8 GEMM path.
static void conv3x3s2_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    const int kernel_w = 3, kernel_h = 3;
    const int stride_w = 2, stride_h = 2;

    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}
// 3x3 stride-1 int8 convolution with requantization, routed through the
// generic im2col + int8 GEMM path.
static void conv3x3s1_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    const int kernel_w = 3, kernel_h = 3;
    const int stride_w = 1, stride_h = 1;

    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}
// 3x3 stride-2 int8 convolution with requantization, routed through the
// generic im2col + int8 GEMM path.
static void conv3x3s2_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    const int kernel_w = 3, kernel_h = 3;
    const int stride_w = 2, stride_h = 2;

    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}
|
primitives.h | #pragma once
#include <vector>
#include <cstdint>
using namespace std;
/*
 * Both primitives compute inclusive prefix sums.
 * `histogram` provides cache-line-spaced slots for thread-local counts
 * (to avoid false sharing between threads).
 * `output` must not alias any variable captured by the function object f.
 * `size` is the number of input elements to scan.
 */
template<typename F>
// Inclusive prefix sum of the boolean flags f(0..size-1) into output[],
// computed cooperatively by the calling OpenMP team.
// NOTE(review): must be called by every thread of an active parallel region
// with exactly omp_num_threads threads (the bare `#pragma omp barrier`
// requires it) — confirm at call sites. Also assumes histogram[0] is zero
// on entry and histogram has (omp_num_threads+1)*CACHE_LINE_ENTRY slots.
void FlagPrefixSumOMP(vector<uint32_t> &histogram, uint32_t *output, size_t size, F f, int omp_num_threads) {
    // cached per OS thread; assumes OpenMP thread ids are stable across
    // parallel regions for the life of the thread — TODO confirm
    static thread_local int tid = omp_get_thread_num();
    // 1st Pass: local inclusive scan + per-thread count (histogram).
    auto avg = size / omp_num_threads;
    auto it_beg = avg * tid;
    // each thread's counter sits CACHE_LINE_ENTRY apart to avoid false sharing
    auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
    histogram[histogram_idx] = 0;
    auto it_end = tid == omp_num_threads - 1 ? size : avg * (tid + 1);
    auto prev = 0u;
    for (auto it = it_beg; it < it_end; it++) {
        if (f(it)) {
            histogram[histogram_idx]++;
            output[it] = prev + 1;
        } else {
            output[it] = prev;
        }
        prev = output[it];
    }
#pragma omp barrier
    // 2nd Pass: serial prefix sum over the per-thread counts, then each
    // thread offsets its local scan by the sum of all preceding threads.
#pragma omp single
    {
        for (auto tid = 0; tid < omp_num_threads; tid++) {
            auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
            auto prev_histogram_idx = (tid) * CACHE_LINE_ENTRY;
            histogram[histogram_idx] += histogram[prev_histogram_idx];
        }
    }   // implicit barrier after single: counts are final before the read below
    {
        auto prev_sum = histogram[tid * CACHE_LINE_ENTRY];
        for (auto it = it_beg; it < it_end; it++) {
            output[it] += prev_sum;
        }
#pragma omp barrier
    }
}
template<typename F>
// Inclusive prefix sum of the values f(0..size-1) into output[], computed
// cooperatively by the calling OpenMP team (generalizes FlagPrefixSumOMP
// from 0/1 flags to arbitrary uint32 addends).
// NOTE(review): same preconditions as FlagPrefixSumOMP — must run inside a
// parallel region with omp_num_threads threads and histogram[0] == 0.
void InclusivePrefixSumOMP(vector<uint32_t> &histogram, uint32_t *output, size_t size, F f, int omp_num_threads) {
    // cached per OS thread; assumes stable OpenMP thread ids — TODO confirm
    static thread_local int tid = omp_get_thread_num();
    // 1st Pass: local inclusive scan + per-thread total (histogram).
    auto avg = size / omp_num_threads;
    auto it_beg = avg * tid;
    // counters spaced CACHE_LINE_ENTRY apart to avoid false sharing
    auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
    histogram[histogram_idx] = 0;
    auto it_end = tid == omp_num_threads - 1 ? size : avg * (tid + 1);
    auto prev = 0u;
    for (auto it = it_beg; it < it_end; it++) {
        auto value = f(it);
        histogram[histogram_idx] += value;
        prev += value;
        output[it] = prev;
    }
#pragma omp barrier
    // 2nd Pass: serial prefix sum over per-thread totals, then each thread
    // offsets its local scan by the total of all preceding threads.
#pragma omp single
    {
        for (auto tid = 0; tid < omp_num_threads; tid++) {
            auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
            auto prev_histogram_idx = (tid) * CACHE_LINE_ENTRY;
            histogram[histogram_idx] += histogram[prev_histogram_idx];
        }
    }   // implicit barrier after single
    {
        auto prev_sum = histogram[tid * CACHE_LINE_ENTRY];
        for (auto it = it_beg; it < it_end; it++) {
            output[it] += prev_sum;
        }
#pragma omp barrier
    }
} |
5298.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
/*
 * Alternating Jacobi-style 3-D heat stencil (PolyBench heat-3d), tiled by
 * CHILL (8 x 64 x 16 tiles, outer i-tile parallelized with OpenMP).
 * Each timestep writes B from A, then A from B, over the interior
 * [1, n-2]^3; the boundary faces are never modified.
 *
 * Fix: the generated code hard-coded 500 timesteps and ignored the `tsteps`
 * parameter. The time loop now honors `tsteps`; PolyBench's default is 500,
 * so existing callers that pass 500 see identical behavior.
 */
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
  int t14;
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 1; t2 <= tsteps; t2 += 1) {
    /* half-step 1: B = stencil(A) */
#pragma omp parallel for private(t4,t6,t8,t10,t12,t14)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (n - 2 < t8 + 63 ? n - 2 : t8 + 63); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14];
    /* half-step 2: A = stencil(B) */
#pragma omp parallel for private(t4,t6,t8,t10,t12,t14)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (n - 2 < t8 + 63 ? n - 2 : t8 + 63); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14];
  }
}
|
mlp_example_f32_numa.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include <numa.h>
#define CHECK_L1
/* include c-based dnn library */
#include "../common/dnn_common.h"
/* Fill buf[0..size-1]: all ones when initOne is set, otherwise uniform
 * positive randoms when initPos is set, otherwise small values centered
 * near zero (0.05 - rng/10). */
LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne)
{
  int idx;
  zero_buf(buf, size);
  for (idx = 0; idx < (int)size; ++idx) {
    double value;
    if (initOne != 0) {
      value = 1.0;
    } else if (initPos != 0) {
      value = libxsmm_rng_f64();
    } else {
      value = 0.05 - libxsmm_rng_f64()/10.0;
    }
    buf[idx] = (float)value;
  }
}
/* Bitmask of elementwise operations fused into the FC kernels
 * (tested with `&` in the exec functions, so values are combinable). */
typedef enum my_eltwise_fuse {
MY_ELTWISE_FUSE_NONE = 0, /* plain GEMM, no fused elementwise op */
MY_ELTWISE_FUSE_BIAS = 1, /* add per-output-feature bias */
MY_ELTWISE_FUSE_RELU = 2, /* apply ReLU and record a relu mask */
MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU /* both */
} my_eltwise_fuse;
/* Bitmask selecting which pass(es) to run (tested with `&` in
 * my_fc_bwd_exec, so BWD_D and BWD_W can be combined). */
typedef enum my_pass {
MY_PASS_FWD = 1, /* forward pass */
MY_PASS_BWD_D = 2, /* backward w.r.t. data (input gradients) */
MY_PASS_BWD_W = 4, /* backward w.r.t. weights (weight gradients) */
MY_PASS_BWD = 6 /* BWD_D | BWD_W */
} my_pass;
/* Configuration ("handle") for the SGD optimizer step (my_opt_exec). */
typedef struct my_opt_config {
libxsmm_blasint C; /* input features of the layer being updated */
libxsmm_blasint K; /* output features of the layer being updated */
libxsmm_blasint bc; /* input-feature block size */
libxsmm_blasint bk; /* output-feature block size */
libxsmm_blasint threads; /* total number of worker threads */
float lr; /* learning rate used in W := W - lr * dW */
size_t scratch_size; /* always 0 in setup_my_opt (no scratch needed) */
libxsmm_barrier* barrier; /* barrier shared by the optimizer threads */
} my_opt_config;
/* Configuration for the softmax forward pass (my_smax_fwd_exec). */
typedef struct my_smax_fwd_config {
libxsmm_blasint N; /* minibatch size */
libxsmm_blasint C; /* number of classes/features */
libxsmm_blasint bn; /* minibatch block size */
libxsmm_blasint bc; /* feature block size */
libxsmm_blasint threads; /* total number of worker threads */
size_t scratch_size; /* always 0 in setup_my_smax_fwd */
libxsmm_barrier* barrier; /* barrier shared by the softmax threads */
} my_smax_fwd_config;
/* Configuration for the softmax backward pass. */
typedef struct my_smax_bwd_config {
libxsmm_blasint N; /* minibatch size */
libxsmm_blasint C; /* number of classes/features */
libxsmm_blasint bn; /* minibatch block size */
libxsmm_blasint bc; /* feature block size */
libxsmm_blasint threads; /* total number of worker threads */
size_t scratch_size; /* always 0 in setup_my_smax_bwd */
float loss_weight; /* scaling applied to the loss gradient */
libxsmm_barrier* barrier; /* barrier shared by the softmax threads */
} my_smax_bwd_config;
/* Configuration for the forward pass of one fully-connected layer.
 * Blocked layout: activations are (N/bn, C-or-K blocks, bn, bc-or-bk),
 * weights are (K/bk, C/bc, bc, bk). */
typedef struct my_fc_fwd_config {
libxsmm_blasint N; /* minibatch size */
libxsmm_blasint C; /* input features */
libxsmm_blasint K; /* output features */
libxsmm_blasint bn; /* minibatch block size */
libxsmm_blasint bc; /* input-feature block size */
libxsmm_blasint bk; /* output-feature block size */
libxsmm_blasint threads; /* total number of worker threads */
my_eltwise_fuse fuse_type; /* fused bias/ReLU selection */
libxsmm_blasint fwd_bf; /* blocking factor over input-feature blocks (accumulation chunks) */
libxsmm_blasint fwd_2d_blocking; /* nonzero: 2D (col x row team) thread decomposition */
libxsmm_blasint fwd_col_teams; /* number of teams along the minibatch dimension */
libxsmm_blasint fwd_row_teams; /* number of teams along the output-feature dimension */
size_t scratch_size; /* always 0 in setup_my_fc_fwd */
libxsmm_barrier* barrier; /* barrier shared by all threads of the layer */
libxsmm_smmfunction_reducebatch_strd gemm_fwd; /* BRGEMM with beta=1 (accumulates into output) */
libxsmm_smmfunction_reducebatch_strd gemm_fwd2; /* BRGEMM with beta=0 (overwrites output) */
} my_fc_fwd_config;
/* Configuration for the backward (data + weight-gradient) pass of one
 * fully-connected layer. */
typedef struct my_fc_bwd_config {
libxsmm_blasint N; /* minibatch size */
libxsmm_blasint C; /* input features */
libxsmm_blasint K; /* output features */
libxsmm_blasint bn; /* minibatch block size */
libxsmm_blasint bc; /* input-feature block size */
libxsmm_blasint bk; /* output-feature block size */
libxsmm_blasint threads; /* total number of worker threads */
my_eltwise_fuse fuse_type; /* fused bias/ReLU selection (drives dbias/relumask work) */
libxsmm_blasint bwd_bf; /* BWD-D accumulation blocking over output-feature blocks */
libxsmm_blasint bwd_2d_blocking; /* nonzero: 2D thread decomposition for BWD-D */
libxsmm_blasint bwd_col_teams; /* BWD-D teams along minibatch dimension */
libxsmm_blasint bwd_row_teams; /* BWD-D teams along input-feature dimension */
libxsmm_blasint upd_bf; /* UPD accumulation blocking over minibatch blocks */
libxsmm_blasint upd_2d_blocking; /* nonzero: 2D thread decomposition for UPD */
libxsmm_blasint upd_col_teams; /* UPD teams along input-feature dimension */
libxsmm_blasint upd_row_teams; /* UPD teams along output-feature dimension */
libxsmm_blasint ifm_subtasks; /* UPD sub-splits inside one bc block (flat path only) */
libxsmm_blasint ofm_subtasks; /* UPD sub-splits inside one bk block (flat path only) */
size_t scratch_size; /* bytes for dact/doutput staging, set by setup_my_fc_bwd */
libxsmm_barrier* barrier; /* barrier shared by all threads of the layer */
libxsmm_smmfunction_reducebatch_strd gemm_bwd; /* BWD-D BRGEMM, beta=1 */
libxsmm_smmfunction_reducebatch_strd gemm_bwd2; /* BWD-D BRGEMM, beta=0 */
libxsmm_smmfunction_reducebatch_strd gemm_upd; /* UPD BRGEMM ('N','T'), beta=1 */
libxsmm_smmfunction_reducebatch_strd gemm_upd2; /* UPD BRGEMM ('N','T'), beta=0 */
libxsmm_xtransfunction tr_kernel; /* bk x bc weight-block transpose kernel */
} my_fc_bwd_config;
/* Per-NUMA-domain thread/work partitioning and NUMA-local weight storage.
 * The pointer members are arrays indexed by layer. NOTE(review): the
 * blocksOFm_e/blocksIFm_tr_e bounds are used INCLUSIVELY by the exec code
 * (count = e - s + 1), whereas thr_e is used as an EXCLUSIVE bound
 * (count = thr_e - thr_s) -- confirm before changing either convention. */
typedef struct my_numa_thr_cfg {
int thr_s; /* first global thread id belonging to this NUMA domain */
int thr_e; /* exclusive end of this domain's thread-id range */
int *blocksOFm_s; /* per layer: first output-feature block owned (fwd weights) */
int *blocksOFm_e; /* per layer: last output-feature block owned (inclusive) */
int *blocksIFm_s; /* per layer: input-feature block range -- not used in visible code, TODO confirm */
int *blocksIFm_e;
int *blocksOFm_tr_s; /* per layer: OFM range for transposed weights -- TODO confirm use */
int *blocksOFm_tr_e;
int *blocksIFm_tr_s; /* per layer: first input-feature block of the transposed (bwd-d) weights */
int *blocksIFm_tr_e; /* per layer: last such block (inclusive, see my_fc_bwd_d_transpose) */
float **scratch; /* per layer: NUMA-local copy of the layer's weights */
size_t *layer_size; /* per layer: size of scratch[layer] -- presumably in floats or bytes, TODO confirm */
int **fwd_ofm_to_numa; /* per layer: map from OFM block to owning NUMA node -- TODO confirm */
float *bwd_d_scratch; /* transposed weights for the bwd-data pass (filled by my_fc_bwd_d_transpose) */
size_t bwd_d_scratch_size; /* size of bwd_d_scratch */
float *bwd_w_scratch; /* weight-gradient staging -- not used in visible code, TODO confirm */
size_t bwd_w_layer_size;
} my_numa_thr_cfg;
/* Build the forward configuration for one fully-connected layer:
 * copies the problem/blocking sizes, picks a parallelization strategy,
 * creates the thread barrier and JIT-dispatches the two forward BRGEMM
 * kernels (accumulating beta=1 and overwriting beta=0 variants).
 * Exits the process if JIT dispatch fails. */
my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
    libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_fwd_config handle;
  libxsmm_blasint ld_flt_out = bk; /* leading dim of filter and output panels */
  libxsmm_blasint ld_input = bc;   /* leading dim of the input panel */
  float one = 1.0f;
  float zero = 0.0f;

  /* record problem and blocking sizes */
  handle.N = N;
  handle.C = C;
  handle.K = K;
  handle.bn = bn;
  handle.bc = bc;
  handle.bk = bk;
  handle.threads = threads;
  handle.fuse_type = fuse_type;

  /* parallelization strategy: a 2x8 2D team layout is used only for the
   * tuned 16-thread case; everything else runs flat */
  if (16 == threads) {
    handle.fwd_bf = 1;
    handle.fwd_2d_blocking = 1;
    handle.fwd_col_teams = 2;
    handle.fwd_row_teams = 8;
  } else {
    handle.fwd_bf = 1;
    handle.fwd_2d_blocking = 0;
    handle.fwd_col_teams = 1;
    handle.fwd_row_teams = 1;
  }

  /* barrier shared by all threads working on this layer */
  handle.barrier = libxsmm_barrier_create(threads, 1);

  /* JIT the forward BRGEMM (strided batch-reduce): C[bk x bn] += A[bk x bc] * B[bc x bn],
   * strides advance over the input-feature blocks; beta = 1 accumulates */
  handle.gemm_fwd = libxsmm_smmdispatch_reducebatch_strd(handle.bk, handle.bn, handle.bc,
    handle.bk*handle.bc*sizeof(float), handle.bc*handle.bn*sizeof(float),
    &ld_flt_out, &ld_input, &ld_flt_out, &one, &one, NULL, NULL);
  if ( NULL == handle.gemm_fwd ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
    exit(-1);
  }
  /* same kernel with beta = 0: overwrites the output tile */
  handle.gemm_fwd2 = libxsmm_smmdispatch_reducebatch_strd(handle.bk, handle.bn, handle.bc,
    handle.bk*handle.bc*sizeof(float), handle.bc*handle.bn*sizeof(float),
    &ld_flt_out, &ld_input, &ld_flt_out, &one, &zero, NULL, NULL);
  if ( NULL == handle.gemm_fwd2 ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
    exit(-1);
  }

  /* the forward pass needs no scratch */
  handle.scratch_size = 0;
  return handle;
}
/* Build the backward configuration for one fully-connected layer:
 * problem/blocking sizes, parallelization strategy, barrier, the BWD-D and
 * UPD BRGEMM kernels (beta=1 and beta=0 variants each), and the weight
 * transpose kernel. Exits the process if any JIT dispatch fails.
 * NOTE: lda/ldb/ldc are reused -- first for the BWD-D kernels, then
 * reassigned for the UPD kernels; keep the statement order intact. */
my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
my_fc_bwd_config res;
const libxsmm_trans_descriptor* tr_desc = 0;
libxsmm_descriptor_blob blob;
/* leading dimensions for the BWD-D kernels: output is a bc x bn panel */
libxsmm_blasint lda = bc;
libxsmm_blasint ldb = bk;
libxsmm_blasint ldc = bc;
float alpha = 1.0f;
float beta = 1.0f; /* accumulate */
float zerobeta = 0.0f; /* overwrite */
/* UPD kernels compute dW = dY * X^T, hence the 'T' on the B operand */
int updflags = LIBXSMM_GEMM_FLAGS( 'N', 'T' );
libxsmm_blasint updM;
libxsmm_blasint updN;
/* setting up some handle values */
res.N = N;
res.C = C;
res.K = K;
res.bn = bn;
res.bc = bc;
res.bk = bk;
res.threads = threads;
res.fuse_type = fuse_type;
/* setup parallelization strategy: 2D decomposition only for the tuned
 * 16-thread case, flat otherwise */
if (threads == 16) {
res.bwd_bf = 1;
res.bwd_2d_blocking = 1;
res.bwd_col_teams = 2;
res.bwd_row_teams = 8;
res.upd_bf = 1;
res.upd_2d_blocking = 0;
res.upd_col_teams = 1;
res.upd_row_teams = 1;
res.ifm_subtasks = 1;
res.ofm_subtasks = 1;
} else {
res.bwd_bf = 1;
res.bwd_2d_blocking = 0;
res.bwd_col_teams = 1;
res.bwd_row_teams = 1;
res.upd_bf = 1;
res.upd_2d_blocking = 0;
res.upd_col_teams = 1;
res.upd_row_teams = 1;
res.ifm_subtasks = 1;
res.ofm_subtasks = 1;
}
#if 0
res.bwd_bf = atoi(getenv("BWD_BF"));
res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING"));
res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS"));
res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS"));
res.upd_bf = atoi(getenv("UPD_BF"));
res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING"));
res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS"));
res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS"));
res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS"));
res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS"));
#endif
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* TPP creation */
/* BWD GEMM: dX[bc x bn] (+)= W^T-blocks * dY[bk x bn], batch-reduced over
 * output-feature blocks (strides walk the transposed filter and doutput) */
res.gemm_bwd = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk,
res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float),
&lda, &ldb, &ldc, &alpha, &beta, NULL, NULL);
if ( res.gemm_bwd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n");
exit(-1);
}
/* same kernel with beta = 0 (no accumulation needed when bwd_bf == 1) */
res.gemm_bwd2 = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk,
res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float),
&lda, &ldb, &ldc, &alpha, &zerobeta, NULL, NULL);
if ( res.gemm_bwd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n");
exit(-1);
}
/* Transpose kernel used for weight transpose in bwd pass */
tr_desc = libxsmm_trans_descriptor_init(&blob, sizeof(float), res.bk, res.bc, res.bc);
res.tr_kernel = libxsmm_dispatch_trans(tr_desc);
if ( res.tr_kernel == NULL ) {
fprintf( stderr, "JIT for transpose TPP tr_kernel failed. Bailing...!\n");
exit(-1);
}
/* UPD GEMM: leading dims are rebound for the weight-gradient kernels */
lda = res.bk;
ldb = res.bc;
ldc = res.bk;
/* subtask splitting shrinks the per-call tile inside one bc x bk block */
updM = res.bk/res.ofm_subtasks;
updN = res.bc/res.ifm_subtasks;
/* dW[bk x bc] (+)= dY * X^T, batch-reduced over minibatch blocks;
 * note the strides advance by full K/C rows per bn block */
res.gemm_upd = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn,
res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float),
&lda, &ldb, &ldc, &alpha, &beta, &updflags, NULL);
if ( res.gemm_upd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n");
exit(-1);
}
/* same kernel with beta = 0 */
res.gemm_upd2 = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn,
res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float),
&lda, &ldb, &ldc, &alpha, &zerobeta, &updflags, NULL);
if ( res.gemm_upd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n");
exit(-1);
}
/* init scratch: room for input+output gradients plus one weight tensor */
res.scratch_size = sizeof(float) * ( (((size_t)res.C + (size_t)res.K) * (size_t)res.N) + ((size_t)res.C * (size_t)res.K) );
return res;
}
/* Build the SGD optimizer configuration for one layer: records the
 * layer/blocking sizes and learning rate and creates the thread barrier.
 * The optimizer needs no scratch memory. */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk,
    libxsmm_blasint threads, float lr) {
  my_opt_config handle;

  /* record the layer geometry and hyper-parameters */
  handle.C = C;
  handle.K = K;
  handle.bc = bc;
  handle.bk = bk;
  handle.threads = threads;
  handle.lr = lr;

  /* barrier shared by all optimizer worker threads */
  handle.barrier = libxsmm_barrier_create(threads, 1);

  /* no scratch required for the SGD step */
  handle.scratch_size = 0;
  return handle;
}
/* Build the softmax forward configuration: records the minibatch/feature
 * geometry and creates the thread barrier. No scratch is required. */
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
    libxsmm_blasint threads) {
  my_smax_fwd_config handle;

  /* record problem geometry */
  handle.C = C;
  handle.N = N;
  handle.bc = bc;
  handle.bn = bn;
  handle.threads = threads;

  /* barrier shared by all softmax worker threads */
  handle.barrier = libxsmm_barrier_create(threads, 1);

  /* softmax forward needs no scratch */
  handle.scratch_size = 0;
  return handle;
}
/* Build the softmax backward configuration: like the forward setup but
 * additionally records the loss weight used to scale the gradient. */
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
    libxsmm_blasint threads, float loss_weight) {
  my_smax_bwd_config handle;

  /* record problem geometry and loss scaling */
  handle.C = C;
  handle.N = N;
  handle.bc = bc;
  handle.bn = bn;
  handle.threads = threads;
  handle.loss_weight = loss_weight;

  /* barrier shared by all softmax worker threads */
  handle.barrier = libxsmm_barrier_create(threads, 1);

  /* softmax backward needs no scratch */
  handle.scratch_size = 0;
  return handle;
}
/* Forward pass of one fully-connected layer (f32), executed per thread.
 * Computes out = W * in (+ bias, + ReLU depending on cfg.fuse_type) using
 * the JIT BRGEMM kernels; the weights are read from this thread's
 * NUMA-local copy numa_thr_cfg->scratch[layer]. Four code paths:
 * {2D-blocked, flat} x {BF>1 accumulation, single-shot}.
 * NOTE(review): the flat paths index the filter with ofm1-ofm_start
 * (NUMA-local weight slice) while the 2D-blocked paths use ofm1 directly;
 * presumably the 2D path assumes a full weight replica per domain --
 * confirm against the allocation code before reusing either path. */
void my_fc_fwd_exec( my_fc_fwd_config cfg, const float* in_act_ptr, float* out_act_ptr,
const float* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer) {
const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ?
(work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* loop variables */
libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0, mb2 = 0, ofm2 = 0;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0;
libxsmm_blasint my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0;
libxsmm_blasint my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
/* blocked-layout views over activations, NUMA-local weights, bias, mask */
LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
LIBXSMM_VLA_DECL(4, const float, filter, numa_thr_cfg->scratch[layer], nBlocksIFm, cfg.bc, cfg.bk);
LIBXSMM_VLA_DECL(2, const float, bias, bias_ptr, cfg.bk);
LIBXSMM_VLA_DECL(4, unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk);
unsigned long long blocks = nBlocksIFm;
libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;
LIBXSMM_UNUSED( scratch );
/* BF splits the reduction over input-feature blocks into chunks of
 * CB_BLOCKS, accumulated via the beta=1 kernel */
BF = cfg.fwd_bf;
CB_BLOCKS = nBlocksIFm/BF;
blocks = CB_BLOCKS;
/* 2D decomposition: col teams split the minibatch, row teams split the
 * output features */
col_teams = cfg.fwd_col_teams;
row_teams = cfg.fwd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, col_teams);
M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksMB);
my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksMB);
my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
/* first OFM block held in this NUMA domain's weight slice */
const libxsmm_blasint ofm_start = numa_thr_cfg->blocksOFm_s[layer];
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
if (cfg.fwd_2d_blocking == 1) {
if (BF > 1) {
/* 2D-blocked, accumulating path */
for (ifm1 = 0; ifm1 < BF; ++ifm1) {
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
/* Initialize output slice with bias (or zero) on the first chunk */
if ( ifm1 == 0 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
}
}
} else {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0;
}
}
}
}
/* BRGEMM: accumulate CB_BLOCKS input-feature blocks into the tile */
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
/* apply post BRGEMM fusion on the last reduction chunk */
if ( ifm1 == BF-1 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
/* remember pre-ReLU sign for the backward pass */
LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
l_cur_out = (l_cur_out > (float)0) ? l_cur_out : (float)0;
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
}
}
}
}
}
}
}
} else {
/* 2D-blocked, single-shot path: bias seeds beta=1 kernel, otherwise
 * the beta=0 kernel writes the tile directly */
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
}
}
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
} else {
cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
}
/* post GEMM fusion */
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
l_cur_out = ( l_cur_out > (float)0 ) ? l_cur_out : (float)0;
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
}
}
}
}
}
}
} else {
if (BF > 1) {
/* flat work-partitioned, accumulating path */
for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
/* Initialize output slice with bias (or zero) on the first chunk */
if ( ifm1 == 0 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
}
}
} else {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0;
}
}
}
}
/* BRGEMM */
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
/* post GEMM fusion on the last reduction chunk */
if ( ifm1 == BF-1 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
l_cur_out = (l_cur_out > (float)0) ? l_cur_out : (float)0;
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
}
}
}
}
}
}
} else {
/* flat work-partitioned, single-shot path */
for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
}
}
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
} else {
cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
}
/* post GEMM fusion */
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
l_cur_out = ( l_cur_out > (float)0 ) ? l_cur_out : (float)0;
LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
}
}
}
}
}
}
/* all threads must finish before the next layer consumes the output */
libxsmm_barrier_wait(cfg.barrier, ltid);
}
/* Transpose the layer's weight blocks into this NUMA domain's
 * bwd_d_scratch so the backward-data GEMM can read them locally.
 * Each bk x bc block is fetched from the domain that owns it
 * (via ofm_to_node) and transposed to bc x bk. Work is split over the
 * threads of numa_node only; the range of input-feature blocks handled
 * here is [blocksIFm_tr_s, blocksIFm_tr_e] (inclusive). */
void my_fc_bwd_d_transpose( my_fc_bwd_config cfg, int my_tid, my_numa_thr_cfg **numa_thr_cfg_, int numa_node, int layer, int *ofm_to_node) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
/* Transpose kernel to transpose filters */
libxsmm_xtransfunction tr_kernel = cfg.tr_kernel;
/* here we assume that input and output blocking is similar */
const libxsmm_blasint bk = cfg.bk;
const libxsmm_blasint bc = cfg.bc;
const libxsmm_blasint nBlocksIFm = cfg.C / bc;
const libxsmm_blasint nBlocksOFm = cfg.K / bk;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - numa_thr_cfg[numa_node].thr_s;
/* inclusive range -> +1 for the local block count */
const libxsmm_blasint l_nBlocksIFm = (numa_thr_cfg[numa_node].blocksIFm_tr_e[layer] - numa_thr_cfg[numa_node].blocksIFm_tr_s[layer]) + 1;
/* number of tasks for transpose that could be run in parallel */
const libxsmm_blasint transpose_work = l_nBlocksIFm * nBlocksOFm;
/* compute chunk size */
int thr = numa_thr_cfg[numa_node].thr_e - numa_thr_cfg[numa_node].thr_s;
const libxsmm_blasint transpose_chunksize = (transpose_work % thr == 0) ? (transpose_work / thr) : ((transpose_work / thr) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work;
const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? ((ltid + 1) * transpose_chunksize) : transpose_work;
float *filter_tr = numa_thr_cfg[numa_node].bwd_d_scratch;
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, my_tid);
/* transpose weight */
int ifm1ofm1 = 0;
for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) {
const unsigned int ubk = (unsigned int)bk;
const unsigned int ubc = (unsigned int)bc;
int ofm1 = ifm1ofm1 / l_nBlocksIFm;
int ifm1 = ifm1ofm1 % l_nBlocksIFm;
/* source block lives on the NUMA node that owns this OFM block */
my_numa_thr_cfg *l_numa_thr_cfg = &numa_thr_cfg[ofm_to_node[ofm1]];
float *inp = l_numa_thr_cfg->scratch[layer];
/* offset into the owner's weight slice: OFM index is rebased to that
 * node's first block, IFM index is rebased to this node's tr range */
inp = inp + (ofm1 - l_numa_thr_cfg->blocksOFm_s[layer]) * nBlocksIFm * bc * bk
+ (ifm1 + numa_thr_cfg[numa_node].blocksIFm_tr_s[layer]) * bc * bk;
/* destination layout: [ifm1][ofm1][bc][bk-transposed] */
float *out = filter_tr + ifm1 * nBlocksOFm * bk * bc + ofm1 * bk * bc;
tr_kernel(inp, &ubk, out, &ubc);
}
libxsmm_barrier_wait(cfg.barrier, my_tid);
}
/* Backward pass of one fully-connected layer (f32), executed per thread.
 * Depending on `pass` it computes input gradients (BWD_D) and/or weight
 * gradients (BWD_W); fused ReLU masking and bias-gradient reduction run
 * first when enabled in cfg.fuse_type. The BWD_D GEMM reads the
 * transposed weights from numa_thr_cfg->bwd_d_scratch (prepared by
 * my_fc_bwd_d_transpose).
 * NOTE(review): the flat BWD_D path indexes filter_tr with
 * ifm1 - ifm_start (NUMA-local slice) while the 2D-blocked path uses
 * ifm1 directly -- presumably the 2D path assumes a full transposed
 * replica; confirm against the allocation code. */
void my_fc_bwd_exec( my_fc_bwd_config cfg, float* din_act_ptr,
float* dout_act_ptr, float* dwt_ptr, const float* in_act_ptr,
float* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer ) {
/* here we assume that input and output blocking is similar */
const libxsmm_blasint bn = cfg.bn;
const libxsmm_blasint bk = cfg.bk;
const libxsmm_blasint bc = cfg.bc;
const libxsmm_blasint nBlocksIFm = cfg.C / bc;
const libxsmm_blasint nBlocksOFm = cfg.K / bk;
const libxsmm_blasint nBlocksMB = cfg.N / bn;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks for the ReLU-mask elementwise stage */
const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work;
const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work;
libxsmm_blasint mb1ofm1;
/* number of tasks for the bias-gradient reduction */
const libxsmm_blasint dbias_work = nBlocksOFm;
/* compute chunk size */
const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work;
const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work;
/* loop variables */
libxsmm_blasint ofm1 = 0, mb1 = 0, ofm2 = 0, mb2 = 0;
/* with fused ReLU the masked gradient is staged in scratch so the
 * original dout is not clobbered; otherwise dout is used directly */
float *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? ((float*)scratch)+(cfg.C*cfg.K) : dout_act_ptr);
LIBXSMM_VLA_DECL(4, const float, doutput_orig, dout_act_ptr, nBlocksOFm, bn, bk);
LIBXSMM_VLA_DECL(4, float, doutput, grad_output_ptr, nBlocksOFm, bn, bk);
LIBXSMM_VLA_DECL(2, float, dbias, dbias_ptr, cfg.bk);
LIBXSMM_VLA_DECL(4, const unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk);
/* first IFM block of this NUMA domain's transposed-weight slice */
const libxsmm_blasint ifm_start = numa_thr_cfg->blocksIFm_tr_s[layer];
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
/* zero the gradient wherever the forward activation was clipped */
for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
float l_cur_out = LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
l_cur_out = (LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) != 0) ? l_cur_out : (float)0;
LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
}
}
}
/* wait for eltwise to finish */
libxsmm_barrier_wait(cfg.barrier, ltid);
}
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
/* bias gradient: reduce doutput over the whole minibatch per OFM block */
for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) = 0.0f;
}
for ( mb1 = 0; mb1 < nBlocksMB; ++mb1 ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) += LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
}
}
}
}
/* wait for eltwise to finish */
libxsmm_barrier_wait(cfg.barrier, ltid);
}
if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ) {
/* backward w.r.t. data: dX = W^T * dY using the transposed weights */
const libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* loop variables */
libxsmm_blasint ifm1 = 0, ifm2 = 0, mb1ifm1 = 0;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
LIBXSMM_VLA_DECL(4, float, dinput, din_act_ptr, nBlocksIFm, bn, bc);
/* transposed weights prepared by my_fc_bwd_d_transpose */
LIBXSMM_VLA_DECL(4, float, filter_tr, numa_thr_cfg->bwd_d_scratch, nBlocksOFm, bk, bc);
unsigned long long blocks = nBlocksOFm;
libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1;
/* BF splits the reduction over output-feature blocks into KB_BLOCKS chunks */
BF = cfg.bwd_bf;
KB_BLOCKS = nBlocksOFm/BF;
blocks = KB_BLOCKS;
if (use_2d_blocking == 1) {
/* col teams split the minibatch, row teams split the input features */
col_teams = cfg.bwd_col_teams;
row_teams = cfg.bwd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, col_teams);
M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, row_teams);
my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksMB);
my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksMB);
my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksIFm);
my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksIFm);
}
if (use_2d_blocking == 1) {
if (BF > 1) {
/* 2D-blocked, accumulating path: zero dX on the first chunk */
for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
/* Initialize intermediate f32 tensor */
if ( ofm1 == 0 ) {
for ( mb2 = 0; mb2 < bn; ++mb2 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0;
}
}
}
cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
} else {
/* 2D-blocked, single-shot path: beta=0 kernel writes dX directly */
for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
cfg.gemm_bwd2( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, 0, 0, 0, nBlocksOFm, bk, bc),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
} else {
if (BF > 1) {
/* flat work-partitioned, accumulating path */
for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
mb1 = mb1ifm1%nBlocksMB;
ifm1 = mb1ifm1/nBlocksMB;
/* Initialize intermediate f32 tensor */
if ( ofm1 == 0 ) {
for ( mb2 = 0; mb2 < bn; ++mb2 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0;
}
}
}
cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
} else {
/* flat work-partitioned, single-shot path; the filter index is
 * rebased to this NUMA domain's transposed slice */
for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
mb1 = mb1ifm1%nBlocksMB;
ifm1 = mb1ifm1/nBlocksMB;
cfg.gemm_bwd2( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1 - ifm_start, 0, 0, 0, nBlocksOFm, bk, bc ),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
/* backward w.r.t. weights: dW = dY * X^T, batch-reduced over MB blocks */
/* number of tasks that could be run in parallel */
const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks;
const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks;
const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks;
const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks;
const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks;
const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks;
const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks;
/* 2D blocking parameters */
libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
libxsmm_blasint BF = cfg.upd_bf;
/* loop variables */
libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, ii = 0, jj = 0;
/* Batch reduce related variables */
unsigned long long blocks = nBlocksMB/BF;
LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, bn, bc);
LIBXSMM_VLA_DECL(4, float, dfilter, dwt_ptr, nBlocksIFm, bc, bk);
if (use_2d_blocking == 1) {
/* col teams split the input features, row teams the output features */
col_teams = cfg.upd_col_teams;
row_teams = cfg.upd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, col_teams);
M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksIFm);
my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksIFm);
my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
}
if (use_2d_blocking == 1) {
if (BF == 1) {
/* 2D-blocked, single-shot: beta=0 kernel reduces all MB blocks */
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
cfg.gemm_upd2(&LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, 0, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks);
}
}
} else {
/* 2D-blocked, accumulating over BF minibatch chunks */
for (bfn = 0; bfn < BF; bfn++) {
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
/* initialize current work task to zero */
if (bfn == 0) {
for (ii = 0; ii<bc; ii++) {
for (jj = 0; jj<bk; jj++) {
LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ii, jj, nBlocksIFm, bc, bk) = (float)0;
}
}
}
cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, 0, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks);
}
}
}
}
} else {
if (BF == 1) {
/* flat, single-shot: task id decodes to (ofm1, ofm2, ifm1, ifm2)
 * sub-tiles of size bbc x bbk inside one weight block */
for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
ofm1 = ifm1ofm1 / Cck_work;
ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
cfg.gemm_upd2( &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, ifm2*bbc, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
}
} else {
/* flat, accumulating over BF minibatch chunks */
for (bfn = 0; bfn < BF; bfn++) {
for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
ofm1 = ifm1ofm1 / Cck_work;
ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
/* initialize current work task to zero */
if (bfn == 0) {
for (ii = 0; ii<bbc; ii++) {
for (jj = 0; jj<bbk; jj++) {
LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc+ii, ofm2*bbk+jj, nBlocksIFm, bc, bk) = (float)0;
}
}
}
cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, ifm2*bbc, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
}
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
}
/* SGD solver step for layer l: w := w - lr * dw, applied to this NUMA node's
 * local weight scratch copy.  The (ofm, ifm) block tasks owned by the node
 * are split across the node's threads; the inner update uses AVX-512
 * (16 floats per iteration).
 * delwt_ptr points at the full-layer gradient tensor; only the chunk
 * [blocksOFm_s[l], blocksOFm_e[l]] is read.
 * NOTE(review): assumes IFM_shift (bc*bk) is a multiple of 16 -- confirm. */
void my_opt_exec( my_opt_config cfg, const float* delwt_ptr, int start_tid, int my_tid,
                  my_numa_thr_cfg *numa_thr_cfg, int l, my_fc_fwd_config my_fc_fwd) {
  /* logical thread id within this node's thread range */
  const libxsmm_blasint ltid = my_tid - numa_thr_cfg->thr_s;
  const libxsmm_blasint nBlocksIFm = my_fc_fwd.C / my_fc_fwd.bc;
  const libxsmm_blasint IFM_shift = my_fc_fwd.bc * my_fc_fwd.bk;
  const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd.bc * my_fc_fwd.bk;
  /* number of (ofm, ifm) block tasks owned by this node for layer l */
  const libxsmm_blasint work = ((numa_thr_cfg->blocksOFm_e[l] - numa_thr_cfg->blocksOFm_s[l]) + 1) * nBlocksIFm;
  /* compute chunk size; fix: guard against a single-thread node
     (thr_e == thr_s), which previously divided by zero */
  int thr = numa_thr_cfg->thr_e - numa_thr_cfg->thr_s;
  if (thr < 1) thr = 1;
  const libxsmm_blasint chunksize = (work % thr == 0) ? (work / thr) : ((work / thr) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  libxsmm_barrier_init( cfg.barrier, my_tid );
  __m512 vlr = _mm512_set1_ps( cfg.lr );
  /* gradients for this node's first OFM block (const cast: read-only use) */
  float *dw_ptr = (float*)delwt_ptr + numa_thr_cfg->blocksOFm_s[l] * OFM_shift;
  int j = 0, i = 0;
  for (j = thr_begin; j < thr_end; j++) {
    int ofm = j / nBlocksIFm;
    int ifm = j % nBlocksIFm;
    /* scratch[l] holds only this node's chunk, hence no blocksOFm_s offset */
    float *out = numa_thr_cfg->scratch[l] + ofm * OFM_shift + ifm * IFM_shift;
    float *inp = dw_ptr + ofm * OFM_shift + ifm * IFM_shift;
    for (i = 0; i < IFM_shift; i += 16)
      _mm512_storeu_ps( out+i, _mm512_sub_ps( _mm512_loadu_ps( out+i ), _mm512_mul_ps( vlr, _mm512_loadu_ps( inp + i ) ) ) );
  }
  libxsmm_barrier_wait( cfg.barrier, my_tid );
}
/* Softmax forward over blocked activations laid out [Bn][Bc][bn][bc], plus
 * negative-log-likelihood loss.  The N = Bn*bn images are split across the
 * threads; the loss reduction is done single-threaded by logical thread 0.
 * Uses the standard max-subtraction trick for numerical stability. */
void my_smax_fwd_exec( my_smax_fwd_config cfg, const float* in_act_ptr, float* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
  LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
  for ( i = n_thr_begin; i < n_thr_end; ++i ) {
    /* fix: start from the lowest representable float.  FLT_MIN is the
       smallest POSITIVE normal, so an all-negative row would previously
       keep max = FLT_MIN and destabilize the exponentials below. */
    float max = -FLT_MAX;
    float sum_of_exp = 0.0f;
    img1 = i/bn;
    img2 = i%bn;
    /* set output to input and compute max per image */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) {
          max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        }
      }
    }
    /* sum exp over outputs */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) );
        sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc );
      }
    }
    /* scale output by the reciprocal of the partition sum */
    sum_of_exp = 1.0f/sum_of_exp;
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp;
      }
    }
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
  /* calculate loss single threaded */
  if ( ltid == 0 ) {
    (*loss) = 0.0f;
    for ( img1 = 0; img1 < Bn; ++img1 ) {
      for ( img2 = 0; img2 <bn; ++img2 ) {
        libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn );
        libxsmm_blasint ifm1b = ifm/bc;
        libxsmm_blasint ifm2b = ifm%bc;
        /* clamp to FLT_MIN so log() never sees zero */
        float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN;
        /* fix: accumulate over all images (was '=', which kept only the
           last image's log-prob); the 1/N scaling below expects a sum */
        *loss += LIBXSMM_LOGF( val );
      }
    }
    *loss = ((-1.0f)*(*loss))/cfg.N;
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
}
/* Softmax + NLL backward: dinput = (softmax_output - onehot(label)) * loss_weight / N.
 * Images (N = Bn*bn) are split across threads exactly like the forward pass. */
void my_smax_bwd_exec( my_smax_bwd_config cfg, float* delin_act_ptr, const float* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  float rcp_N = 1.0f/cfg.N;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
  LIBXSMM_VLA_DECL(4, const float, output, out_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(4, float, dinput, delin_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
  for ( i = n_thr_begin; i < n_thr_end; ++i ) {
    img1 = i/bn;
    img2 = i%bn;
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        /* fix: the flat class index of (ifm1, ifm2) is ifm1*bc + ifm2
           (block index times block size) -- the fwd pass decomposes labels
           as ifm/bc and ifm%bc.  The original used Bc (the number of
           blocks), which is only correct when Bc happens to equal bc. */
        if ( (ifm1*bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) {
          LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
            ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight;
        } else {
          LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
            LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight;
        }
      }
    }
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
}
/* Allocate 'size' bytes on NUMA node 'numa_node'.
 * The aligned-allocation path below is compiled out (#if 0): it would
 * over-allocate, round the pointer up to 'alignment_' (presumably a power
 * of two -- TODO confirm) and stash the adjustment just before the returned
 * pointer so numa_free_aligned() could recover the original allocation.
 * NOTE(review): in that disabled path the stored value is a byte offset,
 * while numa_free_aligned's disabled path subtracts it in size_t units --
 * verify both before re-enabling.
 * As compiled, this is a plain numa_alloc_onnode() and alignment_ is unused
 * (numa_alloc_onnode is page-granular anyway). */
void *numa_alloc_onnode_aligned(size_t size, int numa_node, int alignment_) {
#if 0
  int alignment = alignment_ - 1;
  size_t adj_size = sizeof(size_t) + alignment;
  void *r_ptr = NULL;
  void *t_ptr = numa_alloc_onnode(size + adj_size, numa_node);
  if (t_ptr == NULL) return NULL;
  r_ptr = (void *)(((size_t)t_ptr + adj_size) & ~alignment);
  *((size_t*)r_ptr - 1) = (size_t)r_ptr - (size_t)t_ptr;
  return r_ptr;
#else
  return numa_alloc_onnode(size, numa_node);
#endif
}
/* Release memory obtained from numa_alloc_onnode_aligned().
 * The disabled (#if 0) path would undo the alignment adjustment recorded
 * just before 'ptr' -- NOTE(review): it subtracts the stored byte offset in
 * size_t units; verify against the allocator before re-enabling.
 * As compiled this is a plain numa_free(), matching the enabled
 * allocation path. */
void numa_free_aligned(void *ptr, size_t size) {
#if 0
  if (ptr == NULL) return;
  void *t_ptr = (void*)((size_t*)ptr - *((size_t*)ptr - 1));
  numa_free(t_ptr, size);
#else
  numa_free(ptr, size);
#endif
}
/* Discover the NUMA topology and give each configured node a contiguous
 * [thr_s, thr_e] range out of the first n_threads CPUs; also allocates the
 * per-layer bookkeeping arrays that the later setup_/allocate_ helpers fill
 * in.  The result is published through *numa_thr_cfg_.  Returns 1.
 * NOTE(review): the scan assumes a node's CPUs are contiguous in the global
 * CPU numbering, and thr_e stays unset for a node that owns none of the
 * first n_threads CPUs -- confirm callers never use such a node's range. */
int setup_my_numa(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, int n_threads) {
  int max_nodes = numa_max_node() + 1;
  int max_cfg_nodes = numa_num_configured_nodes();
  int max_cfg_cpus = numa_num_configured_cpus();
  int max_task_cpus = numa_num_task_cpus();
  my_numa_thr_cfg *numa_thr_cfg = (my_numa_thr_cfg *) malloc(sizeof(my_numa_thr_cfg) * max_cfg_nodes);
  printf("NUMA configuration:\n");
  printf("There are %d numa nodes on the system\n", max_nodes);
  printf("There are %d configured numa nodes on the system\n", max_cfg_nodes);
  printf("There are %d configured CPUs on the system\n", max_cfg_cpus);
  printf("There are %d CPUs asigned for the current task\n", max_task_cpus);
  struct bitmask* bmask = numa_bitmask_alloc(max_cfg_cpus);
  int thr_count = 0, i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    numa_node_to_cpus(i, bmask);
    /* per-layer bookkeeping, filled in later by setup_my_numa_fwd/bwd_d */
    numa_thr_cfg[i].scratch = (float**) malloc(sizeof(float*) * num_layers);
    numa_thr_cfg[i].layer_size = (size_t*)malloc(sizeof(size_t)*num_layers);
    numa_thr_cfg[i].blocksOFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_tr_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_tr_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_tr_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_tr_e = (int*)malloc(sizeof(int)*num_layers);
    /*
    printf("@@@@@ node %d size %zd cpus ", i, bmask->size);
    size_t j = 0;
    for(j = 0; j < bmask->size; j++)
      printf("%d", numa_bitmask_isbitset(bmask, j));
    printf("\n");
    */
    /* count how many CPUs belong to node i */
    int num_threads_in_mask = 0;
    int t = 0;
    for (t = 0; t < bmask->size; t++)
      if (numa_bitmask_isbitset(bmask, t)) num_threads_in_mask++;
    int node_threads = 0;
    /* advance to the first CPU that belongs to this node: becomes thr_s */
    while(thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count)) {
        numa_thr_cfg[i].thr_s = thr_count;
        break;
      }
      thr_count++; node_threads++;
    }
    /* extend thr_e while the following CPUs also belong to this node */
    while(thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count))
        numa_thr_cfg[i].thr_e = thr_count;
      thr_count++; node_threads++;
    }
  }
  /* fix: the bitmask was leaked previously */
  numa_bitmask_free(bmask);
  *numa_thr_cfg_ = numa_thr_cfg;
  return 1;
}
/* Record, per NUMA node and per layer, the range of output-feature-map
 * blocks [blocksOFm_s, blocksOFm_e] that the node's threads touch during the
 * FWD pass, mirroring the thread decomposition used by the compute kernels.
 * Returns 1 on success, -1 for unsupported configurations (fwd_bf > 1). */
int setup_my_numa_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    int l = 0;
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
      const libxsmm_blasint nBlocksMB = my_fc_fwd[l].N / my_fc_fwd[l].bn;
      /* blocking over the reduction dimension is not handled here */
      if (my_fc_fwd[l].fwd_bf > 1) {
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      }
      int thr = 0;
      if (my_fc_fwd[l].fwd_2d_blocking == 1) {
        /* 2D blocking: each thread's OFm span follows from its row id */
        libxsmm_blasint row_teams = my_fc_fwd[l].fwd_row_teams;
        libxsmm_blasint M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
        /* start from an empty range, then union in each thread's span */
        numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
        numa_thr_cfg[i].blocksOFm_e[l] = 0;
        for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e; thr++) {
          libxsmm_blasint my_row_id = thr % row_teams; /* ltid */
          libxsmm_blasint my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
          libxsmm_blasint my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
          /* running min of span starts / max of span ends */
          numa_thr_cfg[i].blocksOFm_s[l] = (my_M_start < numa_thr_cfg[i].blocksOFm_s[l])
                                           ? my_M_start
                                           : numa_thr_cfg[i].blocksOFm_s[l];
          numa_thr_cfg[i].blocksOFm_e[l] = (my_M_end > numa_thr_cfg[i].blocksOFm_e[l])
                                           ? my_M_end
                                           : numa_thr_cfg[i].blocksOFm_e[l];
        }
      } else {
        /* 1D decomposition over nBlocksOFm*nBlocksMB tasks with the same
           chunking the kernels use; translate each thread's task range into
           an OFm block range and union them over the node's threads */
        numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
        numa_thr_cfg[i].blocksOFm_e[l] = 0;
        for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e; thr++) {
          const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
          const libxsmm_blasint chunksize = (work % my_fc_fwd[l].threads == 0) ?
            (work / my_fc_fwd[l].threads) : ((work / my_fc_fwd[l].threads) + 1);
          const libxsmm_blasint thr_begin = (thr * chunksize < work) ? (thr * chunksize) : work;
          const libxsmm_blasint thr_end = ((thr + 1) * chunksize < work) ? ((thr + 1) * chunksize) : work;
          int ofm_s = thr_begin / nBlocksMB;
          int ofm_e = (thr_end-1) / nBlocksMB;
          numa_thr_cfg[i].blocksOFm_s[l] = (ofm_s < numa_thr_cfg[i].blocksOFm_s[l])
                                           ? ofm_s
                                           : numa_thr_cfg[i].blocksOFm_s[l];
          numa_thr_cfg[i].blocksOFm_e[l] = (ofm_e > numa_thr_cfg[i].blocksOFm_e[l])
                                           ? ofm_e
                                           : numa_thr_cfg[i].blocksOFm_e[l];
        }
#if 0
        printf("numa_thr_cfg[%d].blocksOFm_s[%d] %d numa_thr_cfg[%d].blocksOFm_e[%d] %d\n",
               i, l, numa_thr_cfg[i].blocksOFm_s[l], i, l, numa_thr_cfg[i].blocksOFm_e[l]);
#endif
      }
    }
  }
  return 1;
}
/* Build, per layer, a lookup table mapping each output-feature-map block to
 * the NUMA node whose FWD weight chunk covers it (used on BWD/d to locate
 * weights for transposition).  When several nodes' [blocksOFm_s, blocksOFm_e]
 * ranges cover the same block, the highest-numbered node wins. */
void set_fwd_ofm_to_node(int **fwd_ofm_to_node, my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *cfgs = *numa_thr_cfg_;
  const int n_nodes = numa_num_configured_nodes();
  int layer;
  for (layer = 0; layer < num_layers; layer++) {
    const libxsmm_blasint n_ofm_blocks = my_fc_fwd[layer].K / my_fc_fwd[layer].bk;
    int *map = (int*) malloc(sizeof(int) * n_ofm_blocks);
    int blk;
    fwd_ofm_to_node[layer] = map;
    for (blk = 0; blk < n_ofm_blocks; blk++) {
      int node;
      /* ascending node scan: a later node covering the same block
         overwrites the earlier one, matching the original behavior */
      for (node = 0; node < n_nodes; node++) {
        if (blk >= cfgs[node].blocksOFm_s[layer] && blk <= cfgs[node].blocksOFm_e[layer])
          map[blk] = node;
      }
    }
  }
}
/* Release the per-layer ofm->node lookup tables created by
 * set_fwd_ofm_to_node(); the table-of-tables array itself stays owned by
 * the caller. */
void free_fwd_ofm_to_node(int **fwd_ofm_to_node, int num_layers) {
  int layer = 0;
  while (layer < num_layers) {
    free(fwd_ofm_to_node[layer]);
    ++layer;
  }
}
/* Record, per NUMA node and per layer, the range of input-feature-map blocks
 * [blocksIFm_tr_s, blocksIFm_tr_e] touched by the node's threads during
 * BWD/d (transposed-weight) compute, mirroring the 1D thread decomposition.
 * Returns 1 on success, -1 for unsupported configurations
 * (bwd_bf > 1 or 2D blocking). */
int setup_my_numa_bwd_d(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_bwd_config* my_fc_bwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    int l = 0;
    for (l = 0; l < num_layers; l++) {
      /* blocking over the reduction dimension is not handled here */
      if (my_fc_bwd[l].bwd_bf > 1) {
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      }
      int thr = 0;
      const libxsmm_blasint nBlocksIFm = my_fc_bwd[l].C / my_fc_bwd[l].bc;
      const libxsmm_blasint nBlocksMB = my_fc_bwd[l].N / my_fc_bwd[l].bn;
      if (my_fc_bwd[l].bwd_2d_blocking == 1) {
        /* 2D-blocked BWD is not supported by this NUMA setup */
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      } else {
        /* start from an empty range, then union in each thread's span */
        numa_thr_cfg[i].blocksIFm_tr_s[l] = nBlocksIFm;
        numa_thr_cfg[i].blocksIFm_tr_e[l] = 0;
        for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e; thr++) {
          /* number of tasks that could be run in parallel */
          const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
          /* compute chunk size */
          const libxsmm_blasint chunksize = (work % my_fc_bwd[l].threads == 0) ?
            (work / my_fc_bwd[l].threads) : ((work / my_fc_bwd[l].threads) + 1);
          /* compute thr_begin and thr_end */
          const libxsmm_blasint thr_begin = (thr * chunksize < work) ? (thr * chunksize) : work;
          const libxsmm_blasint thr_end = ((thr + 1) * chunksize < work) ? ((thr + 1) * chunksize) : work;
          /* translate the task range into an IFm block range */
          int ifm_s = thr_begin / nBlocksMB;
          int ifm_e = (thr_end-1) / nBlocksMB;
          /* running min of range starts / max of range ends */
          numa_thr_cfg[i].blocksIFm_tr_s[l] = (ifm_s < numa_thr_cfg[i].blocksIFm_tr_s[l])
                                              ? ifm_s
                                              : numa_thr_cfg[i].blocksIFm_tr_s[l];
          numa_thr_cfg[i].blocksIFm_tr_e[l] = (ifm_e > numa_thr_cfg[i].blocksIFm_tr_e[l])
                                              ? ifm_e
                                              : numa_thr_cfg[i].blocksIFm_tr_e[l];
        }
#if 0
        printf("numa_thr_cfg[%d].blocksIFm_tr_s[%d] %d numa_thr_cfg[%d].blocksIFm_tr_e[%d] %d\n",
               i, l, numa_thr_cfg[i].blocksIFm_tr_s[l], i, l, numa_thr_cfg[i].blocksIFm_tr_e[l]);
#endif
      }
    }
  }
  return 1;
}
/* Allocate, for every NUMA node and layer, an on-node copy of the node's FWD
 * weight chunk: ((blocksOFm_e - blocksOFm_s) + 1) OFM blocks of
 * nBlocksIFm*bc*bk floats each (2 MiB alignment requested).
 * Nodes with no blocks for a layer are skipped and keep scratch[l] and
 * layer_size[l] unset -- NOTE(review): consumers must guard on the same
 * condition before dereferencing those entries.
 * Returns 1 on success, -1 if any on-node allocation fails. */
int allocate_numa_buffers_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0, l = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
      /* floats per OFM block (all its IFM sub-blocks) */
      const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
      int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
      /* node owns no part of this layer: nothing to allocate */
      if (l_nBlocksOFm <= 0)
        continue;
      numa_thr_cfg[i].layer_size[l] = sizeof(float) * ((l_nBlocksOFm) * OFM_shift);
      numa_thr_cfg[i].scratch[l] = (float*)numa_alloc_onnode_aligned(numa_thr_cfg[i].layer_size[l], i, 2097152);
      if (numa_thr_cfg[i].scratch[l] == NULL) {
        printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", i);
        return -1;
      }
    }
  }
  return 1;
}
/* Allocate one BWD/d (transposed-weight) scratch buffer per NUMA node,
 * sized for the largest per-layer chunk the node touches: max over layers of
 * ((blocksIFm_tr_e - blocksIFm_tr_s) + 1) * nBlocksOFm*bc*bk floats.
 * NOTE(review): despite its name, 'l_nBlocksIFm' holds an element count
 * (floats), not a block count.
 * Returns 1 on success, -1 if any on-node allocation fails. */
int allocate_numa_buffers_bwd_d(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_bwd_config* my_fc_bwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0, l = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    int l_nBlocksIFm = 0;
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksOFm = my_fc_bwd[l].K / my_fc_bwd[l].bk;
      /* floats per IFM block (all its OFM sub-blocks) */
      const libxsmm_blasint IFM_shift = nBlocksOFm * my_fc_bwd[l].bc * my_fc_bwd[l].bk;
      /* keep the running maximum chunk size over all layers */
      if (l_nBlocksIFm <= ((numa_thr_cfg[i].blocksIFm_tr_e[l] - numa_thr_cfg[i].blocksIFm_tr_s[l]) + 1) * IFM_shift)
        l_nBlocksIFm = ((numa_thr_cfg[i].blocksIFm_tr_e[l] - numa_thr_cfg[i].blocksIFm_tr_s[l]) + 1) * IFM_shift;
    }
    numa_thr_cfg[i].bwd_d_scratch_size = sizeof(float) * (l_nBlocksIFm);
    /* 2 MiB alignment requested (currently a plain on-node allocation) */
    numa_thr_cfg[i].bwd_d_scratch = (float*)numa_alloc_onnode_aligned(numa_thr_cfg[i].bwd_d_scratch_size, i, 2097152);
    if (numa_thr_cfg[i].bwd_d_scratch == NULL) {
      printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", i);
      return -1;
    }
  }
  return 1;
}
/* One-shot (inference-style) copy of all layers' weights from fil_libxsmm
 * into the per-node FWD scratch buffers.  Two build modes:
 *  - default: a collapse(2) parallel-for over (node, layer); any thread may
 *    copy any node's chunk (NUMA placement relies on the earlier on-node
 *    allocation);
 *  - COPY_ON_LOCAL_NODES: threads migrate onto their node and only the
 *    node's first thread (thr_s) performs that node's copies.
 * Nodes whose blocksOFm range is empty yield l_nBlocksOFm <= 0, so the
 * per-block loop is skipped and their unset scratch[l] is never touched.
 * Returns 1. */
int copy_to_numa_buffers_fwd_inf(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd, float **fil_libxsmm) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i,l;
#ifndef COPY_ON_LOCAL_NODES
#pragma omp parallel for collapse(2) private (i,l)
#else
#pragma omp parallel private (i,l)
  {
  int tid = omp_get_thread_num();
#endif
  for (i = 0; i < max_cfg_nodes; i++) {
#ifdef COPY_ON_LOCAL_NODES
    /* pin the thread onto its own node before touching node-local memory */
    if (tid >= numa_thr_cfg[i].thr_s && tid <= numa_thr_cfg[i].thr_e) {
      numa_run_on_node(i);
    }
    /* only the node's first thread performs the copy for node i */
    if (tid == numa_thr_cfg[i].thr_s) {
#endif
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
      /* floats per OFM block (all its IFM sub-blocks) */
      const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
      int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
      int j = 0;
      for (j = 0; j < l_nBlocksOFm ; j++) {
        size_t l_BOFM_shift = j * BOFM_shift;
        /* scratch holds only this node's chunk; the global tensor is
           offset by the node's first OFM block */
        float *out = numa_thr_cfg[i].scratch[l] + l_BOFM_shift;
        float *inp = fil_libxsmm[l] + numa_thr_cfg[i].blocksOFm_s[l] * BOFM_shift + l_BOFM_shift;
        memcpy(out, inp, sizeof(float) * nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk);
      }
    }
#ifdef COPY_ON_LOCAL_NODES
    }
#endif
  }
#ifdef COPY_ON_LOCAL_NODES
  }
#endif
  return 1;
}
/* Copy layer l's weight chunk between the global tensor and this NUMA node's
 * local scratch copy, split across the node's threads at (ofm, ifm) block
 * granularity.  dir == 0: global -> node scratch; dir != 0: scratch -> global.
 * Returns 1.  NOTE(review): no barrier is issued here (commented out); the
 * caller is responsible for synchronizing before consuming the copy. */
int copy_to_numa_buffers_fwd(my_numa_thr_cfg *numa_thr_cfg, my_fc_fwd_config my_fc_fwd, float *fil_libxsmm, int numa_node, int l, int my_tid, int dir) {
  /* logical thread id within this node's thread range */
  const libxsmm_blasint ltid = my_tid - numa_thr_cfg->thr_s;
  const libxsmm_blasint nBlocksIFm = my_fc_fwd.C / my_fc_fwd.bc;
  const libxsmm_blasint IFM_shift = my_fc_fwd.bc * my_fc_fwd.bk;
  const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd.bc * my_fc_fwd.bk;
  /* number of (ofm, ifm) block copies owned by this node for layer l */
  const libxsmm_blasint work = ((numa_thr_cfg->blocksOFm_e[l] - numa_thr_cfg->blocksOFm_s[l]) + 1) * nBlocksIFm;
  /* compute chunk size; fix: guard against a single-thread node
     (thr_e == thr_s), which previously divided by zero */
  int thr = numa_thr_cfg->thr_e - numa_thr_cfg->thr_s;
  if (thr < 1) thr = 1;
  const libxsmm_blasint chunksize = (work % thr == 0) ? (work / thr) : ((work / thr) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /*libxsmm_barrier_init( my_fc_fwd.barrier, my_tid );*/
  float *inp, *out;
  if (dir) {
    /* node scratch -> global weight tensor */
    inp = numa_thr_cfg->scratch[l];
    out = fil_libxsmm + numa_thr_cfg->blocksOFm_s[l] * OFM_shift;
  } else {
    /* global weight tensor -> node scratch */
    out = numa_thr_cfg->scratch[l];
    inp = fil_libxsmm + numa_thr_cfg->blocksOFm_s[l] * OFM_shift;
  }
  int j = 0;
  for (j = thr_begin; j < thr_end; j++) {
    int ofm = j / nBlocksIFm;
    int ifm = j % nBlocksIFm;
    float *l_out = out + ofm * OFM_shift + ifm * IFM_shift;
    float *l_inp = inp + ofm * OFM_shift + ifm * IFM_shift;
    memcpy(l_out, l_inp, sizeof(float) * IFM_shift);
  }
  /*libxsmm_barrier_wait( my_fc_fwd.barrier, my_tid );*/
  return 1;
}
int main(int argc, char* argv[])
{
float **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm;
float **bias_libxsmm, **delbias_libxsmm;
unsigned char **relumask_libxsmm;
int *label_libxsmm;
my_eltwise_fuse my_fuse;
my_fc_fwd_config* my_fc_fwd;
my_fc_bwd_config* my_fc_bwd;
my_opt_config* my_opt;
my_smax_fwd_config my_smax_fwd;
my_smax_bwd_config my_smax_bwd;
void* scratch = NULL;
size_t scratch_size = 0;
/* some parameters we can overwrite via cli,
default is some inner layer of overfeat */
int iters = 10; /* repetitions of benchmark */
int MB = 256; /* mini-batch size, "N" */
int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */
char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP, 'U', WU */
int bn = 32;
int bk = 32;
int bc = 32;
int *C; /* number of input feature maps, "C" */
int num_layers = 0;
#if defined(_OPENMP)
int nThreads = omp_get_max_threads(); /* number of threads */
#else
int nThreads = 1; /* number of threads */
#endif
unsigned long long l_start, l_end;
unsigned long long *fwd_time, *bwd_time, *solver_time;
double l_total = 0.0;
double gflop = 0.0;
int i, j;
double fil_size = 0.0;
double act_size = 0.0;
float lr = 0.2f;
float loss_weight = 0.1f;
libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff;
libxsmm_matdiff_clear(&norms_fwd);
libxsmm_matdiff_clear(&norms_bwd);
libxsmm_matdiff_clear(&norms_upd);
libxsmm_matdiff_clear(&diff);
if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
libxsmm_rng_set_seed(1);
/* reading new values from cli */
i = 1;
num_layers = argc - 9;
if (argc > i) iters = atoi(argv[i++]);
if (argc > i) MB = atoi(argv[i++]);
if (argc > i) fuse_type = atoi(argv[i++]);
if (argc > i) type = *(argv[i++]);
if (argc > i) bn = atoi(argv[i++]);
if (argc > i) bk = atoi(argv[i++]);
if (argc > i) bc = atoi(argv[i++]);
/* allocate the number of channles buffer */
if ( num_layers < 1 ) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
C = (int*)malloc((num_layers+2)*sizeof(int));
for (j = 0 ; i < argc; ++i, ++j ) {
C[j] = atoi(argv[i]);
}
/* handle softmax config */
C[num_layers+1] = C[num_layers];
if (type != 'A' && type != 'F' && type != 'B') {
printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n");
return -1;
}
if ( (fuse_type < 0) || (fuse_type > 5) ) {
printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n");
return -1;
}
#if defined(__SSE3__)
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif
/* print some summary */
printf("##########################################\n");
printf("# Setting Up (Common) #\n");
printf("##########################################\n");
printf("PARAMS: N:%d\n", MB);
printf("PARAMS: Layers: %d\n", num_layers);
printf("PARAMS: ITERS:%d", iters); printf(" Threads:%d\n", nThreads);
for (i = 0; i < num_layers; ++i ) {
if (i == 0) {
act_size += (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0);
fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) );
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) );
printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) );
/* allocate data */
/* +2 because of the softwax layer */
act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) );
delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
for ( i = 0 ; i < num_layers+2; ++i ) {
#ifdef ACT_NUMA_INTERLEAVED
act_libxsmm[i] = (float*)numa_alloc_interleaved( MB*C[i]*sizeof(float));
#else
act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
#endif
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
}
}
fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
}
bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
}
relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
for ( i = 0 ; i < num_layers; ++i ) {
relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
}
label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);
/* init data */
for ( i = 0 ; i < num_layers+2; ++i ) {
my_init_buf( act_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
my_init_buf( delact_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( bias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( delbias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
}
zero_buf_int32( label_libxsmm, MB );
printf("\n");
printf("##########################################\n");
printf("# Setting Up (custom-Storage) #\n");
printf("##########################################\n");
if ( fuse_type == 0 ) {
my_fuse = MY_ELTWISE_FUSE_NONE;
} else if ( fuse_type == 1 ) {
my_fuse = MY_ELTWISE_FUSE_BIAS;
} else if ( fuse_type == 2 ) {
my_fuse = MY_ELTWISE_FUSE_RELU;
} else if ( fuse_type == 4 ) {
my_fuse = MY_ELTWISE_FUSE_BIAS_RELU;
} else {
my_fuse = MY_ELTWISE_FUSE_NONE;
}
/* allocating handles */
my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) );
my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) );
my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) );
/* setting up handles + scratch */
for ( i = 0; i < num_layers; ++i ) {
my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse);
my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse);
my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, lr );
/* let's allocate and bind scratch */
if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
}
/* softmax+loss is treated as N+! layer */
my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads );
my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads, loss_weight );
if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
my_numa_thr_cfg *numa_thr_cfg;
/* Define numa configuration: #numa nodes, #threads on each node */
setup_my_numa(&numa_thr_cfg, num_layers, nThreads);
if ( type == 'F') {
printf("##########################################\n");
printf("# Performance - FWD (custom-Storage) #\n");
printf("##########################################\n");
setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
const int numa_node = numa_node_of_cpu(tid);
for ( i = 0; i < num_layers; ++i) {
copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 0);
}
for (j = 0; j < iters; ++j) {
for ( i = 0; i < num_layers; ++i) {
my_fc_fwd_exec( my_fc_fwd[i], act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i);
}
#ifdef USE_SOFTMAX
my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
0, tid, scratch );
#endif
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = 0; i < num_layers; ++i) {
gflop += (2.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
/* Print some norms on last act for fwd and weights of first layer after all iterations */
libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, act_libxsmm[num_layers], act_libxsmm[num_layers], 0, 0);
printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
}
if (type == 'B') {
printf("##########################################\n");
printf("# NOT Supported: Performance - BWD (custom-Storage) #\n");
printf("##########################################\n");
exit( -1 );
#if 0
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (j = 0; j < iters; ++j) {
#ifdef USE_SOFTMAX
my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
#endif
for ( i = num_layers-1; i > 0; --i) {
my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch );
my_opt_exec( my_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid, scratch );
}
my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
my_opt_exec( my_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid, scratch );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (4.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (2.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
#endif
}
if (type == 'A') {
printf("##########################################\n");
printf("# Performance - FWD-BWD (custom-Storage) #\n");
printf("##########################################\n");
/* Timers: */
fwd_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
bwd_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
solver_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
/* Calculate chunks of weights used on each numa node on FWD based on FWD thread decomposition */
setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
/* Calculate chunks of weights used on each numa node on BWD/d based on BWD/d thread decomposition */
setup_my_numa_bwd_d(&numa_thr_cfg, num_layers, my_fc_bwd);
/* NUMA aware allocations of buffers needed for FWD */
allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
/* NUMA aware allocations of buffers needed for BWD */
allocate_numa_buffers_bwd_d(&numa_thr_cfg, num_layers, my_fc_bwd);
/* Utility needed for transposition of weights on BWD/d: get numa node based on current ofm */
int **fwd_ofm_to_node = (int**)malloc(sizeof(int*) * num_layers);
set_fwd_ofm_to_node(fwd_ofm_to_node, &numa_thr_cfg, num_layers, my_fc_fwd);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
fwd_time[tid] = 0;
bwd_time[tid] = 0;
solver_time[tid] = 0;
const int numa_node = numa_node_of_cpu(tid);
for ( i = 0; i < num_layers; ++i) {
/* Copy original weights to NUMA FWD buffers. Threading decomposition is the same with FWD. */
copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 0);
}
for (j = 0; j < iters; ++j) {
unsigned long long fwd_time_start = libxsmm_timer_tick();
for ( i = 0; i < num_layers; ++i) {
/* FWD: Use weights from NUMA FWD buffers */
my_fc_fwd_exec( my_fc_fwd[i], act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i );
}
fwd_time[tid] += (libxsmm_timer_tick() - fwd_time_start);
#ifdef USE_SOFTMAX
my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
0, tid, scratch );
my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
#endif
for ( i = num_layers-1; i > 0; --i) {
unsigned long long bwd_time_start = libxsmm_timer_tick();
/* Transpose weights from NUMA FWD buffers to NUMA BWD buffer. Threading decomposition is the same with BWD/d. */
my_fc_bwd_d_transpose( my_fc_bwd[i], tid , &numa_thr_cfg, numa_node, i, fwd_ofm_to_node[i] );
/* BWD/d: Use weights from NUMA BWD buffers */
my_fc_bwd_exec( my_fc_bwd[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch, &numa_thr_cfg[numa_node], i );
bwd_time[tid] += (libxsmm_timer_tick() - bwd_time_start);
/* Solver: Update NUMA FWD buffers. Threading decomposition is the same with FWD. */
unsigned long long solver_time_start = libxsmm_timer_tick();
my_opt_exec( my_opt[i], delfil_libxsmm[i], 0, tid, &numa_thr_cfg[numa_node], i, my_fc_fwd[i] );
solver_time[tid] += (libxsmm_timer_tick() - solver_time_start);
}
/* BWD/w: todo */
unsigned long long bwd_time_start = libxsmm_timer_tick();
my_fc_bwd_exec( my_fc_bwd[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch, &numa_thr_cfg[numa_node], 0 );
bwd_time[tid] += (libxsmm_timer_tick() - bwd_time_start);
/* Solver: Update NUMA FWD buffers. Threading decomposition is the same with FWD. */
unsigned long long solver_time_start = libxsmm_timer_tick();
my_opt_exec( my_opt[0], delfil_libxsmm[0], 0, tid, &numa_thr_cfg[numa_node], 0, my_fc_fwd[0] );
solver_time[tid] += (libxsmm_timer_tick() - solver_time_start);
}
/* Copy result from NUMA FWD Buffers to original weights. Threading decomposition is the same with FWD. */
for ( i = 0; i < num_layers; ++i) {
copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 1);
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
free_fwd_ofm_to_node(fwd_ofm_to_node, num_layers);
free(fwd_ofm_to_node);
#ifdef CHECK_L1
#if 1
/* Print some norms on last act for fwd and weights of first layer after all iterations */
libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, act_libxsmm[num_layers], act_libxsmm[num_layers], 0, 0);
printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
libxsmm_matdiff_reduce(&diff, &norms_fwd);
libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, fil_libxsmm[0], fil_libxsmm[0], 0, 0);
printf("L1 of wt[0] : %.25g\n", norms_bwd.l1_ref);
libxsmm_matdiff_reduce(&diff, &norms_bwd);
#else
{
int e = 0;
FILE *fileAct, *fileWt;
fileAct = fopen("acts.txt","w+");
if (fileAct != NULL) {
for (e = 0; e < MB*C[num_layers]; e++) {
fprintf(fileAct, "%.10g\n", *((float*)act_libxsmm[num_layers] + e));
}
fclose(fileAct);
}
fileWt = fopen("weights.txt","w+");
if (fileWt != NULL) {
for (e = 0; e < C[0]*C[1]; e++) {
fprintf(fileWt, "%.10g\n", *((float*)fil_libxsmm[0] + e));
}
fclose(fileWt);
}
}
#endif
#endif
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
unsigned long long max_fwd_time = 0, max_bwd_time = 0, max_solver_time = 0;
for (i = 0; i < nThreads; i++) {
if (max_fwd_time < fwd_time[i]) max_fwd_time = fwd_time[i];
if (max_bwd_time < bwd_time[i]) max_bwd_time = bwd_time[i];
if (max_solver_time < solver_time[i]) max_solver_time = solver_time[i];
}
printf("Profiling: fwd_time = %lld, bwd_time = %lld, solver_time = %lld\n",
max_fwd_time, max_bwd_time, max_solver_time);
}
/* deallocate data */
if ( scratch != NULL ) {
libxsmm_free(scratch);
}
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[i], MB*C[i]*sizeof(float));
#else
libxsmm_free(act_libxsmm[i]);
#endif
libxsmm_free(delact_libxsmm[i]);
}
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[i+1], MB*C[i+1]*sizeof(float));
#else
libxsmm_free(act_libxsmm[i+1]);
#endif
libxsmm_free(delact_libxsmm[i+1]);
libxsmm_free(fil_libxsmm[i]);
libxsmm_free(delfil_libxsmm[i]);
libxsmm_free(bias_libxsmm[i]);
libxsmm_free(delbias_libxsmm[i]);
libxsmm_free(relumask_libxsmm[i]);
}
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[num_layers+1], MB*C[num_layers+1]*sizeof(float));
#else
libxsmm_free(act_libxsmm[num_layers+1]);
#endif
libxsmm_free(label_libxsmm);
for (i = 0; i < numa_num_configured_nodes(); i++) {
free(numa_thr_cfg[i].blocksOFm_s);
free(numa_thr_cfg[i].blocksOFm_e);
free(numa_thr_cfg[i].blocksIFm_tr_s);
free(numa_thr_cfg[i].blocksIFm_tr_e);
for (j = 0; j < num_layers; j++) {
numa_free_aligned(numa_thr_cfg[i].scratch[j], numa_thr_cfg[i].layer_size[j]);
}
free(numa_thr_cfg[i].scratch);
free(numa_thr_cfg[i].layer_size);
numa_free_aligned(numa_thr_cfg[i].bwd_d_scratch, numa_thr_cfg[i].bwd_d_scratch_size);
}
free(numa_thr_cfg);
free( my_opt );
free( my_fc_fwd );
free( my_fc_bwd );
free( act_libxsmm );
free( delact_libxsmm );
free( fil_libxsmm );
free( delfil_libxsmm );
free( bias_libxsmm );
free( delbias_libxsmm );
free( relumask_libxsmm );
free( C );
/* some empty lines at the end */
printf("\n\n\n");
return 0;
}
|
core_zlag2c.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions mixed zc -> ds
*
**/
#include "core_blas.h"
#include "core_lapack.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup core_lag2
*
* Converts m-by-n matrix A from double complex to single complex precision.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix A.
* n >= 0.
*
* @param[in] A
* The lda-by-n matrix in double complex precision to convert.
*
* @param[in] lda
* The leading dimension of the matrix A.
* lda >= max(1,m).
*
* @param[out] As
* On exit, the converted ldas-by-n matrix in single complex precision.
*
* @param[in] ldas
* The leading dimension of the matrix As.
* ldas >= max(1,m).
*
******************************************************************************/
void core_zlag2c(int m, int n,
plasma_complex64_t *A, int lda,
plasma_complex32_t *As, int ldas)
{
/* Delegate the double-complex -> single-complex demotion to LAPACKE,
   using column-major layout as documented above.
   NOTE(review): LAPACKE_zlag2c_work reports failure (an entry of A too
   large in magnitude for single precision) through its return value,
   which is discarded here -- confirm callers do not need that info.  */
LAPACKE_zlag2c_work(LAPACK_COL_MAJOR, m, n, A, lda, As, ldas);
}
/******************************************************************************/
void core_omp_zlag2c(int m, int n,
plasma_complex64_t *A, int lda,
plasma_complex32_t *As, int ldas,
plasma_sequence_t *sequence, plasma_request_t *request)
{
/* Asynchronous task wrapper around core_zlag2c.  The depend clauses
   declare A (lda*n elements) as input and As (ldas*n elements) as
   output, so the OpenMP runtime serializes this task against other
   tasks that produce or consume those buffers.  */
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:As[0:ldas*n])
{
/* Skip the conversion if an earlier task in this sequence failed.  */
if (sequence->status == PlasmaSuccess)
core_zlag2c(m, n, A, lda, As, ldas);
/* NOTE(review): 'request' is unused in this body; presumably kept for
   the uniform core_omp_* signature -- confirm.  */
}
}
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2014 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "ggc.h"
#include "function.h"
#include "hashtab.h"
#include "vec.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
TREE_INDIRECT_USING (in NAMESPACE_DECL).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
DECL_GNU_TLS_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD and OMP_DISTRIBUTE)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
OVL_ARG_DEPENDENT (in OVERLOAD)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_RECHECK_ACCESS_P (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE)
2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF)
AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL)
CALL_EXPR_LIST_INIT_P (in CALL_EXPR, AGGR_INIT_EXPR)
4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
or FIELD_DECL).
IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE)
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: unused
3: TYPE_FOR_JAVA.
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
REFERENCE_VLA_OK (in REFERENCE_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
DECL_VLA_CAPTURE_P (in FIELD_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO.
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct GTY(()) lang_identifier {
struct c_common_identifier c_common;  /* Base shared with the C-family front ends.  */
cxx_binding *namespace_bindings;      /* Per-namespace bindings; see IDENTIFIER_NAMESPACE_BINDINGS.  */
cxx_binding *bindings;                /* Innermost binding stack; see IDENTIFIER_BINDING.  */
tree class_template_info;             /* Accessed via IDENTIFIER_TEMPLATE.  */
tree label_value;                     /* Accessed via IDENTIFIER_LABEL_VALUE.  */
};
/* Return a typed pointer version of T if it designates a
C++ front-end identifier. */
inline lang_identifier*
identifier_p (tree t)
{
/* Yield the identifier view of T, or NULL for any other tree code;
   a single conditional expression replaces the if/return pair.  */
return (TREE_CODE (t) == IDENTIFIER_NODE
? (lang_identifier *) t
: NULL);
}
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID)
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
/* Underlying structure of a TEMPLATE_PARM_INDEX node.
   NOTE(review): field meanings inferred from names -- 'index' is
   presumably the parameter's position in its list, 'level' the template
   nesting depth, 'orig_level' the level before any substitution, and
   'decl' the parameter's declaration; confirm against the accessor
   macros defined elsewhere in this header.  */
struct GTY(()) template_parm_index_s {
struct tree_common common;
int index;
int level;
int orig_level;
tree decl;
};
/* Underlying structure of a pointer-to-member constant node.
   NOTE(review): 'member' presumably holds the member declaration the
   constant designates -- confirm against the PTRMEM_CST accessors.  */
struct GTY(()) ptrmem_cst {
struct tree_common common;
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
#define IDENTIFIER_GLOBAL_VALUE(NODE) \
namespace_binding ((NODE), global_namespace)
#define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), global_namespace, (VAL))
#define IDENTIFIER_NAMESPACE_VALUE(NODE) \
namespace_binding ((NODE), current_namespace)
#define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), current_namespace, (VAL))
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* The overloaded FUNCTION_DECL. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) TREE_CHAIN (NODE)
/* Polymorphic access to FUNCTION and CHAIN. */
#define OVL_CURRENT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE))
#define OVL_NEXT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE)
/* If set, this was imported in a using declaration.
This is not to confuse with being used somewhere, which
is not important for this node. */
#define OVL_USED(NODE) TREE_USED (OVERLOAD_CHECK (NODE))
/* If set, this OVERLOAD was created for argument-dependent lookup
and can be freed afterward. */
#define OVL_ARG_DEPENDENT(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
/* Underlying structure of an OVERLOAD node: 'function' is the
   FUNCTION_DECL for this entry (see OVL_FUNCTION above); the link to
   the next overload lives in TREE_CHAIN (see OVL_CHAIN/OVL_NEXT).  */
struct GTY(()) tree_overload {
struct tree_common common;
tree function;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
/* Underlying structure of a BASELINK node; the three fields are
   documented with their accessors BASELINK_BINFO, BASELINK_FUNCTIONS
   and BASELINK_ACCESS_BINFO above.  */
struct GTY(()) tree_baselink {
struct tree_common common;
tree binfo;          /* Base in which lookup found the functions.  */
tree functions;      /* FUNCTION_DECL, TEMPLATE_DECL, OVERLOAD, or TEMPLATE_ID_EXPR.  */
tree access_binfo;   /* Base where the search began; used for access checking.  */
};
/* The different kinds of ids that we encounter. */
typedef enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
} cp_id_kind;
/* The various kinds of C++0x warnings we encounter. */
typedef enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
} cpp0x_warn_str;
/* The various kinds of operation used by composite_pointer_type. */
typedef enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
} composite_pointer_operation;
/* Possible cases of expression list used by build_x_compound_expr_from_list. */
typedef enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
} expr_list_kind;
/* Possible cases of implicit bad rhs conversions. */
typedef enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
} impl_conv_rhs;
/* Possible cases of implicit or explicit bad conversions to void. */
typedef enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
} impl_conv_void;
/* Possible invalid uses of an abstract class that might not have a
specific associated declaration. */
typedef enum abstract_class_use {
ACU_UNKNOWN, /* unknown or decl provided */
ACU_CAST, /* cast to abstract class */
ACU_NEW, /* new-expression of abstract class */
ACU_THROW, /* throw-expression of abstract class */
ACU_CATCH, /* catch-parameter of abstract class */
ACU_ARRAY, /* array of abstract class */
ACU_RETURN, /* return type of abstract class */
ACU_PARM /* parameter type of abstract class */
} abstract_class_use;
/* Macros for access to language-specific slots in an identifier. */
#define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->namespace_bindings)
#define IDENTIFIER_TEMPLATE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->class_template_info)
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. Its PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE only indicates on local and class scope the current
type. For namespace scope, the presence of a type in any namespace
is indicated with global_type_node, and the real type behind must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
#define IDENTIFIER_LABEL_VALUE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->label_value)
#define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \
IDENTIFIER_LABEL_VALUE (NODE) = (VALUE)
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* Nonzero if this identifier is the prefix for a mangled C++ operator
name. */
#define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE)
/* Nonzero if this identifier is the name of a type-conversion
operator. */
#define IDENTIFIER_TYPENAME_P(NODE) \
TREE_LANG_FLAG_4 (NODE)
/* Nonzero if this identifier is the name of a constructor or
destructor. */
#define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \
TREE_LANG_FLAG_3 (NODE)
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (NAME))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
/* Underlying structure of a DEFAULT_ARG node; see DEFARG_TOKENS and
   DEFARG_INSTANTIATIONS above.  */
struct GTY (()) tree_default_arg {
struct tree_common common;
struct cp_token_cache *tokens;        /* The tokens of the (deferred) default argument.  */
vec<tree, va_gc> *instantiations;     /* Accessed via DEFARG_INSTANTIATIONS.  */
};
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT \
|| is_overloaded_fn (TREE_PURPOSE (NODE))))
/* Underlying structure of a DEFERRED_NOEXCEPT node; see
   DEFERRED_NOEXCEPT_PATTERN and DEFERRED_NOEXCEPT_ARGS above.  */
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
tree pattern;   /* Accessed via DEFERRED_NOEXCEPT_PATTERN.  */
tree args;      /* Accessed via DEFERRED_NOEXCEPT_ARGS.  */
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
/* Underlying structure of a STATIC_ASSERT node; the fields are
   documented with their accessors STATIC_ASSERT_CONDITION,
   STATIC_ASSERT_MESSAGE and STATIC_ASSERT_SOURCE_LOCATION above.  */
struct GTY (()) tree_static_assert {
struct tree_common common;
tree condition;        /* Integral constant expression to check.  */
tree message;          /* String constant emitted when the check fails.  */
location_t location;   /* Source location of the assertion.  */
};
/* Underlying structure of an ARGUMENT_PACK_SELECT node.
   NOTE(review): presumably selects the element at 'index' out of
   'argument_pack' -- confirm against the accessor macros defined
   elsewhere in this header.  */
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
tree argument_pack;
int index;
};
/* The different kinds of traits that we encounter. */
/* The different kinds of traits that we encounter.
   NOTE(review): each enumerator presumably corresponds to the
   like-named compiler trait built-in (e.g. CPTK_IS_CLASS to __is_class,
   CPTK_UNDERLYING_TYPE to __underlying_type) -- confirm against the
   parser's trait handling.  */
typedef enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_CONVERTIBLE_TO,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE
} cp_trait_kind;
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
struct GTY (()) tree_trait_expr {
struct tree_common common;
tree type1;
tree type2;
enum cp_trait_kind kind;
};
/* Based off of TYPE_ANONYMOUS_P.  True iff NODE is a class type that
   was generated for a lambda-expression (it has a CLASSTYPE_LAMBDA_EXPR).  */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function: the operator() of a
   lambda closure type.  */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
/* The lambda's capture-default: none, [=] (copy), or [&] (reference).  */
enum cp_lambda_default_capture_mode_type {
CPLD_NONE,
CPLD_COPY,
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set.
   (Just tests whether THIS_CAPTURE is non-null.)  */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* The return type in the expression.
* NULL_TREE indicates that none was specified. */
#define LAMBDA_EXPR_RETURN_TYPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type)
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda. Note that the TREE_TYPE of a
LAMBDA_EXPR is always NULL_TREE, because we need to instantiate the
LAMBDA_EXPR in order to instantiate the type. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->closure)
/* Payload of a LAMBDA_EXPR node; all fields are accessed via the
   LAMBDA_EXPR_* macros above, which also document their meaning.  */
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
tree capture_list;
tree this_capture;
tree return_type;
tree extra_scope;
tree closure;
vec<tree, va_gc> *pending_proxies;
location_t locus;
enum cp_lambda_default_capture_mode_type default_capture_mode;
int discriminator;
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
struct GTY(()) qualified_typedef_usage_s {
tree typedef_decl;
tree context;
location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* Payload of a TEMPLATE_INFO node.  Carries the typedefs whose access
   must be re-checked at instantiation time (see the triplet type
   above).  */
struct GTY(()) tree_template_info {
struct tree_common common;
vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
/* Discriminator values for the lang_tree_node union below; returned by
   cp_tree_node_structure (see the GTY desc annotation).  */
enum cp_tree_node_structure_enum {
TS_CP_GENERIC,
TS_CP_IDENTIFIER,
TS_CP_TPI,
TS_CP_PTRMEM,
TS_CP_BINDING,
TS_CP_OVERLOAD,
TS_CP_BASELINK,
TS_CP_WRAPPER,
TS_CP_DEFAULT_ARG,
TS_CP_DEFERRED_NOEXCEPT,
TS_CP_STATIC_ASSERT,
TS_CP_ARGUMENT_PACK_SELECT,
TS_CP_TRAIT_EXPR,
TS_CP_LAMBDA_EXPR,
TS_CP_TEMPLATE_INFO,
TS_CP_USERDEF_LITERAL,
LAST_TS_CP_ENUM
};
/* The resulting tree type.  The union of every C++-specific tree node
   structure; the GTY "desc"/"tag" annotations tell the garbage
   collector which member is live for a given node.  */
union GTY((desc ("cp_tree_node_structure (&%h)"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
union tree_node GTY ((tag ("TS_CP_GENERIC"),
desc ("tree_node_structure (&%h)"))) generic;
struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi;
struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
static_assertion;
struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
argument_pack_select;
struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
trait_expression;
struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
lambda_expression;
struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
template_info;
struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
userdef_literal;
};
/* Indices into the cp_global_trees array declared below; each slot
   caches one frequently-used global tree node.  The corresponding
   accessor macros follow the array declaration.  */
enum cp_tree_index
{
CPTI_JAVA_BYTE_TYPE,
CPTI_JAVA_SHORT_TYPE,
CPTI_JAVA_INT_TYPE,
CPTI_JAVA_LONG_TYPE,
CPTI_JAVA_FLOAT_TYPE,
CPTI_JAVA_DOUBLE_TYPE,
CPTI_JAVA_CHAR_TYPE,
CPTI_JAVA_BOOLEAN_TYPE,
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_INIT_LIST_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_GLOBAL_DELETE_FNDECL,
CPTI_AGGR_TAG,
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_NELTS_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
CPTI_LANG_NAME_JAVA,
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_NOEXCEPT_TRUE_SPEC,
CPTI_NOEXCEPT_FALSE_SPEC,
CPTI_JCLASS,
CPTI_TERMINATE,
CPTI_CALL_UNEXPECTED,
CPTI_ATEXIT_FN_PTR_TYPE,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_KEYED_CLASSES,
CPTI_NULLPTR,
CPTI_NULLPTR_TYPE,
CPTI_MAX
};
/* The array of cached global tree nodes, indexed by cp_tree_index.  */
extern GTY(()) tree cp_global_trees[CPTI_MAX];
/* The Java primitive types, for 'extern "Java"' interoperability.  */
#define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE]
#define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE]
#define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE]
#define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE]
#define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE]
#define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE]
#define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE]
#define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE]
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
/* The std and abi namespaces.  */
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define global_delete_fndecl cp_global_trees[CPTI_GLOBAL_DELETE_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* We cache these tree nodes so as to call get_identifier less
frequently. */
/* The name of a constructor that takes an in-charge parameter to
decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
decide whether or not to destroy virtual base classes and whether
or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the std namespace. */
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
/* Identifiers for the language linkage names used by extern "...".  */
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
#define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA]
/* Exception specifier used for throw(). */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
/* Cached exception specifications for noexcept(true) / noexcept(false).  */
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
/* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*). */
#define jclass_node cp_global_trees[CPTI_JCLASS]
/* The declaration for `std::terminate'. */
#define terminate_node cp_global_trees[CPTI_TERMINATE]
/* The declaration for "__cxa_call_unexpected". */
#define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED]
/* The type of the function-pointer argument to "__cxa_atexit" (or
"std::atexit", if "__cxa_atexit" is not being used). */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A TREE_LIST of the dynamic classes whose vtables may have to be
emitted in this translation unit. */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]
/* Node to indicate default access. This must be distinct from the
access nodes in tree.h. */
#define access_default_node null_node
/* Global state. */
/* One level of saved parser/binding state; the current state is always
   *scope_chain (declared below), and PREV links to outer saved states.
   Most fields are accessed via the macros that follow.  */
struct GTY(()) saved_scope {
vec<cxx_saved_binding, va_gc> *old_bindings;
tree old_namespace;
vec<tree, va_gc> *decl_ns_list;
tree class_name;
tree class_type;
tree access_specifier;
tree function_decl;
vec<tree, va_gc> *lang_base;
tree lang_name;
tree template_parms;
cp_binding_level *x_previous_class_level;
tree x_saved_tree;
/* Only used for uses of this in trailing return type. */
tree x_current_class_ptr;
tree x_current_class_ref;
int x_processing_template_decl;
int x_processing_specialization;
BOOL_BITFIELD x_processing_explicit_instantiation : 1;
BOOL_BITFIELD need_pop_function_context : 1;
int unevaluated_operand;
int inhibit_evaluation_warnings;
/* If non-zero, implicit "omp declare target" attribute is added into the
attribute lists. */
int omp_declare_target_attribute;
struct stmt_tree_s x_stmt_tree;
cp_binding_level *class_bindings;
cp_binding_level *bindings;
struct pointer_map_t *x_local_specializations;
struct saved_scope *prev;
};
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
given by the user, or, if no access specifier was given, the
default value appropriate for the kind of class (i.e., struct,
class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
active template parameters. Each node in the list represents one
level of template parameters. The innermost level is first in the
list. The depth of each level is stored as an INTEGER_CST in the
TREE_PURPOSE of each node. The parameters for that level are
stored in the TREE_VALUE. */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
/* The cached class binding level, from the most recently exited
class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A map from local variable declarations in the body of the template
presently being instantiated to the corresponding instantiated
local variables. */
#define local_specializations scope_chain->x_local_specializations
/* A list of private types mentioned, for deferred access checking. */
extern GTY(()) struct saved_scope *scope_chain;
/* A (DECL_UID -> tree) map entry; hashed/compared with the two
   functions declared below.  */
struct GTY(()) cxx_int_tree_map {
unsigned int uid;
tree to;
};
extern unsigned int cxx_int_tree_map_hash (const void *);
extern int cxx_int_tree_map_eq (const void *, const void *);
/* Global state pertinent to the current function.  Reached through
   cfun->language (see cp_function_chain below); most fields are
   accessed via the macros that follow.  */
struct GTY(()) language_function {
struct c_language_function base;
tree x_cdtor_label;
tree x_current_class_ptr;
tree x_current_class_ref;
tree x_eh_spec_block;
tree x_in_charge_parm;
tree x_vtt_parm;
tree x_return_value;
tree x_auto_return_pattern;
BOOL_BITFIELD returns_value : 1;
BOOL_BITFIELD returns_null : 1;
BOOL_BITFIELD returns_abnormally : 1;
BOOL_BITFIELD infinite_loop: 1;
BOOL_BITFIELD x_in_function_try_handler : 1;
BOOL_BITFIELD x_in_base_initializer : 1;
/* True if this function can throw an exception. */
BOOL_BITFIELD can_throw : 1;
htab_t GTY((param_is(struct named_label_entry))) x_named_labels;
cp_binding_level *bindings;
vec<tree, va_gc> *x_local_names;
/* Tracking possibly infinite loops. This is a vec<tree> only because
vec<bool> doesn't work with gtype. */
vec<tree, va_gc> *infinite_loops;
htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor destructor, the point at which all derived class
destroying/construction has been done. I.e., just before a
constructor returns, or before any base class destroying will be done
in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
PARM_DECL for the `this' pointer. The current_class_ref is an
expression for `*this'. */
#define current_class_ptr \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ptr \
: &scope_chain->x_current_class_ptr))
#define current_class_ref \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ref \
: &scope_chain->x_current_class_ref))
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
cp_function_chain->returns_abnormally
/* Set to 0 at beginning of a function definition, set to 1 if we see an
obvious infinite loop. This can have false positives and false
negatives, so it should only be used as a heuristic. */
#define current_function_infinite_loop cp_function_chain->infinite_loop
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->x_in_base_initializer
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
(cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction. */
#define current_function_auto_return_pattern \
(cp_function_chain->x_auto_return_pattern)
/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
new" or "operator delete". */
#define NEW_DELETE_OPNAME_P(NAME) \
((NAME) == ansi_opname (NEW_EXPR) \
|| (NAME) == ansi_opname (VEC_NEW_EXPR) \
|| (NAME) == ansi_opname (DELETE_EXPR) \
|| (NAME) == ansi_opname (VEC_DELETE_EXPR))
/* The IDENTIFIER_NODE naming the overloadable operator for a tree
   code, and likewise for its assignment form (e.g. += for +).  */
#define ansi_opname(CODE) \
(operator_name_info[(int) (CODE)].identifier)
#define ansi_assopname(CODE) \
(assignment_operator_name_info[(int) (CODE)].identifier)
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
/* The language linkages this front end knows about (extern "C",
   extern "C++", extern "Java").  */
enum languages { lang_c, lang_cplusplus, lang_java };
/* Macros to make error reporting functions' lives easier. */
#define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE)))
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
(TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_ANONYMOUS_P(NODE) \
(OVERLOAD_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
at instantiation time. */
#define WILDCARD_TYPE_P(T) \
(TREE_CODE (T) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (T) == TYPENAME_TYPE \
|| TREE_CODE (T) == TYPEOF_TYPE \
|| TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (T) == DECLTYPE_TYPE)
/* Nonzero if T is a class (or struct or union) type. Also nonzero
for template type parameters, typename types, and instantiated
template template parameters. Keep these checks in ascending code
order. */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
union type. */
#define SET_CLASS_TYPE_P(T, VAL) \
(TYPE_LANG_FLAG_5 (T) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
typename types, and so forth. */
#define CLASS_TYPE_P(T) \
(RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not an union. */
#define NON_UNION_CLASS_TYPE_P(T) \
(CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE)
/* Keep these checks in ascending code order. */
#define RECORD_OR_UNION_CODE_P(T) \
((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
(CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* True if this a "Java" type, defined in 'extern "Java"'. */
#define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE)
/* True if this type is dependent. This predicate is only valid if
TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
volatile-qualified. Other qualifiers are ignored. This macro is
used to test whether or not it is OK to bind an rvalue to a
reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
== TYPE_QUAL_CONST)
/* The argument-type list of a FUNCTION_DECL, minus its first entry.  */
#define FUNCTION_ARG_CHAIN(NODE) \
TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
/* A simple (purpose, value) pair of trees.  */
typedef struct GTY (()) tree_pair_s {
tree purpose;
tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
/* This is a few header flags for 'struct lang_type'. Actually,
all but the first are used only for lang_type_class; they
are put in this structure to save space. */
struct GTY(()) lang_type_header {
BOOL_BITFIELD is_lang_type_class : 1;
BOOL_BITFIELD has_type_conversion : 1;
BOOL_BITFIELD has_copy_ctor : 1;
BOOL_BITFIELD has_default_ctor : 1;
BOOL_BITFIELD const_needs_init : 1;
BOOL_BITFIELD ref_needs_init : 1;
BOOL_BITFIELD has_const_copy_assign : 1;
BOOL_BITFIELD spare : 1;
};
/* This structure provides additional information above and beyond
what is provide in the ordinary tree_type. In the past, we used it
for the types of class types, template parameters types, typename
types, and so forth. However, there can be many (tens to hundreds
of thousands) of template parameter types in a compilation, and
there's no need for this additional information in that case.
Therefore, we now use this data structure only for class types.
In the past, it was thought that there would be relatively few
class types. However, in the presence of heavy use of templates,
many (i.e., thousands) of classes can easily be generated.
Therefore, we should endeavor to keep the size of this structure to
a minimum. */
struct GTY(()) lang_type_class {
struct lang_type_header h;
unsigned char align;
/* Flag bits; most are exposed through the TYPE_HAS_* and CLASSTYPE_*
   accessor macros elsewhere in this header, which also document
   their meaning.  */
unsigned has_mutable : 1;
unsigned com_interface : 1;
unsigned non_pod_class : 1;
unsigned nearly_empty_p : 1;
unsigned user_align : 1;
unsigned has_copy_assign : 1;
unsigned has_new : 1;
unsigned has_array_new : 1;
unsigned gets_delete : 2;
unsigned interface_only : 1;
unsigned interface_unknown : 1;
unsigned contains_empty_class_p : 1;
unsigned anon_aggr : 1;
unsigned non_zero_init : 1;
unsigned empty_p : 1;
unsigned vec_new_uses_cookie : 1;
unsigned declared_class : 1;
unsigned diamond_shaped : 1;
unsigned repeated_base : 1;
unsigned being_defined : 1;
unsigned java_interface : 1;
unsigned debug_requested : 1;
unsigned fields_readonly : 1;
unsigned use_template : 2;
unsigned ptrmemfunc_flag : 1;
unsigned was_anonymous : 1;
unsigned lazy_default_ctor : 1;
unsigned lazy_copy_ctor : 1;
unsigned lazy_copy_assign : 1;
unsigned lazy_destructor : 1;
unsigned has_const_copy_ctor : 1;
unsigned has_complex_copy_ctor : 1;
unsigned has_complex_copy_assign : 1;
unsigned non_aggregate : 1;
unsigned has_complex_dflt : 1;
unsigned has_list_ctor : 1;
unsigned non_std_layout : 1;
unsigned is_literal : 1;
unsigned lazy_move_ctor : 1;
unsigned lazy_move_assign : 1;
unsigned has_complex_move_ctor : 1;
unsigned has_complex_move_assign : 1;
unsigned has_constexpr_ctor : 1;
/* When adding a flag here, consider whether or not it ought to
apply to a template instance if it applies to the template. If
so, make sure to copy it in instantiate_class_template! */
/* There are some bits left to fill out a 32-bit word. Keep track
of this by updating the size of this bitfield whenever you add or
remove a flag. */
unsigned dummy : 3;
tree primary_base;
vec<tree_pair_s, va_gc> *vcall_indices;
tree vtables;
tree typeinfo_var;
vec<tree, va_gc> *vbases;
binding_table nested_udts;
tree as_base;
vec<tree, va_gc> *pure_virtuals;
tree friend_classes;
vec<tree, va_gc> * GTY((reorder ("resort_type_method_vec"))) methods;
tree key_method;
tree decl_list;
tree template_info;
tree befriending_classes;
/* In a RECORD_TYPE, information specific to Objective-C++, such
as a list of adopted protocols or a pointer to a corresponding
@interface. See objc/objc-act.h for details. */
tree objc_info;
/* sorted_fields is sorted based on a pointer, so we need to be able
to resort it if pointers get rearranged. */
struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields")))
sorted_fields;
/* FIXME reuse another field? */
tree lambda_expr;
};
/* The lang_type variant used for pointer-to-member types; RECORD is
   the underlying record.  */
struct GTY(()) lang_type_ptrmem {
struct lang_type_header h;
tree record;
};
/* Language-specific type information, hung off TYPE_LANG_SPECIFIC.
   A tagged union: h.is_lang_type_class selects between the class
   variant (c) and the pointer-to-member variant (ptrmem); use
   LANG_TYPE_CLASS_CHECK / LANG_TYPE_PTRMEM_CHECK to access them.  */
struct GTY((variable_size)) lang_type {
union lang_type_u
{
struct lang_type_header GTY((skip (""))) h;
struct lang_type_class GTY((tag ("1"))) c;
struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
} GTY((desc ("%h.h.is_lang_type_class"))) u;
};
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked access to the class variant of TYPE_LANG_SPECIFIC: aborts
   via lang_check_failed if NODE's lang_type is not the class variant.
   Uses a GNU statement expression; the result is &lt->u.c.
   (The address-of operators on the final lines were lost to entity
   mangling -- "&lt" had been decoded to "<" -- leaving invalid C;
   restored to match the non-checking definitions below.)  */
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (! lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.c; })
/* Likewise for the pointer-to-member variant.  */
#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ptrmem; })
#else
/* Without tree checking, access the union member directly.  */
#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)
#endif /* ENABLE_TREE_CHECKING */
/* Nonzero for _CLASSTYPE means that operator delete is defined.
   Two bits: bit 0 is the regular operator delete, bit 1 -- presumably
   operator delete[]; verify against the setters of gets_delete.  */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final */
#define CLASSTYPE_FINAL(NODE) \
TYPE_FINAL_P (NODE)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" whose parameter
has a parameter of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
constructor. Note that a class can have constexpr constructors for
static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
(COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector member functions defined in this class. Each element is
either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All
functions with the same name end up in the same slot. The first
two elements are for constructors, and destructors, respectively.
All template conversion operators to innermost template dependent
types are overloaded on the next slot, if they exist. Note, the
names for these functions will not all be the same. The
non-template conversion operators & templated conversions to
non-innermost template types are next, followed by ordinary member
functions. There may be empty entries at the end of the vector.
The conversion operators are unsorted. The ordinary member
functions are sorted, once the class is complete. */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0
/* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */
#define CLASSTYPE_DESTRUCTOR_SLOT 1
/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
operators can appear. */
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
((*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_CONSTRUCTOR_SLOT])
/* A FUNCTION_DECL for the destructor for NODE. These are the
destructors that take an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTORS(NODE) \
(CLASSTYPE_METHOD_VEC (NODE) \
? (*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_DESTRUCTOR_SLOT] \
: NULL_TREE)
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* True if this is a Java interface type, declared with
'__attribute__ ((java_interface))'. */
#define TYPE_JAVA_INTERFACE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->java_interface)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy.) */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
Some of the flags live here only to make lang_decl_min/fn smaller. Do
not make this struct larger than 32 bits; instead, make sel smaller. */
struct GTY(()) lang_decl_base {
unsigned selector : 16; /* Larger than necessary for faster access. */
ENUM_BITFIELD(languages) language : 4;
unsigned use_template : 2;
unsigned not_really_extern : 1; /* var or fn */
unsigned initialized_in_class : 1; /* var or fn */
unsigned repo_available_p : 1; /* var or fn */
unsigned threadprivate_or_deleted_p : 1; /* var or fn */
unsigned anticipated_p : 1; /* fn, type or template */
unsigned friend_attr : 1; /* fn, type or template */
unsigned template_conv_p : 1; /* var or template */
unsigned odr_used : 1; /* var or fn */
unsigned u2sel : 1; /* Discriminates lang_decl_min::u2 (see LANG_DECL_U2_CHECK). */
/* 1 spare bit */
};
/* True for DECL codes which have template info and access, i.e. those
whose DECL_LANG_SPECIFIC contains at least a lang_decl_min. */
#define LANG_DECL_HAS_MIN(NODE) \
(VAR_OR_FUNCTION_DECL_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes. */
struct GTY(()) lang_decl_min {
struct lang_decl_base base;
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_ALIAS.
In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
DECL_TEMPLATE_INFO. */
tree template_info;
/* Discriminated by base.u2sel: 0 selects ACCESS, 1 selects
DISCRIMINATOR. */
union lang_decl_u2 {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_VIRTUAL_OFFSET.
Otherwise this is DECL_ACCESS. */
tree GTY ((tag ("0"))) access;
/* For VAR_DECL in function, this is DECL_DISCRIMINATOR. */
int GTY ((tag ("1"))) discriminator;
} GTY ((desc ("%0.u.base.u2sel"))) u2;
};
/* Additional DECL_LANG_SPECIFIC information for functions. */
struct GTY(()) lang_decl_fn {
struct lang_decl_min min;
/* In an overloaded operator, this is the value of
DECL_OVERLOADED_OPERATOR_P. */
ENUM_BITFIELD (tree_code) operator_code : 16;
unsigned global_ctor_p : 1;
unsigned global_dtor_p : 1;
unsigned constructor_attr : 1;
unsigned destructor_attr : 1;
unsigned assignment_operator_p : 1;
unsigned static_function : 1;
unsigned pure_virtual : 1;
unsigned defaulted_p : 1;
unsigned has_in_charge_parm_p : 1;
unsigned has_vtt_parm_p : 1;
unsigned pending_inline_p : 1;
unsigned nonconverting : 1;
unsigned thunk_p : 1;
unsigned this_thunk_p : 1;
unsigned hidden_friend_p : 1;
unsigned omp_declare_reduction_p : 1;
/* No spare bits on 32-bit hosts, 32 on 64-bit hosts. */
/* For a non-thunk function decl, this is a tree list of
friendly classes. For a thunk function decl, it is the
thunked to function decl. */
tree befriending_classes;
/* For a non-virtual FUNCTION_DECL, this is
DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
this pointer and result pointer adjusting thunks are
chained here. This pointer thunks to return pointer thunks
will be chained on the return pointer thunk. */
tree context;
/* Discriminated by the thunk_p bit above. */
union lang_decl_u5
{
/* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
DECL_CLONED_FUNCTION. */
tree GTY ((tag ("0"))) cloned_function;
/* In a FUNCTION_DECL for which THUNK_P holds this is the
THUNK_FIXED_OFFSET. */
HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
} GTY ((desc ("%1.thunk_p"))) u5;
/* Discriminated by the pending_inline_p bit above. */
union lang_decl_u3
{
struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
struct language_function * GTY ((tag ("0")))
saved_language_function;
} GTY ((desc ("%1.pending_inline_p"))) u;
};
/* DECL_LANG_SPECIFIC for namespaces. */
struct GTY(()) lang_decl_ns {
struct lang_decl_base base;
cp_binding_level *level; /* Accessed via NAMESPACE_LEVEL. */
};
/* DECL_LANG_SPECIFIC for parameters. */
struct GTY(()) lang_decl_parm {
struct lang_decl_base base;
int level; /* Accessed via DECL_PARM_LEVEL. */
int index; /* Accessed via DECL_PARM_INDEX. */
};
/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
union rather than a struct containing a union as its only field, but
tree.h declares it as a struct. */
struct GTY((variable_size)) lang_decl {
/* Discriminated by base.selector. */
union GTY((desc ("%h.base.selector"))) lang_decl_u {
struct lang_decl_base GTY ((default)) base;
struct lang_decl_min GTY((tag ("0"))) min;
struct lang_decl_fn GTY ((tag ("1"))) fn;
struct lang_decl_ns GTY((tag ("2"))) ns;
struct lang_decl_parm GTY((tag ("3"))) parm;
} u;
};
/* Looks through a template (if present) to find what it declares;
any other NODE is returned unchanged. */
#define STRIP_TEMPLATE(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked accessors for the lang_decl union members.  In checking
   builds each accessor is a GNU statement expression that verifies
   NODE really carries the requested variant (via LANG_DECL_HAS_MIN or
   the base.selector field) before yielding a pointer to the member.
   The result expression must be the address of local LT: the text had
   been corrupted to "<->u.xxx" (an HTML-entity mangling of "&lt->"),
   which does not compile; restored to "&lt->u.xxx" below. */
#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE)) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min; })
/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
template, not just on a FUNCTION_DECL. So when looking for things in
lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
#define LANG_DECL_FN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.fn; })
#define LANG_DECL_NS_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ns; })
#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != PARM_DECL) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.parm; })
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min.u2; })
#else
/* Non-checking builds: go straight to the union member. */
#define LANG_DECL_MIN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.min)
#define LANG_DECL_FN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
#define LANG_DECL_NS_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.ns)
#define LANG_DECL_PARM_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.parm)
#define LANG_DECL_U2_CHECK(NODE, TF) \
(&DECL_LANG_SPECIFIC (NODE)->u.min.u2)
#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
DECL_CXX_CONSTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized in-charge destructor, the in-charge deleting destructor,
or the base destructor. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
*/
#define FOR_EACH_CLONE(CLONE, FN) \
if (TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))) \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) \
(DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE)))
/* If FN is a conversion operator, the type to which it converts.
Otherwise, NULL_TREE. */
#define DECL_CONV_FN_TYPE(FN) \
(DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE)
/* Nonzero if NODE, which is a TEMPLATE_DECL, is a template
conversion operator to a type dependent on the innermost template
args. */
#define DECL_TEMPLATE_CONV_FN_P(NODE) \
(DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p)
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. (Reuses the template_conv_p bit, which is
otherwise only meaningful for templates.) */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true)
/* Set the overloaded operator code for NODE to CODE. */
#define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \
(LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE))
/* If NODE is an overloaded operator, then this returns the TREE_CODE
associated with the overloaded operator.
DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine
whether or not NODE is an assignment operator. If NODE is not an
overloaded operator, ERROR_MARK is returned. Since the numerical
value of ERROR_MARK is zero, this macro can be used as a predicate
to test whether or not NODE is an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
(IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK)
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->assignment_operator_p)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears in (or will appear
in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_attr)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier. */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the base it inherits from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_CONSTRUCTOR_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR_BASE(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__PRETTY_FUNCTION__"))
/* Nonzero if the thread-local variable was declared with __thread
as opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f (); };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* For a NAMESPACE_DECL: the list of using namespace directives
The PURPOSE is the used namespace, the value is the namespace
that is the common ancestor. */
#define DECL_NAMESPACE_USING(NODE) DECL_VINDEX (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users
of a namespace, to record the transitive closure of using namespace. */
#define DECL_NAMESPACE_USERS(NODE) DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the list of namespaces which have associated
themselves with this one. */
#define DECL_NAMESPACE_ASSOCIATIONS(NODE) \
(NAMESPACE_DECL_CHECK (NODE)->decl_non_common.saved_tree)
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST concatenating using directives, indicate indirect
directives */
#define TREE_INDIRECT_USING(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
was inherited from a template parameter, not explicitly indicated. */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Nonzero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Nonzero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e, a type
which declaration was written 'using name-of-type =
another-type'. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
/* For a class type: if this structure has many fields, we'll sort them
and put them into a TREE_VEC. */
#define CLASSTYPE_SORTED_FIELDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields)
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info)
/* Template information for an ENUMERAL_TYPE. Although an enumeration may
not be a primary template, it may be declared within the scope of a
primary template and the enumeration constants may depend on
non-type template parameters. */
#define ENUM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \
->template_info)
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
BOUND_TEMPLATE_TEMPLATE_PARM type. Note that if NODE is a
specialization of an alias template, this accessor returns the
template info for the alias template, not the one (if any) for the
template of the underlying type. */
#define TYPE_TEMPLATE_INFO(NODE) \
((TYPE_ALIAS_P (NODE) && DECL_LANG_SPECIFIC (TYPE_NAME (NODE))) \
? (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
: NULL_TREE) \
: ((TREE_CODE (NODE) == ENUMERAL_TYPE) \
? ENUM_TEMPLATE_INFO (NODE) \
: ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) \
: (CLASS_TYPE_P (NODE) \
? CLASSTYPE_TEMPLATE_INFO (NODE) \
: NULL_TREE))))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \
: ((CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))))
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
this property contains the number of arguments that are not
defaulted. */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
property. */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#ifdef ENABLE_CHECKING
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
/* The list of typedefs - used in the template - that need
access checking at template instantiation time.
FIXME this should be associated with the TEMPLATE_DECL, not the
TEMPLATE_INFO. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template arguments NODE are actually a vector of
   vectors, rather than just a vector.  NODE is parenthesized at its
   first use so that an expression argument cannot reassociate with
   the `&&'.  */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
  ((NODE) && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
   && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
#define DECL_PACK_P(NODE) \
(DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MINVAL (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAXVAL (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack. VALUE must be a
TREE_VEC. */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
   ARGUMENT_PACK_SELECT represents.  Note: no trailing semicolon here;
   the stray `;' previously ended the macro body, which injected an
   empty statement at every statement-context use and made the macro
   unusable inside an expression.  */
#define ARGUMENT_PACK_SELECT_ARG(NODE)					\
  TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \
		ARGUMENT_PACK_SELECT_INDEX (NODE))
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(INDIRECT_REF_P (NODE) \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
reference to VLA type, because it's used for VLA capture. */
#define REFERENCE_VLA_OK(NODE) \
(TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* True if CALL_EXPR expresses list-initialization of an object. */
#define CALL_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_3 (TREE_CHECK2 ((NODE),CALL_EXPR,AGGR_INIT_EXPR))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Indicates whether a COMPONENT_REF has been parenthesized, or an
INDIRECT_REF comes from parenthesizing a VAR_DECL. Currently only set
some of the time in C++14 mode. */
#define REF_PARENTHESIZED_P(NODE) \
TREE_LANG_FLAG_2 (TREE_CHECK2 ((NODE), COMPONENT_REF, INDIRECT_REF))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN). */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in a
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs.  */
/* Structure containing iterator state.  Mirrors the CALL_EXPR
   argument iterator; advanced by next_aggr_init_expr_arg.  */
typedef struct aggr_init_expr_arg_iterator_d {
  tree t;	/* the aggr_init_expr being walked */
  int n;	/* total argument count, fixed at init time */
  int i;	/* index of the next argument to return */
} aggr_init_expr_arg_iterator;
/* Prepare ITER to walk the argument list of AGGR_INIT_EXPR node EXP,
   positioned before the first argument.  */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
				  aggr_init_expr_arg_iterator *iter)
{
  iter->i = 0;
  iter->n = aggr_init_expr_nargs (exp);
  iter->t = exp;
}
/* Fetch the argument ITER currently points at and step ITER past it.
   Once the arguments are exhausted, yield NULL_TREE.  */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
  if (iter->i >= iter->n)
    return NULL_TREE;
  tree arg = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
  ++iter->i;
  return arg;
}
/* Reset ITER onto the argument list of EXP and hand back the first
   argument (NULL_TREE when there are none).  Convenient for `for'
   headers, e.g.
     for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
	  arg = next_aggr_init_expr_arg (&iter))  */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
  /* Seed the iterator state directly, then delegate the fetch.  */
  iter->t = exp;
  iter->n = aggr_init_expr_nargs (exp);
  iter->i = 0;
  return next_aggr_init_expr_arg (iter);
}
/* Report, without disturbing ITER, whether any arguments remain to be
   visited.  */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
  return iter->n > iter->i;
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
/* Nonzero if NODE is an artificial FUNCTION_DECL for
#pragma omp declare reduction. */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= delete'.
Note the field is shared with CP_DECL_THREADPRIVATE_P above: the
threadprivate_or_deleted_p bit means "threadprivate" for variables
and "deleted" for functions. */
#define DECL_DELETED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration type. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
/* Set the flag indicating whether an ENUMERAL_TYPE came from an
opaque-enum-declaration. */
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
(ENUM_IS_OPAQUE (TYPE) = (VAL))
/* Determines whether TYPE is an opaque-declared ENUMERAL_TYPE. */
#define OPAQUE_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type. The
underlying type is determined in different ways, depending on the
properties of the enum:
- In C++0x, the underlying type can be explicitly specified, e.g.,
enum E1 : char { ... } // underlying type is char
- In a C++0x scoped enumeration, the underlying type is int
unless otherwise specified:
enum class E2 { ... } // underlying type is int
- Otherwise, the underlying type is determined based on the
values of the enumerators. In this case, the
ENUM_UNDERLYING_TYPE will not be set until after the definition
of the enumeration is completed by finish_enum. */
#define ENUM_UNDERLYING_TYPE(TYPE) \
TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
||TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a CONSTRUCTOR with no elements that is not a
compound literal. */
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a init-list used as a direct-initializer, i.e.
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
/* True if NODE is a brace-enclosed initializer used for
direct-initialization. */
#define DIRECT_LIST_INIT_P(NODE) \
(BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type cannot be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
/* Nonzero if NODE is a class type that is not an aggregate. */
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is a non-trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& !VOID_TYPE_P (NODE) \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function. */
#define TYPE_PTRFN_P(NODE) \
(TYPE_PTR_P (NODE) \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Nonzero for _TYPE node means that this type is a pointer to member
function type. Note that such types are represented internally as
RECORD_TYPEs, not POINTER_TYPEs. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_LANG_SPECIFIC (NODE) \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->ptrmemfunc_flag)
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* These are used to manipulate the canonical RECORD_TYPE from the
hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */
#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \
(TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL)
#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \
do { \
if (TYPE_LANG_SPECIFIC (NODE) == NULL) \
{ \
TYPE_LANG_SPECIFIC (NODE) = ggc_alloc_cleared_lang_type \
(sizeof (struct lang_type_ptrmem)); \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \
} \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \
} while (0)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE. */
#define UNDERLYING_TYPE_TYPE(NODE) \
(TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES. */
#define BASES_TYPE(NODE) \
(TYPE_VALUES_RAW (BASES_CHECK (NODE)))
/* Nonzero if a BASES node denotes only the direct bases, as opposed
to all bases. */
#define BASES_DIRECT(NODE) \
TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE. */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
id-expression or a member-access expression. When false, it was
parsed as a full expression. */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
(DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
decltype: lambda capture just drops references, init capture
uses auto semantics, lambda proxies look through implicit dereference. */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_INIT_CAPTURE(NODE) \
TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a lambda capture
field for an array of runtime bound. */
#define DECL_VLA_CAPTURE_P(NODE) \
DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
explicit initializer) lambda capture field, making it invisible to
name lookup in unevaluated contexts. */
#define DECL_NORMAL_CAPTURE_P(NODE) \
DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names. */
#define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
/* Accessors for a node on the DECL_FRIENDLIST, per the format
described above. */
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL). */
#define DECL_TEMPLATE_PARMS(NODE) \
TEMPLATE_DECL_CHECK (NODE)->decl_non_common.arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
/* The number of parameters in the innermost template parameter
list of NODE. */
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates. */
#define DECL_TEMPLATE_RESULT(NODE) \
DECL_RESULT_FLD (TEMPLATE_DECL_CHECK (NODE))
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
lists all instantiations and specializations of the function so that
tsubst_friend_function can reassign them to another template if we find
that the namespace-scope template is really a partial instantiation of a
friend template.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations, so that if we
explicitly specialize a partial instantiation we can walk the list
in maybe_process_partial_specialization and reassign them or complain
as appropriate.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for other templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
DECL_VINDEX (TEMPLATE_DECL_CHECK (NODE))
/* For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*, int'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
for the partial specialization. The TREE_TYPE is the _TYPE node for
the partial specialization.
This list is not used for other templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero for a DECL that represents a function template. */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
template. */
#define DECL_TYPE_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function. */
#define DECL_DECLARES_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, in the scope of `S' itself, so that you can say `S::S'.
DECL_SELF_REFERENCE_P will hold for that second typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
/* Nonzero if NODE is the second, in-class implicit typedef described
above, the one that allows `S::S'. */
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header and is not
a partial specialization. A member function of a class template is a
template, but not primary. A member template is primary. Friend
templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Nonzero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* Nonzero if NODE is an instantiation, implicit (1) or explicit (3);
tests the low bit of the use_template values enumerated above. */
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
/* Nonzero for a partial or explicit specialization (value 2 above). */
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
/* Nonzero for an implicit instantiation (value 1 above). */
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
/* Nonzero for an explicit instantiation (value 3 above). */
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
i.e. template, member of class template, or dependent friend. */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INSTANTIATION (DECL) \
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
generated from a temploid. */
#define DECL_GENERATED_P(DECL) \
(DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now; its interface
treatment has been decided and will not be revisited. */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external.
This flag does not indicate whether or not the decl is defined in the
current translation unit; it indicates whether or not we should emit the
decl at the end of compilation if it is defined and needed. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE))
/* A thunk is a stub function.
A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function, (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk. */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
   indicate that the qualified name denotes a template.  */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
  (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
   as an expr in operand 1, and integer_zero_node in operand 0.  */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
  (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes.  */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
  (TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
   hold code (or code fragments) related to ctors, dtors, and op=.
   See semantics.c for details.  */
#define CP_OMP_CLAUSE_INFO(NODE) \
  TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
                                     OMP_CLAUSE_LINEAR))
/* Nonzero if this transaction expression's body contains statements.  */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
  TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
   created when parsing template declarations.  */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block.  */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
/* HANDLER accessors: the catch parameter, the handler body, and the
   caught type (stored as the node's TREE_TYPE).  */
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
   and the VAR_DECL for which this cleanup exists.  */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
   statement, the then block of the if statement, and the else block
   of the if statement if it exists.  */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
/* WHILE_STMT accessors. These give access to the condition of the
   while statement and the body of the while statement, respectively.  */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors. These give access to the condition of the do
   statement and the body of the do statement, respectively.  */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors. These give access to the init statement,
   condition, update expression, and body of the for statement,
   respectively.  */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
   expression, body, and scope of the statement, respectively.  */
#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
/* SWITCH_STMT accessors. These give access to the condition, body,
   type, and scope of the switch statement, respectively.  */
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* STMT_EXPR accessor.  */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
   expression statement.  */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
   discard it if it isn't useful.  */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
  TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
   temporary.  */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
  TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
   to be named later.  */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
  TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if EXPR expresses direct-initialization of a TYPE.  */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
  (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
   && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
   an NSDMI, and should be re-evaluated when used in a constructor.  */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
  TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type.  */
#define SIZEOF_EXPR_TYPE_P(NODE) \
  TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* The kinds of class-key (and related) tags that C++ accepts.  */
enum tag_types {
  none_type = 0,     /* Not a tag type at all.  */
  record_type = 1,   /* A `struct' type.  */
  class_type = 2,    /* A `class' type.  */
  union_type = 3,    /* A `union' type.  */
  enum_type = 4,     /* An `enum' type.  */
  typename_type = 5  /* A `typename' type.  */
};
/* Bit flags classifying the kind of lvalue an expression is.  */
enum cp_lvalue_kind_flags {
  clk_none = 0,           /* Not an lvalue of any kind.  */
  clk_ordinary = 1 << 0,  /* An ordinary lvalue.  */
  clk_rvalueref = 1 << 1, /* An xvalue (rvalue formed via an rvalue
                             reference).  */
  clk_class = 1 << 2,     /* A prvalue of class type.  */
  clk_bitfield = 1 << 3,  /* An lvalue designating a bit-field.  */
  clk_packed = 1 << 4     /* An lvalue designating a packed field.  */
};
/* Holds any bitwise-or combination of the cp_lvalue_kind_flags above;
   used for parameters and variables carrying such flag sets.  */
typedef int cp_lvalue_kind;
/* Classification of a declaration with respect to template
   specialization, instantiation, and the shape of its template
   parameter lists.  */
typedef enum tmpl_spec_kind {
  tsk_none = 0,                /* Not a template at all.  */
  tsk_invalid_member_spec = 1, /* An explicit member template
                                  specialization whose enclosing
                                  classes have not all been explicitly
                                  specialized.  */
  tsk_invalid_expl_inst = 2,   /* An explicit instantiation that
                                  carries template parameter lists.  */
  tsk_excessive_parms = 3,     /* A template declaration with more
                                  template parameter lists than its
                                  nesting requires.  */
  tsk_insufficient_parms = 4,  /* A template declaration with fewer
                                  parameter lists than required.  */
  tsk_template = 5,            /* A (valid) template declaration.  */
  tsk_expl_spec = 6,           /* An explicit specialization.  */
  tsk_expl_inst = 7            /* An explicit instantiation.  */
} tmpl_spec_kind;
/* The various kinds of access. BINFO_ACCESS depends on these being
   two bit quantities. The numerical values are important; they are
   used to initialize RTTI data structures, so changing them changes
   the ABI. Do not reorder or renumber these enumerators.  */
typedef enum access_kind {
  ak_none = 0, /* Inaccessible. */
  ak_public = 1, /* Accessible, as a `public' thing. */
  ak_protected = 2, /* Accessible, as a `protected' thing. */
  ak_private = 3 /* Accessible, as a `private' thing. */
} access_kind;
/* The kinds of "special" member functions the front end recognizes.
   Any addition here must be reflected in special_function_p.  */
typedef enum special_function_kind {
  /* special_function_p relies on sfk_none being zero; keep it so.  */
  sfk_none = 0,                  /* Not a special function.  */
  sfk_constructor = 1,           /* Constructor.  */
  sfk_copy_constructor = 2,      /* Copy constructor.  */
  sfk_move_constructor = 3,      /* Move constructor.  */
  sfk_copy_assignment = 4,       /* Copy assignment operator.  */
  sfk_move_assignment = 5,       /* Move assignment operator.  */
  sfk_destructor = 6,            /* Destructor.  */
  sfk_complete_destructor = 7,   /* Destructor for complete objects.  */
  sfk_base_destructor = 8,       /* Destructor for base subobjects.  */
  sfk_deleting_destructor = 9,   /* Destructor for complete objects that
                                    also deletes the object once it has
                                    been destroyed.  */
  sfk_conversion = 10,           /* Conversion operator.  */
  sfk_inheriting_constructor = 11 /* Inheriting constructor.  */
} special_function_kind;
/* The various kinds of linkage. From [basic.link],
   A name is said to have linkage when it might denote the same
   object, reference, function, type, template, namespace or value
   as a name introduced in another scope:
   -- When a name has external linkage, the entity it denotes can
   be referred to from scopes of other translation units or from
   other scopes of the same translation unit.
   -- When a name has internal linkage, the entity it denotes can
   be referred to by names from other scopes in the same
   translation unit.
   -- When a name has no linkage, the entity it denotes cannot be
   referred to by names from other scopes.  */
typedef enum linkage_kind {
  lk_none, /* No linkage. */
  lk_internal, /* Internal linkage. */
  lk_external /* External linkage. */
} linkage_kind;
/* The kinds of storage duration an entity can have; the names follow
   the storage-duration taxonomy of [basic.stc].  */
typedef enum duration_kind {
  dk_static, /* Static storage duration. */
  dk_thread, /* Thread storage duration. */
  dk_auto, /* Automatic storage duration. */
  dk_dynamic /* Dynamic storage duration. */
} duration_kind;
/* Bitmask flags to control type substitution.  */
enum tsubst_flags {
  tf_none = 0, /* nothing special */
  tf_error = 1 << 0, /* give error messages */
  tf_warning = 1 << 1, /* give warnings too */
  tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
  tf_keep_type_decl = 1 << 3, /* retain typedef type decls
                                 (make_typename_type use) */
  tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
                            instantiate_type use) */
  tf_user = 1 << 5, /* found template must be a user template
                       (lookup_template_class use) */
  tf_conv = 1 << 6, /* We are determining what kind of
                       conversion might be permissible,
                       not actually performing the
                       conversion. */
  tf_decltype = 1 << 7, /* We are the operand of decltype.
                           Used to implement the special rules
                           for calls in decltype (5.2.2/11). */
  tf_partial = 1 << 8, /* Doing initial explicit argument
                          substitution in fn_type_unification. */
  /* Convenient substitution flags combinations. */
  tf_warning_or_error = tf_warning | tf_error
};
/* This type is used for parameters and variables which hold
   combinations of the flags in enum tsubst_flags.  */
typedef int tsubst_flags_t;
/* The kind of checking we can do looking in a class hierarchy.  */
enum base_access_flags {
  ba_any = 0, /* Do not check access, allow an ambiguous base,
                 prefer a non-virtual base */
  ba_unique = 1 << 0, /* Must be a unique base. */
  ba_check_bit = 1 << 1, /* Check access. */
  /* Check access AND require a unique base (the common case). */
  ba_check = ba_unique | ba_check_bit,
  ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
};
/* This type is used for parameters and variables which hold
   combinations of the flags in enum base_access_flags.  */
typedef int base_access;
/* The various kinds of access check during parsing.
   NOTE: the dk_* prefix here is shared with enum duration_kind above;
   the enumerator names themselves do not collide.  */
typedef enum deferring_kind {
  dk_no_deferred = 0, /* Check access immediately */
  dk_deferred = 1, /* Deferred check */
  dk_no_check = 2 /* No access check */
} deferring_kind;
/* The kind of base we can find, looking in a class hierarchy.
   Values <0 indicate we failed.  */
typedef enum base_kind {
  bk_inaccessible = -3, /* The base is inaccessible */
  bk_ambig = -2, /* The base is ambiguous */
  bk_not_base = -1, /* It is not a base */
  bk_same_type = 0, /* It is the same type */
  bk_proper_base = 1, /* It is a proper base */
  bk_via_virtual = 2 /* It is a proper base, but via a virtual
                        path. This might not be the canonical
                        binfo. */
} base_kind;
/* Node for "pointer to (virtual) function".
   This may be distinct from ptr_type_node so gdb can distinguish them.  */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'.  */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)  */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
   PARM_DECLs in cp_tree_equal.  */
extern int comparing_specializations;
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
   constants.  */
typedef int cp_cv_quals;
/* In parser.c.  */
/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof. This is a count since operands to
   sizeof can be nested.  */
extern int cp_unevaluated_operand;
extern tree cp_convert_range_for (tree, tree, tree, bool);
extern bool parsing_nsdmi (void);
extern void inject_this_parameter (tree, cp_cv_quals);
/* in pt.c */
/* Deduction modes passed as the `STRICT' argument to type_unification
   and fn_type_unification; their precise meanings are documented
   alongside fn_type_unification.  */
typedef enum unification_kind_t {
  DEDUCE_CALL = 0,
  DEDUCE_CONV = 1,
  DEDUCE_EXACT = 2
} unification_kind_t;
/* in class.c */
/* Presumably the nesting depth of the class currently being
   processed -- TODO(review): confirm exact semantics in class.c.  */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
   declaration order.  */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* Here's where we control how name mangling takes place.  */
/* Cannot use '$' up front, because this confuses gdb
   (names beginning with '$' are gdb-local identifiers).
   Note that all forms in which the '$' is significant are long enough
   for direct indexing (meaning that if we know there is a '$'
   at a particular location, we can index into the string at
   any other location that provides distinguishing characters).  */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
   doesn't allow '.' in symbol names.  */
/* Three naming schemes, tried in order of preference: '.' as the
   joiner character, then '$', and finally plain underscores -- in
   which last case the *_NAME_P predicate macros are defined here
   explicitly instead of via the JOINER-based definitions at the
   bottom of this section.  */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#else /* NO_DOLLAR_IN_LABEL */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
             sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
             sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
             sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
#define THIS_NAME "this"
#define IN_CHARGE_NAME "__in_chrg"
#define VTBL_PTR_TYPE "__vtbl_ptr_type"
#define VTABLE_DELTA_NAME "__delta"
#define VTABLE_PFN_NAME "__pfn"
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
             UDLIT_OP_ANSI_PREFIX, \
             sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
  (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
  && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
  && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities.  */
extern int at_eof;
/* A list of namespace-scope objects which have constructors or
   destructors which reside in the global scope. The decl is stored
   in the TREE_VALUE slot and the initializer is stored in the
   TREE_PURPOSE slot.  */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage.  */
extern GTY(()) tree tls_aggregates;
/* Flags distinguishing the special flavor of a declared name.  */
enum overload_flags {
  NO_SPECIAL = 0,  /* An ordinary name.  */
  DTOR_FLAG = 1,   /* A destructor name.  */
  TYPENAME_FLAG = 2 /* A type name.  */
};
/* These are used as bits in flags passed to various functions to
   control their behavior. Despite the LOOKUP_ prefix, many of these
   do not control name lookup. ??? Functions using these flags should
   probably be modified to accept explicit boolean flags for the
   behaviors relevant to them.  */
/* Check for access violations.  */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
   should be called directly.  */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried. This flag
   indicates that we are not performing direct-initialization.  */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
   as long as the current variable bindings; otherwise it only lives
   until the end of the complete-expression. It also forces
   direct-initialization in cases where other parts of the compiler
   have already generated a temporary, such as reference
   initialization and the catch parameter.  */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
   conversions are not permitted (only built-in conversions).  */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor. (Therefore, we do
   not need to check that the object is non-NULL before calling the
   destructor.)  */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries.  */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces.  */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types.  */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces.  */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
   (Normally, these entities are registered in the symbol table, but
   not found by lookup.)  */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* Prefer that the lvalue be treated as an rvalue.  */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed.  */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization.  */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor.  */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors.  */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
   Used by synthesized_method_walk to determine which functions will
   be called to initialize subobjects, in order to determine exception
   specification and possible implicit delete.
   This is kind of a hack, but exiting early avoids problems with trying
   to perform argument conversions when the class isn't complete yet.  */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
   cycles trying to declare them (core issue 1092).  */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
   digest_init.  */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments.  */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues.  */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions.  */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation.  */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
/* Predicates over the LOOKUP_PREFER_* bits above.  */
#define LOOKUP_NAMESPACES_ONLY(F) \
  (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
  (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
/* These flags are used by the conversion code.
   CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
   CONV_STATIC : Perform the explicit conversions for static_cast.
   CONV_CONST : Perform the explicit conversions for const_cast.
   CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
   CONV_PRIVATE : Perform upcasts to private bases.
   CONV_FORCE_TEMP : Require a new temporary when converting to the same
   aggregate type.  */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* Bit 32 was CONV_NONCONVERTING; the definition is retired but the
   bit value is left unused.  */
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
                          | CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
                     | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
/* Used by build_expr_type_conversion to indicate which types are
   acceptable as arguments to the expression under consideration.  */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
   comparison.  */
#define COMPARE_STRICT 0 /* Just check if the types are the
                            same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
                          derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
                             reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
                                   another declaration of an existing
                                   entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
                                structural. The actual comparison
                                will be identical to
                                COMPARE_STRICT. */
/* Used with push_overloaded_decl.  */
#define PUSH_GLOBAL 0 /* Push the DECL into namespace scope,
                         regardless of the current scope. */
#define PUSH_LOCAL 1 /* Push the DECL into the current
                        scope. */
#define PUSH_USING 2 /* We are pushing this DECL as the
                        result of a using declaration. */
/* Used with start function.  */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
                           already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
                               in the class body. */
/* Used with start_decl's initialized parameter.  */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
   is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
   class derived from the type pointed to (referred to) by TYPE1.  */
#define same_or_base_type_p(TYPE1, TYPE2) \
  comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX.  */
/* Convenience cast; NODE must be a TEMPLATE_PARM_INDEX.  */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
  ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
   TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes.  */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
  (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
                                 TEMPLATE_TEMPLATE_PARM, \
                                 BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
  (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
  (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
  (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
  (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
  (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto).  */
#define AUTO_IS_DECLTYPE(NODE) \
  (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* These constants can used as bit flags in the process of tree formatting.
   TFF_PLAIN_IDENTIFIER: unqualified part of a name.
   TFF_SCOPE: include the class and namespace scope of the name.
   TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
   TFF_DECL_SPECIFIERS: print decl-specifiers.
   TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
   a class-key (resp. `enum').
   TFF_RETURN_TYPE: include function return type.
   TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
   TFF_EXCEPTION_SPECIFICATION: show function exception specification.
   TFF_TEMPLATE_HEADER: show the template<...> header in a
   template-declaration.
   TFF_TEMPLATE_NAME: show only template-name.
   TFF_EXPR_IN_PARENS: parenthesize expressions.
   TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
   TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
   top-level entity.
   TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
   identical to their defaults.
   TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
   arguments for a function template specialization.  */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
  ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
   ? TYPE_TI_TEMPLATE (NODE) \
   : TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Descriptive information about one overloadable operator.  */
typedef struct GTY(()) operator_name_info_t {
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The arity of the operator.  */
  int arity;
} operator_name_info_t;
/* A mapping from tree codes to operator name information.  */
extern GTY(()) operator_name_info_t operator_name_info
  [(int) MAX_TREE_CODES];
/* Similar, but for assignment operators.  */
extern GTY(()) operator_name_info_t assignment_operator_name_info
  [(int) MAX_TREE_CODES];
/* Non-static member functions have an optional virt-specifier-seq.
   There is a VIRT_SPEC value for each virt-specifier.
   They can be combined by bitwise-or to form the complete set of
   virt-specifiers for a member function.  */
enum virt_specifier
  {
    VIRT_SPEC_UNSPECIFIED = 0x0,
    VIRT_SPEC_FINAL       = 0x1,
    VIRT_SPEC_OVERRIDE    = 0x2
  };
/* A type-qualifier, or bitmask thereof, using the VIRT_SPEC
   constants.  */
typedef int cp_virt_specifiers;
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
   [dcl.fct]
   The return type, the parameter-type-list, the ref-qualifier, and
   the cv-qualifier-seq, but not the default arguments or the exception
   specification, are part of the function type.
   REF_QUAL_NONE    Ordinary member function with no ref-qualifier
   REF_QUAL_LVALUE  Member function with the &-ref-qualifier
   REF_QUAL_RVALUE  Member function with the &&-ref-qualifier */
enum cp_ref_qualifier {
  REF_QUAL_NONE = 0,
  REF_QUAL_LVALUE = 1,
  REF_QUAL_RVALUE = 2
};
/* A C++ storage-class-specifier, as recorded by the parser.  */
typedef enum cp_storage_class {
  /* sc_none must stay zero: zero-initializing a cp_decl_specifier_seq
     must leave its storage_class field equal to sc_none.  */
  sc_none = 0,
  sc_auto = 1,     /* `auto' used as a storage class.  */
  sc_register = 2, /* `register'.  */
  sc_static = 3,   /* `static'.  */
  sc_extern = 4,   /* `extern'.  */
  sc_mutable = 5   /* `mutable'.  */
} cp_storage_class;
/* An individual decl-specifier. This is used to index the array of
   locations for the declspecs in struct cp_decl_specifier_seq
   below.  Note that ds_signed aliases ds_first (both are zero), and
   ds_last must always remain the final enumerator.  */
typedef enum cp_decl_spec {
  ds_first,
  ds_signed = ds_first,
  ds_unsigned,
  ds_short,
  ds_long,
  ds_const,
  ds_volatile,
  ds_restrict,
  ds_inline,
  ds_virtual,
  ds_explicit,
  ds_friend,
  ds_typedef,
  ds_alias,
  ds_constexpr,
  ds_complex,
  ds_thread,
  ds_type_spec,
  ds_redefined_builtin_type_spec,
  ds_attribute,
  ds_std_attribute,
  ds_storage_class,
  ds_long_long,
  ds_last /* This enumerator must always be the last one. */
} cp_decl_spec;
/* A decl-specifier-seq.  */
typedef struct cp_decl_specifier_seq {
  /* An array of locations for the declaration specifiers, indexed by
     enum cp_decl_spec_word.  */
  source_location locations[ds_last];
  /* The primary type, if any, given by the decl-specifier-seq.
     Modifiers, like "short", "const", and "unsigned" are not
     reflected here. This field will be a TYPE, unless a typedef-name
     was used, in which case it will be a TYPE_DECL.  */
  tree type;
  /* The attributes, if any, provided with the specifier sequence.  */
  tree attributes;
  /* The c++11 attributes that follow the type specifier.  */
  tree std_attributes;
  /* If non-NULL, a built-in type that the user attempted to redefine
     to some other type.  */
  tree redefined_builtin_type;
  /* The storage class specified -- or sc_none if no storage class was
     explicitly specified.  */
  cp_storage_class storage_class;
  /* True iff TYPE_SPEC defines a class or enum.  */
  BOOL_BITFIELD type_definition_p : 1;
  /* True iff multiple types were (erroneously) specified for this
     decl-specifier-seq.  */
  BOOL_BITFIELD multiple_types_p : 1;
  /* True iff multiple storage classes were (erroneously) specified
     for this decl-specifier-seq or a combination of a storage class
     with a typedef specifier.  */
  BOOL_BITFIELD conflicting_specifiers_p : 1;
  /* True iff at least one decl-specifier was found.  */
  BOOL_BITFIELD any_specifiers_p : 1;
  /* True iff at least one type-specifier was found.  */
  BOOL_BITFIELD any_type_specifiers_p : 1;
  /* True iff "int" was explicitly provided.  */
  BOOL_BITFIELD explicit_int_p : 1;
  /* True iff "__int128" was explicitly provided.  */
  BOOL_BITFIELD explicit_int128_p : 1;
  /* True iff "char" was explicitly provided.  */
  BOOL_BITFIELD explicit_char_p : 1;
  /* True iff ds_thread is set for __thread, not thread_local.  */
  BOOL_BITFIELD gnu_thread_keyword_p : 1;
} cp_decl_specifier_seq;
/* Discriminator for the kinds of declarators the parser builds.  */
typedef enum cp_declarator_kind {
  cdk_id = 0,        /* An identifier (possibly qualified).  */
  cdk_function = 1,  /* A function declarator.  */
  cdk_array = 2,     /* An array declarator.  */
  cdk_pointer = 3,   /* A pointer declarator.  */
  cdk_reference = 4, /* A reference declarator.  */
  cdk_ptrmem = 5,    /* A pointer-to-member declarator.  */
  cdk_error = 6      /* An erroneous declarator.  */
} cp_declarator_kind;
/* A declarator.  */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed.  */
struct cp_parameter_declarator {
  /* The next parameter, or NULL if none.  (This is a plain pointer,
     not a tree, so the list terminator is NULL rather than
     NULL_TREE.)  */
  cp_parameter_declarator *next;
  /* The decl-specifiers-seq for the parameter.  */
  cp_decl_specifier_seq decl_specifiers;
  /* The declarator for the parameter.  */
  cp_declarator *declarator;
  /* The default-argument expression, or NULL_TREE, if none.  */
  tree default_argument;
  /* True iff this is the first parameter in the list and the
     parameter sequence ends with an ellipsis.  */
  bool ellipsis_p;
};
/* A declarator, as produced by the parser and before semantic
   analysis.  Declarators form a chain through the `declarator'
   field; the `kind' field selects the active member of `u'.  */
struct cp_declarator {
/* The kind of declarator. */
ENUM_BITFIELD (cp_declarator_kind) kind : 4;
/* Whether we parsed an ellipsis (`...') just before the declarator,
to indicate this is a parameter pack. */
BOOL_BITFIELD parameter_pack_p : 1;
location_t id_loc; /* Currently only set for cdk_id and cdk_function. */
/* GNU Attributes that apply to this declarator. If the declarator
is a pointer or a reference, these attributes apply to the type
pointed to. */
tree attributes;
/* Standard C++11 attributes that apply to this declarator. If the
declarator is a pointer or a reference, these attributes apply
to the pointer, rather than to the type pointed to. */
tree std_attributes;
/* For all but cdk_id and cdk_error, the contained declarator. For
cdk_id and cdk_error, guaranteed to be NULL. */
cp_declarator *declarator;
union {
/* For identifiers (cdk_id).  */
struct {
/* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
*_TYPE) for this identifier. */
tree qualifying_scope;
/* The unqualified name of the entity -- an IDENTIFIER_NODE,
BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
tree unqualified_name;
/* If this is the name of a function, what kind of special
function (if any). */
special_function_kind sfk;
} id;
/* For functions (cdk_function).  */
struct {
/* The parameters to the function as a TREE_LIST of decl/default. */
tree parameters;
/* The cv-qualifiers for the function. */
cp_cv_quals qualifiers;
/* The virt-specifiers for the function. */
cp_virt_specifiers virt_specifiers;
/* The ref-qualifier for the function. */
cp_ref_qualifier ref_qualifier;
/* The exception-specification for the function. */
tree exception_specification;
/* The late-specified return type, if any. */
tree late_return_type;
} function;
/* For arrays (cdk_array).  */
struct {
/* The bounds to the array. */
tree bounds;
} array;
/* For cdk_pointer and cdk_ptrmem. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
/* For cdk_ptrmem, the class type containing the member. */
tree class_type;
} pointer;
/* For cdk_reference */
struct {
/* The cv-qualifiers for the reference. These qualifiers are
only used to diagnose ill-formed code. */
cp_cv_quals qualifiers;
/* Whether this is an rvalue reference */
bool rvalue_ref;
} reference;
} u;
};
/* A level of template instantiation.  These form a GC-chained stack
   (see the GTY chain_next marker) recording where each pending
   instantiation was triggered, for use in diagnostics.  */
struct GTY((chain_next ("%h.next"))) tinst_level {
/* The immediately deeper level in the chain. */
struct tinst_level *next;
/* The original node. Can be either a DECL (for a function or static
data member) or a TYPE (for a class), depending on what we were
asked to instantiate. */
tree decl;
/* The location where the template is instantiated. */
location_t locus;
/* errorcount+sorrycount when we pushed this level.  Comparing
   against the current totals tells whether this instantiation
   produced any diagnostics.  */
int errors;
/* True if the location is in a system header. */
bool in_system_header_p;
};
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of FNTYPE, i.e. the type of
   the first argument in the METHOD_TYPE's argument list.  FNTYPE must
   be a METHOD_TYPE.  */
inline tree
type_of_this_parm (const_tree fntype)
{
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
  function_args_iterator args;
  function_args_iter_init (&args, fntype);
  return function_args_iter_cond (&args);
}
/* Return the class of the `this' parameter of FNTYPE: since `this' is
   a pointer (or reference) to the class, that is the type it points
   to.  FNTYPE must be a METHOD_TYPE.  */
inline tree
class_of_this_parm (const_tree fntype)
{
  tree this_type = type_of_this_parm (fntype);
  return TREE_TYPE (this_type);
}
/* A parameter list indicating for a function with no parameters,
e.g "int f(void)". */
extern cp_parameter_declarator *no_parameters;
/* True if we saw "#pragma GCC java_exceptions". */
extern bool pragma_java_exceptions;
/* in call.c */
extern bool check_dtor_name (tree, tree);
/* Add the `extern' keyword for consistency with every other
   declaration in this header; function declarations default to
   external linkage, so this changes nothing semantically.  */
extern bool magic_varargs_p (tree);
extern tree build_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **, bool,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *,
tree *, tree, tree *,
tsubst_flags_t);
extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **,
tree, int, tree *,
tsubst_flags_t);
extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
#ifdef ENABLE_CHECKING
extern void validate_conversion_obstack (void);
#endif /* ENABLE_CHECKING */
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern void resort_type_method_vec (void *, void *,
gt_pointer_operator, void *);
extern bool add_method (tree, tree, tree);
extern bool currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void print_class_statistics (void);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool type_has_move_constructor (tree);
extern bool type_has_move_assign (tree);
extern bool type_has_user_declared_move_constructor (tree);
extern bool type_has_user_declared_move_assign(tree);
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, int);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern void insert_late_enum_def_into_classtype_sorted_fields (tree, tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern tree perform_qualification_conversions (tree, tree);
/* in name-lookup.c */
extern tree pushdecl (tree);
extern tree pushdecl_maybe_friend (tree, bool);
extern void maybe_push_cleanup_level (tree);
extern tree pushtag (tree, tree, tag_scope);
extern tree make_anon_name (void);
extern tree pushdecl_top_level_maybe_friend (tree, bool);
extern tree pushdecl_top_level_and_finish (tree, tree);
extern tree check_for_out_of_scope_variable (tree);
extern void dump (cp_binding_level &ref);
extern void dump (cp_binding_level *ptr);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, cp_cv_quals);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern int grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern bool xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (int);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
typedef int (*walk_namespaces_fn) (tree, void *);
extern int walk_namespaces (walk_namespaces_fn,
void *);
extern int wrapup_globals_for_namespace (tree, void *);
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
extern void require_deduced_type (tree);
extern bool defer_mark_used_calls;
extern GTY(()) vec<tree, va_gc> *deferred_mark_used_calls;
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
/* in decl2.c */
extern bool check_java_method (tree);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cp_write_global_declarations (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void reset_type_linkage (tree);
extern void tentative_decl_linkage (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
extern bool mark_used (tree);
extern bool mark_used (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree);
extern tree set_guard (tree);
extern tree get_tls_wrapper_fn (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern void note_comdat_fn (tree);
extern tree build_artificial_parm (tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
extern tree vtv_start_verification_constructor_init_function (void);
extern tree vtv_finish_verification_constructor_init_function (tree);
extern bool cp_omp_mappable_type (tree);
/* in error.c */
extern void init_error (void);
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void print_instantiation_context (void);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
extern tree build_must_not_throw_expr (tree,tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
extern tree mark_rvalue_use (tree);
extern tree mark_lvalue_use (tree);
extern tree mark_type_use (tree);
extern void mark_exp_read (tree);
/* friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree get_nsdmi (tree, bool);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
extern tree throw_bad_array_length (void);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree build_java_class_ref (tree);
extern tree integral_constant_value (tree);
extern tree decl_constant_value_safe (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
extern tree build_vtbl_address (tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree);
extern tree unqualified_fn_lookup_error (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern tree copy_decl (tree);
extern tree copy_type (tree);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree get_inherited_ctor (tree);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* In optimize.c */
extern bool maybe_clone_body (tree);
/* in pt.c */
extern bool check_template_shadow (tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern tree check_explicit_specialization (tree, tree, int, int);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree do_auto_deduction (tree, tree, tree);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern bool is_auto_or_concept (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern int uses_template_parms (tree);
extern int uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern void maybe_instantiate_noexcept (tree);
extern tree instantiate_decl (tree, int, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree);
extern bool check_for_bare_parameter_packs (tree);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern int problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree fold_non_dependent_expr (tree);
extern tree fold_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern int push_tinst_level (tree);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
/* Add the `extern' keyword for consistency with the surrounding
   declarations; function declarations default to external linkage,
   so behavior is unchanged.  */
extern bool template_template_parameter_p (const_tree);
extern bool template_type_parameter_p (const_tree);
extern bool primary_template_instantiation_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field_1 (tree, tree, bool);
extern tree lookup_field (tree, tree, int, bool);
extern int lookup_fnfields_1 (tree, tree);
extern tree lookup_fnfields_slot (tree, tree);
extern tree lookup_fnfields_slot_nolazy (tree, tree);
extern int class_method_index_for_fn (tree, tree);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern void print_search_statistics (void);
extern void reinit_search_statistics (void);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
/* The representation of a deferred access check.  Checks of this shape
   are queued by push_deferring_access_checks / perform_or_defer_access_check
   (declared below, implemented in semantics.c) and replayed later by
   perform_deferred_access_checks.  */
typedef struct GTY(()) deferred_access_check {
  /* The base class in which the declaration is referenced. */
  tree binfo;
  /* The declaration whose access must be checked. */
  tree decl;
  /* The declaration that should be used in the error message. */
  tree diag_decl;
  /* The location of this access. */
  location_t loc;
} deferred_access_check;
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t);
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern void finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_for_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool check_constexpr_ctor_body (tree, tree);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree);
extern tree maybe_constant_value (tree);
extern tree maybe_constant_init (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
/* Flag bits accepted by begin_compound_stmt (declared just below).
   Values are distinct powers of two so callers can OR them together.
   NOTE(review): the per-flag descriptions are inferred from the names;
   confirm against the implementation in semantics.c.  */
enum {
  BCS_NO_SCOPE = 1,   /* presumably: do not open a new binding scope */
  BCS_TRY_BLOCK = 2,  /* presumably: this compound statement is a try block */
  BCS_FN_BODY = 4     /* presumably: this compound statement is a function body */
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern tree finish_parenthesized_expr (tree);
extern tree force_paren_expr (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern tree perform_koenig_lookup (tree, vec<tree, va_gc> *,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree finish_increment_expr (tree, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern tree finish_unary_op_expr (location_t, enum tree_code, tree,
tsubst_flags_t);
extern tree finish_compound_literal (tree, tree, tsubst_flags_t);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern tree finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree);
extern tree finish_offsetof (tree);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern void note_decl_for_pch (tree);
extern tree omp_reduction_id (enum tree_code, tree, tree);
extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
extern void cp_check_omp_declare_reduction (tree);
extern tree finish_omp_clauses (tree);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, enum tree_code,
tree, tree, tree, tree, tree,
tree, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree,
bool);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern void finish_omp_taskyield (void);
extern void finish_omp_cancel (tree);
extern void finish_omp_cancellation_point (tree);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree, bool);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern tree build_capture_proxy (tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree);
extern tree maybe_resolve_dummy (tree);
extern tree nonlambda_method_basetype (void);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree);
extern tree strip_typedefs (tree);
extern tree strip_typedefs_expr (tree);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool lvalue_or_rvalue_with_address_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
extern int is_overloaded_fn (tree);
extern tree dependent_name (tree);
extern tree get_fns (tree);
extern tree get_first_fn (tree);
extern tree ovl_cons (tree, tree);
extern tree build_overload (tree, tree);
extern tree ovl_scope (tree);
extern bool non_static_member_function_p (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool really_overloaded_fn (tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, struct pointer_set_t*);
#define cp_walk_tree(tp,func,data,pset) \
walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
#define cp_walk_tree_without_duplicates(tp,func,data) \
walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
extern tree fold_if_not_in_template (tree);
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
extern int type_unknown_p (const_tree);
enum { ce_derived, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree, tsubst_flags_t);
extern tree build_class_member_access_expr (tree, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (tree, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call (tree, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_addr_expr_strict (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, int,
tsubst_flags_t);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern tree build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (tree, enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern bool ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
extern tree build_x_vec_perm_expr (location_t,
tree, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true)
extern tree build_simple_component_ref (tree, tree);
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_typed_address (tree, tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (tree, tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (const_tree, const_tree, diagnostic_t);
#undef cxx_incomplete_type_error
extern void cxx_incomplete_type_error (const_tree, const_tree);
#define cxx_incomplete_type_error(V,T) \
(cxx_incomplete_type_diagnostic ((V), (T), DK_ERROR))
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern void check_narrowing (tree, tree);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int);
extern tree digest_nsdmi_init (tree, tree);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree, tree);
/* in mangle.c */
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree);
extern tree mangle_conv_op_name_for_type (tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
extern char * get_mangled_vtable_map_var_name (tree);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree, gimple_seq *);
extern bool cxx_omp_privatize_by_reference (const_tree);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree);
extern tree strip_using_decl (tree);
/* in vtable-class-hierarchy.c */
extern void vtv_compute_class_hierarchy_transitive_closure (void);
extern void vtv_generate_init_routine (void);
extern void vtv_save_class_info (tree);
extern void vtv_recover_class_info (void);
extern void vtv_build_vtable_verify_fndecl (void);
/* In cp-cilkplus.c. */
extern bool cpp_validate_cilk_plus_loop (tree);
/* In cp/cp-array-notations.c */
extern tree expand_array_notation_exprs (tree);
bool cilkplus_an_triplet_types_ok_p (location_t, tree, tree, tree,
tree);
/* In c-family/cilk.c */
extern bool cilk_valid_spawn (tree);
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
BubbleSort_OpenMP.c | #include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#define SIZE 2000

long array[SIZE];  /* the data being sorted (shared by all functions) */
double avg;        /* accumulated elapsed time in ms (see Automated/main) */

/* Sort array[start .. end-1] in ascending order, in place.
 *
 * BUG FIX: the original loop compared arbitrary (non-adjacent) pairs --
 * an exchange-sort variant, not a bubble sort -- and always performed
 * (end-start)^2 comparisons with no early exit.  This is a genuine
 * bubble sort over adjacent elements: each pass floats the largest
 * remaining value to the top, and the loop stops as soon as a full pass
 * makes no swap.  The result (ascending order) is unchanged, but
 * nearly-sorted input now finishes in O(n) instead of O(n^2).
 */
void BubbleSort(int start, int end){
    /* after each pass, array[last] holds its final value */
    for (int last = end - 1; last > start; last--)
    {
        int swapped = 0;
        for (int j = start; j < last; j++)
        {
            if (array[j] > array[j + 1])
            {
                long temp = array[j];
                array[j] = array[j + 1];
                array[j + 1] = temp;
                swapped = 1;
            }
        }
        if (!swapped)  /* pass made no change: segment already sorted */
            break;
    }
}
/* One benchmark run: fill the global array with range-partitioned random
 * data, sort each thread's segment in parallel, and add the elapsed
 * wall-clock time in milliseconds to the global `avg`.
 *
 * The data is banded -- element i draws from (i/500)*500 + [1, 500] --
 * so independently sorted, equally sized segments produce a globally
 * sorted array when the segment boundaries align with the bands
 * (4 threads x 500 elements for SIZE == 2000).
 */
void Automated()
{
    long i;
    /* NOTE(review): reseeding with time(NULL) on every call gives all
     * calls within the same second identical data; seed once in main()
     * if distinct runs are required. */
    srand (time(NULL));
    for (i = 0 ; i < SIZE ; i++)
    {
        /* band index 0,1,2,... -> values 1..500, 501..1000, ... */
        long band = i / 500;
        array[i] = band * 500 + 1 + (rand() % 500);
    }

    /* BUG FIX: the original called omp_set_num_threads(4) *inside* the
     * parallel region, where it cannot affect the team already running
     * that region; request the team size before entering it. */
    omp_set_num_threads(4);

    /* BUG FIX: the original timed the region with clock(), which sums
     * CPU time over all threads and therefore over-reported the
     * parallel region by roughly the thread count.  Use wall-clock
     * time instead. */
    double start = omp_get_wtime();

    #pragma omp parallel
    {
        int total_threads = omp_get_num_threads();
        int segment = SIZE / total_threads;
        #pragma omp for
        for (i = 0; i < total_threads; i++)
        {
            /* the last segment absorbs the remainder when SIZE is not
             * a multiple of the team size */
            int lo = i * segment;
            int hi = (i == total_threads - 1) ? SIZE : lo + segment;
            BubbleSort(lo, hi);
        }
    }

    double elapsed = (omp_get_wtime() - start) * 1000.0;  /* ms */
    avg += elapsed;
}
/* Run the benchmark RUNS times and print the average per-run time (ms).
 * `avg` is accumulated by Automated() and averaged here. */
int main(){
    enum { RUNS = 100 };  /* number of benchmark repetitions */
    int i;
    avg = 0;
    for (i = 0 ; i < RUNS ; i++)
    {
        Automated();
    }
    avg /= RUNS;
    /* fixed typo in the report: "OPenMP" -> "OpenMP" */
    printf("\n\nOpenMP: Average Time Taken; BubbleSorts: %lf \n\n", avg);
    return 0;
}
|
tsv.c | /* This code is part of this project: Donato E, Ouyang M,
* Peguero-Isalguez C. Triangle counting with a multi-core computer.
* Proceedings of IEEE High Performance Extreme Computing Conference
* (HPEC), 2018, 1-7.
*
* Copyright (c) 2018 Ming Ouyang
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <omp.h>
#include "ompTri.h"
/* read a tsv file
* store the graph in degree[n] and neighbor[n][]
*
* tsv format: one edge per line, each line is 3 numbers separated by 2 tabs
* "u\tv\tedgeWeight\n"
* vertex numbers, u and v, start at 1
*/
/* Read a tab-separated edge list ("u\tv\tweight\n" per line, vertex
 * numbers 1-based) into the module-level graph state: sets n (largest
 * number seen in the file), fills degree[] and the adjacency lists
 * neighbor[][].  The file is scanned in numT OpenMP threads, each
 * owning a contiguous byte range.
 *
 * NOTE(review): fread/malloc return values are unchecked, and a number
 * is only recognized when followed by '\t' or '\n' -- a file without a
 * trailing newline would drop its final number.  Confirm inputs are
 * well formed before hardening.
 */
void readTSV(char *filename) {
  uint64_t i, j, *off, numItem, *myNumItem, chunk, start, end;
  uint64_t *rawNum, u, v;
  struct stat buf;
  char *buffer;
  int status;
  FILE *fp;
  status = stat(filename, &buf);
  if (status) {
    printf("no such file: %s\n", filename);
    exit(0);
  }
  if (verbose)
    printf("file has %lu bytes, ", buf.st_size);
  buffer = (char*) malloc(buf.st_size);
  /* myNumItem[i+1] counts the numbers thread i finds; the extra leading
     slot lets the prefix sum below turn counts into per-thread write
     offsets into off[]. */
  myNumItem = (uint64_t*) malloc(sizeof(uint64_t) * (numT + 1));
  for (i = 0; i <= numT; i++) //prefix sum later, need one extra element
    myNumItem[i] = 0;
  chunk = buf.st_size / numT; /* bytes per thread; last thread takes the tail */
  //grab the whole file
  fp = fopen(filename, "rb");
  fread((void*) buffer, 1, buf.st_size, fp);
  fclose(fp);
  /* Pass 1: count how many numbers are in the file.  Each number ends in
     exactly one '\t' or '\n', so counting separators per byte range
     counts the numbers ending in that range (a number split across a
     chunk boundary is counted by whichever thread sees its separator). */
  #pragma omp parallel for private(j,start,end)
  for (i = 0; i < numT; i++) {
    start = i * chunk;
    end = (i == numT - 1) ? buf.st_size : start + chunk;
    for (j = start; j < end; j++)
      if (buffer[j] == '\t' || buffer[j] == '\n')
        myNumItem[i + 1]++; //note (i + 1), shift by one
  }
  for (i = 0; i < numT; i++) //prefix sum
    myNumItem[i + 1] += myNumItem[i];
  numItem = myNumItem[numT]; //number of numbers in the file
  off = (uint64_t*) malloc(sizeof(uint64_t) * (numItem + 1));
  rawNum = (uint64_t*) malloc(sizeof(uint64_t) * numItem);
  /* off[k] will hold the byte offset where the k-th number starts.  The
     first number starts at byte 0; every later number starts just past a
     separator.  Temporarily shifting the pointer by one lets each thread
     append its "byte after separator" entries at its prefix-summed
     offset, landing them at off[1..numItem] before the shift is undone. */
  off[0] = 0;
  off += 1;
  //locate the beginning of each number in the file
  #pragma omp parallel for private(j,start,end)
  for (i = 0; i < numT; i++) {
    start = i * chunk;
    end = (i == numT - 1) ? buf.st_size : start + chunk;
    for (j = start; j < end; j++)
      if (buffer[j] == '\t' || buffer[j] == '\n')
        off[ myNumItem[i]++ ] = j + 1; /* consumes myNumItem as a write cursor */
  }
  off -= 1;
  /* Pass 2: parse every number and track the maximum, used as the vertex
     count.  NOTE(review): the max is taken over ALL numbers including
     edge weights, so a weight larger than every vertex id would inflate
     n and over-allocate degree/neighbor (benign but wasteful). */
  n = 0; //for max reduction
  #pragma omp parallel for reduction(max:n)
  for (i = 0; i < numItem; i++) {
    rawNum[i] = str2u64( &buffer[ off[i] ]);
    n = (n < rawNum[i]) ? rawNum[i] : n;
  }
  free(off);
  free(myNumItem);
  free(buffer);
  degree = (uint64_t*) malloc(sizeof(uint64_t) * n);
  neighbor = (uint64_t**)malloc(sizeof(uint64_t*) * n);
  for (i = 0; i < n; i++)
    degree[i] = 0;
  //vertex numbers in a tsv file start at 1
  /* first pass over the (u, v, weight) triples: out-degree of each u */
  for (i = 0; i < numItem; i += 3)
    degree[rawNum[i] - 1]++; //shift to 0-based indexing
  for (i = 0; i < n; i++) {
    if (degree[i])
      neighbor[i] = (uint64_t*) malloc(sizeof(uint64_t) * degree[i]);
    else
      neighbor[i] = NULL;
    degree[i] = 0; /* reset: reused as a fill cursor in the next loop */
  }
  /* second pass over the triples: record edge u -> v, skipping self-loops */
  for (i = 0; i < numItem; i += 3) {
    u = rawNum[i] - 1;
    v = rawNum[i + 1] - 1;
    if (u == v) {
      fprintf(stderr, "self-loop: i %lu, u %lu\n", i, u);
      continue;
    }
    neighbor[u] [degree[u]++] = v;
  }
  free(rawNum);
  if (verbose)
    /* numItem / 3 is the line (directed edge) count; ">> 1" halves it,
       assuming each undirected edge appears once per direction. */
    printf("n %lu, m %lu\n", n, numItem / 3 >> 1); //m is not set yet
}
//memory not freed: degree, neighbor, neighbor[*]
|
gamma_index_ivfpq.h | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This faiss source code is licensed under the MIT license.
* https://github.com/facebookresearch/faiss/blob/master/LICENSE
*
*
* The works below are modified based on faiss:
* 1. Replace the static batch indexing with real time indexing
* 2. Add the fine-grained sort after PQ coarse sort
* 3. Add the numeric field and bitmap filters in the process of searching
*
* Modified works copyright 2019 The Gamma Authors.
*
* The modified codes are licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*
*/
#ifndef GAMMA_INDEX_IVFPQ_H_
#define GAMMA_INDEX_IVFPQ_H_
#include <unistd.h>
#include <atomic>
#include "faiss/IndexIVF.h"
#include "faiss/IndexIVFPQ.h"
#include "faiss/VectorTransform.h"
#include "faiss/IndexHNSW.h"
#include "faiss/InvertedLists.h"
#include "faiss/impl/FaissAssert.h"
#include "faiss/impl/io.h"
#include "faiss/index_io.h"
#include "faiss/utils/Heap.h"
#include "faiss/utils/distances.h"
#include "faiss/utils/hamming.h"
#include "faiss/utils/utils.h"
#include "field_range_index.h"
#include "gamma_common_data.h"
#include "gamma_index_flat.h"
#include "gamma_scanner.h"
#include "log.h"
#include "memory_raw_vector.h"
#include "raw_vector.h"
#include "realtime_invert_index.h"
#include "retrieval_model.h"
#include "utils.h"
namespace tig_gamma {
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
struct IndexIVFPQStats {
  size_t nrefine;  // nb of refines (IVFPQR)

  size_t n_hamming_pass;
  // nb of passed Hamming distance tests (for polysemous)

  // timings measured with the CPU RTC
  // on all threads
  size_t search_cycles;
  size_t refine_cycles;  // only for IVFPQR

  IndexIVFPQStats() { reset(); }

  // BUG FIX: reset() had an empty body, so the constructor's reset()
  // call left every counter uninitialized and accumulated statistics
  // could never be cleared.  Zero all counters here.
  void reset() {
    nrefine = 0;
    n_hamming_pass = 0;
    search_cycles = 0;
    refine_cycles = 0;
  }
};
// global var that collects them all
extern IndexIVFPQStats indexIVFPQ_stats;
// namespace {
using idx_t = faiss::Index::idx_t;
// Read the CPU time-stamp counter for cheap cycle-level timing.
// Returns 0 on non-x86_64 builds, so timings are simply absent there.
static uint64_t get_cycles() {
#ifdef __x86_64__
  uint32_t high, low;
  // RDTSC returns the low 32 bits of the counter in EAX and the high
  // 32 bits in EDX; the "=a"/"=d" constraints bind them to low/high.
  asm volatile("rdtsc \n\t" : "=a"(low), "=d"(high));
  return ((uint64_t)high << 32) | (low);
#else
  return 0;
#endif
}
#define TIC t0 = get_cycles()
#define TOC get_cycles() - t0
/** QueryTables manages the various ways of searching an
* IndexIVFPQ. The code contains a lot of branches, depending on:
* - metric_type: are we computing L2 or Inner product similarity?
* - by_residual: do we encode raw vectors or residuals?
* - use_precomputed_table: are x_R|x_C tables precomputed?
* - polysemous_ht: are we filtering with polysemous codes?
*/
struct QueryTables {
  /*****************************************************
   * General data from the IVFPQ
   *****************************************************/

  const faiss::IndexIVFPQ &ivfpq;
  const faiss::IVFSearchParameters *params;

  // copied from IndexIVFPQ for easier access
  int d;
  const faiss::ProductQuantizer &pq;
  faiss::MetricType metric_type;
  bool by_residual;
  int use_precomputed_table;
  int polysemous_ht;

  // pre-allocated data buffers (all carved out of `mem`, see ctor)
  float *sim_table, *sim_table_2;
  float *residual_vec, *decoded_vec;

  // single data buffer
  std::vector<float> mem;

  // for table pointers
  std::vector<const float *> sim_table_ptrs;

  explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
                       const faiss::IVFSearchParameters *params,
                       faiss::MetricType metric_type)
      : ivfpq(ivfpq),
        // BUGFIX: `params` was never stored, leaving the member an
        // uninitialized pointer for any later reader.
        params(params),
        d(ivfpq.d),
        pq(ivfpq.pq),
        metric_type(metric_type),
        by_residual(ivfpq.by_residual),
        use_precomputed_table(ivfpq.use_precomputed_table) {
    // single allocation, laid out as
    // [sim_table | sim_table_2 | residual_vec | decoded_vec]
    mem.resize(pq.ksub * pq.M * 2 + d * 2);
    sim_table = mem.data();
    sim_table_2 = sim_table + pq.ksub * pq.M;
    residual_vec = sim_table_2 + pq.ksub * pq.M;
    decoded_vec = residual_vec + d;

    // for polysemous: search params may override the index-level threshold
    polysemous_ht = ivfpq.polysemous_ht;
    if (auto ivfpq_params =
            dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
      polysemous_ht = ivfpq_params->polysemous_ht;
    }
    if (polysemous_ht != 0) {
      q_code.resize(pq.code_size);
    }
    init_list_cycles = 0;
    sim_table_ptrs.resize(pq.M);
  }

  /*****************************************************
   * What we do when query is known
   *****************************************************/

  // field specific to query
  const float *qi;

  // query-specific initialization
  void init_query(const float *qi) {
    this->qi = qi;
    if (metric_type == faiss::METRIC_INNER_PRODUCT)
      init_query_IP();
    else
      init_query_L2();
    // encode the query itself for the polysemous Hamming pre-filter
    if (!by_residual && polysemous_ht != 0) pq.compute_code(qi, q_code.data());
  }

  void init_query_IP() {
    // precompute some tables specific to the query qi
    pq.compute_inner_prod_table(qi, sim_table);
  }

  void init_query_L2() {
    if (!by_residual) {
      pq.compute_distance_table(qi, sim_table);
    } else if (use_precomputed_table) {
      // only the query x centroid cross terms; combined with the
      // precomputed table per list in precompute_list_tables_L2()
      pq.compute_inner_prod_table(qi, sim_table_2);
    }
  }

  /*****************************************************
   * When inverted list is known: prepare computations
   *****************************************************/

  // fields specific to list
  long key;          // id of the current inverted list (coarse centroid)
  float coarse_dis;  // distance of the query to that centroid
  std::vector<uint8_t> q_code;

  uint64_t init_list_cycles;  // RTC cycles spent preparing list tables

  /// once we know the query and the centroid, we can prepare the
  /// sim_table that will be used for accumulation
  /// and dis0, the initial value
  float precompute_list_tables() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        dis0 = precompute_list_tables_IP();
      else
        dis0 = precompute_list_tables_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }

  // Same as above, but only sets up sim_table_ptrs (no table copy);
  // only implemented for L2 with precomputed tables.
  float precompute_list_table_pointers() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        FAISS_THROW_MSG("not implemented");
      else
        dis0 = precompute_list_table_pointers_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }

  /*****************************************************
   * compute tables for inner prod
   *****************************************************/

  float precompute_list_tables_IP() {
    // prepare the sim_table that will be used for accumulation
    // and dis0, the initial value
    ivfpq.quantizer->reconstruct(key, decoded_vec);
    // decoded_vec = centroid
    float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);

    if (polysemous_ht) {
      // the polysemous filter needs the code of the residual
      for (int i = 0; i < d; i++) {
        residual_vec[i] = qi[i] - decoded_vec[i];
      }
      pq.compute_code(residual_vec, q_code.data());
    }
    return dis0;
  }

  /*****************************************************
   * compute tables for L2 distance
   *****************************************************/

  float precompute_list_tables_L2() {
    float dis0 = 0;

    if (use_precomputed_table == 0 || use_precomputed_table == -1) {
      // no precomputed table: build the distance table from the residual
      ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      pq.compute_distance_table(residual_vec, sim_table);

      if (polysemous_ht != 0) {
        pq.compute_code(residual_vec, q_code.data());
      }

    } else if (use_precomputed_table == 1) {
      // sim_table = precomputed_table[key] - 2 * sim_table_2
      dis0 = coarse_dis;

      faiss::fvec_madd(pq.M * pq.ksub,
                       &ivfpq.precomputed_table[key * pq.ksub * pq.M], -2.0,
                       sim_table_2, sim_table);

      if (polysemous_ht != 0) {
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
        pq.compute_code(residual_vec, q_code.data());
      }

    } else if (use_precomputed_table == 2) {
      // coarse quantizer is a MultiIndexQuantizer: key decomposes into
      // cpq.M sub-indices of cpq.nbits bits each
      dis0 = coarse_dis;

      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;  // nb of fine sub-quantizers per coarse one

      const float *qtab = sim_table_2;  // query-specific table
      float *ltab = sim_table;          // (output) list-specific table

      long k = key;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        // compute PQ index
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;

        // get corresponding table
        const float *pc =
            &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];

        if (polysemous_ht == 0) {
          // sum up with query-specific table
          faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
          ltab += Mf * pq.ksub;
          qtab += Mf * pq.ksub;
        } else {
          // also build the query code on the fly (argmin per sub-quantizer)
          for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
            q_code[m] =
                faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
            pc += pq.ksub;
            ltab += pq.ksub;
            qtab += pq.ksub;
          }
        }
      }
    }

    return dis0;
  }

  float precompute_list_table_pointers_L2() {
    float dis0 = 0;

    if (use_precomputed_table == 1) {
      // all pointers go into one contiguous row of the precomputed table
      dis0 = coarse_dis;

      const float *s = &ivfpq.precomputed_table[key * pq.ksub * pq.M];
      for (size_t m = 0; m < pq.M; m++) {
        sim_table_ptrs[m] = s;
        s += pq.ksub;
      }
    } else if (use_precomputed_table == 2) {
      dis0 = coarse_dis;

      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;

      long k = key;
      int m0 = 0;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;

        const float *pc =
            &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];

        for (int m = m0; m < m0 + Mf; m++) {
          sim_table_ptrs[m] = pc;
          pc += pq.ksub;
        }
        m0 += Mf;
      }
    } else {
      FAISS_THROW_MSG("need precomputed tables");
    }

    if (polysemous_ht) {
      FAISS_THROW_MSG("not implemented");
      // Not clear that it makes sense to implemente this,
      // because it costs M * ksub, which is what we wanted to
      // avoid with the tables pointers.
    }

    return dis0;
  }
};
template <class C>
struct KnnSearchResults {
idx_t key;
const idx_t *ids;
// heap params
size_t k;
float *heap_sim;
idx_t *heap_ids;
size_t nup;
inline void add(idx_t j, float dis) {
if (C::cmp(heap_sim[0], dis)) {
faiss::heap_pop<C>(k, heap_sim, heap_ids);
idx_t id = ids ? ids[j] : (key << 32 | j);
faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
nup++;
}
}
};
/*****************************************************
* Scaning the codes.
* The scanning functions call their favorite precompute_*
* function to precompute the tables they need.
*****************************************************/
/// Scanner over the codes of one inverted list, templated on the id type
/// and the metric so the compiler specializes the inner loops.
template <typename IDType, faiss::MetricType METRIC_TYPE>
struct IVFPQScannerT : QueryTables {
  const uint8_t *list_codes;
  const IDType *list_ids;
  size_t list_size;

  explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq,
                         const faiss::IVFSearchParameters *params)
      : QueryTables(ivfpq, params, METRIC_TYPE) {
    // these scan loops only support 8-bit PQ codes (one byte per sub-code)
    FAISS_THROW_IF_NOT(pq.nbits == 8);
  }

  // code-independent distance term contributed by the coarse centroid
  float dis0;

  // Prepare per-list state; mode selects the precomputation strategy:
  // 2 = full distance tables, 1 = table pointers only, 0 = nothing.
  void init_list(idx_t list_no, float coarse_dis, int mode) {
    this->key = list_no;
    this->coarse_dis = coarse_dis;
    if (mode == 2) {
      dis0 = precompute_list_tables();
    } else if (mode == 1) {
      dis0 = precompute_list_table_pointers();
    }
  }

  /// tables are not precomputed, but pointers are provided to the
  /// relevant X_c|x_r tables
  template <class SearchResultType>
  void scan_list_with_pointer(size_t ncode, const uint8_t *codes,
                              SearchResultType &res) const {
    for (size_t j = 0; j < ncode; j++) {
      float dis = dis0;
      const float *tab = sim_table_2;
      // accumulate one table entry per sub-quantizer through the pointers
      for (size_t m = 0; m < pq.M; m++) {
        int ci = *codes++;
        dis += sim_table_ptrs[m][ci] - 2 * tab[ci];
        tab += pq.ksub;
      }
      res.add(j, dis);
    }
  }

  /// nothing is precomputed: access residuals on-the-fly
  template <class SearchResultType>
  void scan_on_the_fly_dist(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    const float *dvec;
    float dis0 = 0;  // shadows the member on purpose: local base distance
    if (by_residual) {
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        // IP decomposes as <q, centroid> + <q, residual>
        ivfpq.quantizer->reconstruct(key, residual_vec);
        dis0 = faiss::fvec_inner_product(residual_vec, qi, d);
      } else {
        // L2 is computed directly against the query residual
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      }
      dvec = residual_vec;
    } else {
      dvec = qi;
      dis0 = 0;
    }
    for (size_t j = 0; j < ncode; j++) {
      // decode the full vector, then compute the exact distance
      pq.decode(codes, decoded_vec);
      codes += pq.code_size;

      float dis;
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d);
      } else {
        dis = faiss::fvec_L2sqr(decoded_vec, dvec, d);
      }
      res.add(j, dis);
    }
  }

  /*****************************************************
   * Scanning codes with polysemous filtering
   *****************************************************/

  template <class HammingComputer, class SearchResultType>
  void scan_list_polysemous_hc(size_t ncode, const uint8_t *codes,
                               SearchResultType &res) const {
    int ht = ivfpq.polysemous_ht;
    size_t n_hamming_pass = 0;
    int code_size = pq.code_size;

    HammingComputer hc(q_code.data(), code_size);

    for (size_t j = 0; j < ncode; j++) {
      const uint8_t *b_code = codes;
      // cheap Hamming pre-filter: only compute the PQ distance for codes
      // within the polysemous threshold of the query code
      int hd = hc.hamming(b_code);
      if (hd < ht) {
        n_hamming_pass++;

        float dis = dis0;
        const float *tab = sim_table;

        for (size_t m = 0; m < pq.M; m++) {
          dis += tab[*b_code++];
          tab += pq.ksub;
        }
        res.add(j, dis);
      }
      codes += code_size;
    }
// statistics are shared across threads, hence the critical section
#pragma omp critical
    { indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; }
  }

  // Dispatch to a HammingComputer specialized for the code size.
  template <class SearchResultType>
  void scan_list_polysemous(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    switch (pq.code_size) {
#define HANDLE_CODE_SIZE(cs)                                               \
  case cs:                                                                 \
    scan_list_polysemous_hc<faiss::HammingComputer##cs, SearchResultType>( \
        ncode, codes, res);                                                \
    break
      HANDLE_CODE_SIZE(4);
      HANDLE_CODE_SIZE(8);
      HANDLE_CODE_SIZE(16);
      HANDLE_CODE_SIZE(20);
      HANDLE_CODE_SIZE(32);
      HANDLE_CODE_SIZE(64);
#undef HANDLE_CODE_SIZE
      default:
        // generic computers for multiples of 8 or 4 bytes
        if (pq.code_size % 8 == 0)
          scan_list_polysemous_hc<faiss::HammingComputerM8, SearchResultType>(
              ncode, codes, res);
        else
          scan_list_polysemous_hc<faiss::HammingComputerM4, SearchResultType>(
              ncode, codes, res);
        break;
    }
  }
};
/* struct GammaInvertedListScanner : faiss::InvertedListScanner { */
/* GammaInvertedListScanner() { retrieval_context_ = nullptr; } */
/* virtual size_t scan_codes_pointer(size_t ncode, const uint8_t **codes, */
/* const idx_t *ids, float *heap_sim, */
/* idx_t *heap_ids, size_t k) = 0; */
/* void set_search_context(RetrievalContext *retrieval_context) { */
/* this->retrieval_context_ = retrieval_context; */
/* } */
/* RetrievalContext *retrieval_context_; */
/* }; */
/// IVF-flat scanner: the `codes` pointer actually carries a RawVector*,
/// and the full float vectors are fetched from it on demand.
template <faiss::MetricType metric, class C>
struct GammaIVFFlatScanner : GammaInvertedListScanner {
  size_t d;  // vector dimension
  GammaIVFFlatScanner(size_t d) : d(d) {}

  const float *xi;  // current query vector (borrowed, not owned)
  void set_query(const float *query) override { this->xi = query; }

  idx_t list_no;  // currently scanned inverted list
  void set_list(idx_t list_no, float /* coarse_dis */) override {
    this->list_no = list_no;
  }

  // Distance between the query and one raw float vector stored as a code.
  float distance_to_code(const uint8_t *code) const override {
    const float *yj = (float *)code;
    float dis = metric == faiss::METRIC_INNER_PRODUCT
                    ? faiss::fvec_inner_product(xi, yj, d)
                    : faiss::fvec_L2sqr(xi, yj, d);
    return dis;
  }

  // Scan one list into the caller's k-best heap. Deleted ids
  // (kDelIdxMask) and ids rejected by the retrieval context are skipped.
  // Returns the number of heap updates.
  inline size_t scan_codes(size_t list_size, const uint8_t *codes,
                           const idx_t *ids, float *simi, idx_t *idxi,
                           size_t k) const override {
    // NOTE(review): `codes` is reinterpreted as a RawVector*, not a code
    // array -- this relies on the caller passing the raw-vector store here.
    RawVector *raw_vec = (RawVector *)codes;
    size_t nup = 0;
    for (size_t j = 0; j < list_size; j++) {
      if (ids[j] & realtime::kDelIdxMask) continue;  // tombstoned entry
      idx_t vid = ids[j] & realtime::kRecoverIdxMask;
      // NOTE(review): vid was just masked, so vid < 0 looks unreachable --
      // confirm against the mask definitions before removing.
      if (vid < 0) continue;
      if (retrieval_context_->IsValid(vid) == false) continue;
      ScopeVector svec;
      raw_vec->GetVector(vid, svec);
      const float *yj = (const float *)svec.Get();
      float dis = metric == faiss::METRIC_INNER_PRODUCT
                      ? faiss::fvec_inner_product(xi, yj, d)
                      : faiss::fvec_L2sqr(xi, yj, d);
      if (retrieval_context_->IsSimilarScoreValid(dis) && C::cmp(simi[0], dis)) {
        faiss::heap_pop<C>(k, simi, idxi);
        faiss::heap_push<C>(k, simi, idxi, dis, vid);
        nup++;
      }
    }
    return nup;
  }

  // Not used by the flat scanner; present to satisfy the interface.
  size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                            const idx_t *ids, float *heap_sim, idx_t *heap_ids,
                            size_t k) {
    return 0;
  }
};
/// Search-time parameters for the IVFPQ retrieval model: number of probed
/// inverted lists (nprobe), coarse recall size, query-vs-list
/// parallelization strategy, and an optional IVF-flat mode.
class IVFPQRetrievalParameters : public RetrievalParameters {
 public:
  IVFPQRetrievalParameters()
      : RetrievalParameters(),
        parallel_on_queries_(true),
        recall_num_(100),
        nprobe_(80),
        ivf_flat_(false) {}

  IVFPQRetrievalParameters(bool parallel_on_queries, int recall_num, int nprobe,
                           enum DistanceComputeType type, bool ivf_flat)
      : parallel_on_queries_(parallel_on_queries),
        recall_num_(recall_num),
        nprobe_(nprobe),
        ivf_flat_(ivf_flat) {
    distance_compute_type_ = type;  // inherited from RetrievalParameters
  }

  IVFPQRetrievalParameters(enum DistanceComputeType type)
      : parallel_on_queries_(true),
        recall_num_(100),
        nprobe_(80),
        ivf_flat_(false) {
    distance_compute_type_ = type;
  }

  virtual ~IVFPQRetrievalParameters() {}

  // --- simple accessors -------------------------------------------------
  int RecallNum() { return recall_num_; }
  void SetRecallNum(int recall_num) { recall_num_ = recall_num; }

  int Nprobe() { return nprobe_; }
  void SetNprobe(int nprobe) { nprobe_ = nprobe; }

  bool ParallelOnQueries() { return parallel_on_queries_; }
  void SetParallelOnQueries(bool parallel_on_queries) {
    parallel_on_queries_ = parallel_on_queries;
  }

  bool IvfFlat() { return ivf_flat_; }
  void SetIvfFlat(bool ivf_flat) { ivf_flat_ = ivf_flat; }

 protected:
  // parallelize over queries or ivf lists
  bool parallel_on_queries_;
  int recall_num_;
  int nprobe_;
  bool ivf_flat_;
};
struct IVFPQModelParams;
/// Gamma's realtime IVFPQ index: faiss IndexIVFPQ extended with realtime
/// inverted lists, delete/update support and context-filtered search.
struct GammaIVFPQIndex : GammaFLATIndex, faiss::IndexIVFPQ {
  GammaIVFPQIndex();

  virtual ~GammaIVFPQIndex();

  // scanner factories; the Gamma variants honor the retrieval context
  faiss::InvertedListScanner *get_InvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  GammaInvertedListScanner *GetGammaIVFFlatScanner(
      size_t d, faiss::MetricType metric_type);

  GammaInvertedListScanner *GetGammaInvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  // lifecycle: parse model params, build/refresh the index
  int Init(const std::string &model_parameters) override;
  RetrievalParameters *Parse(const std::string &parameters) override;
  int Indexing() override;

  // realtime vector maintenance
  bool Add(int n, const uint8_t *vec);

  int Update(const std::vector<int64_t> &ids,
             const std::vector<const uint8_t *> &vecs);

  // assign the vectors, then call search_preassign
  int Search(RetrievalContext *retrieval_context, int n, const uint8_t *x,
             int k, float *distances, idx_t *labels);

  void search_preassigned(RetrievalContext *retrieval_context, int n,
                          const float *x, const float *applied_x, int k,
                          const idx_t *keys, const float *coarse_dis,
                          float *distances, idx_t *labels, int nprobe,
                          bool store_pairs,
                          const faiss::IVFSearchParameters *params = nullptr);

  // brute-force variant over raw vectors (no PQ approximation)
  void search_ivf_flat(RetrievalContext *retrieval_context, int n,
                       const float *x, int k, const idx_t *keys,
                       const float *coarse_dis, float *distances, idx_t *labels,
                       int nprobe, bool store_pairs,
                       const faiss::IVFSearchParameters *params = nullptr);

  // memory held by the realtime inverted index (0 before Indexing)
  long GetTotalMemBytes() override {
    if (!rt_invert_index_ptr_) {
      return 0;
    }
    return rt_invert_index_ptr_->GetTotalMemBytes();
  }

  // persistence
  int Dump(const std::string &dir) override;

  int Load(const std::string &index_dir) override;

  virtual void copy_subset_to(faiss::IndexIVF &other, int subset_type, idx_t a1,
                              idx_t a2) const;

  int Delete(const std::vector<int64_t> &ids);

  int indexed_vec_count_;  // nb of vectors already folded into the index
  realtime::RTInvertIndex *rt_invert_index_ptr_;
  bool compaction_;
  size_t compact_bucket_no_;
  uint64_t compacted_num_;
  uint64_t updated_num_;
  int d_;  // vector dimension
  DistanceComputeType metric_type_;
  faiss::VectorTransform *opq_;  // optional OPQ rotation before PQ
  // 0 is FlatL2, 1 is HNSWFlat
  int quantizer_type_;
#ifdef PERFORMANCE_TESTING
  std::atomic<uint64_t> search_count_;
  int add_count_;
#endif
  IVFPQModelParams *model_param_;
};
/// PQ scanner combining the faiss IVFPQ scan loops (IVFPQScannerT) with
/// Gamma's realtime id masks and retrieval-context filtering.
template <faiss::MetricType METRIC_TYPE, class C, int precompute_mode>
struct GammaIVFPQScanner : IVFPQScannerT<idx_t, METRIC_TYPE>,
                           GammaInvertedListScanner {
  const GammaIVFPQIndex &gamma_ivfpq_;
  bool store_pairs_;  // when true, encode (list, offset) instead of ids

  GammaIVFPQScanner(const GammaIVFPQIndex &gamma_ivfpq, bool store_pairs)
      : IVFPQScannerT<idx_t, METRIC_TYPE>(gamma_ivfpq, nullptr),
        gamma_ivfpq_(gamma_ivfpq) {
    store_pairs_ = store_pairs;
  }

  // Table-based scan that additionally skips deleted ids and ids rejected
  // by the retrieval context; `codes` advances pq.M bytes per entry.
  template <class SearchResultType>
  void scan_list_with_table(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    size_t j = 0;
    for (; j < ncode; j++) {
      if (res.ids[j] & realtime::kDelIdxMask) {
        codes += this->pq.M;  // still step over the entry's codes
        continue;
      }
      if (!retrieval_context_->IsValid(res.ids[j] &
                                       realtime::kRecoverIdxMask)) {
        codes += this->pq.M;
        continue;
      }
      float dis = this->dis0;
      const float *tab = this->sim_table;
      // one table lookup per sub-quantizer
      for (size_t m = 0; m < this->pq.M; m++) {
        dis += tab[*codes++];
        tab += this->pq.ksub;
      }
      res.add(j, dis);
    }
    assert(j == ncode);
  }

  inline void set_query(const float *query) override {
    this->init_query(query);
  }

  inline void set_list(idx_t list_no, float coarse_dis) override {
    this->init_list(list_no, coarse_dis, precompute_mode);
  }

  // Only meaningful when the full tables are precomputed (mode 2).
  inline float distance_to_code(const uint8_t *code) const override {
    assert(precompute_mode == 2);
    float dis = this->dis0;
    const float *tab = this->sim_table;
    for (size_t m = 0; m < this->pq.M; m++) {
      dis += tab[*code++];
      tab += this->pq.ksub;
    }
    return dis;
  }

  // Scan one inverted list into the caller's k-best heap.
  // NOTE(review): always returns 0 rather than res.nup -- callers appear
  // to ignore the return value; confirm before relying on it.
  inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids,
                           float *heap_sim, idx_t *heap_ids,
                           size_t k) const override {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};

    if (this->polysemous_ht > 0) {
      assert(precompute_mode == 2);
      this->scan_list_polysemous(ncode, codes, res);
    } else if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else if (precompute_mode == 1) {
      this->scan_list_with_pointer(ncode, codes, res);
    } else if (precompute_mode == 0) {
      this->scan_on_the_fly_dist(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }

  // NOTE(review): passes `codes` (const uint8_t **) where
  // scan_list_with_table expects const uint8_t *; this only compiles while
  // the member template stays uninstantiated -- confirm intended usage.
  inline size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                                   const idx_t *ids, float *heap_sim,
                                   idx_t *heap_ids, size_t k) {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};
    if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }
};
} // namespace tig_gamma
#endif
|
DRB011-minusminus-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The -- operation on numNodes2 is not protected, causing data race.
Data race pair: numNodes2@74:7 vs. numNodes2@74:7
*/
#include <stdlib.h>
#include <stdio.h>
/* DataRaceBench DRB011: counts the non-positive entries of x[] in parallel.
   The decrement of numNodes2 is deliberately unsynchronized -- this file is
   a positive ("race exists") benchmark for race detectors. Do NOT add an
   atomic/reduction here; that would invalidate the benchmark. */
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int numNodes=len, numNodes2=0;
  int x[100];
  /* initialize x[]: even indices get 5, odd indices get -5 */
  for (i=0; i< len; i++)
  {
    if (i%2==0)
      x[i]=5;
    else
      x[i]= -5;
  }
  /* INTENTIONAL data race: numNodes2-- is an unprotected
     read-modify-write executed concurrently by multiple threads */
#pragma omp parallel for schedule(dynamic)
  for (i=numNodes-1 ; i>-1 ; --i) {
    if (x[i]<=0) {
      numNodes2-- ;
    }
  }
  printf ("numNodes2 = %d\n", numNodes2);
  return 0;
}
|
seriesSum.c | #include <stdio.h>
#include <math.h>
#include <omp.h>
/* Reads x and N from stdin and prints x^1 + x^2 + ... + x^n, computed in
   parallel with an OpenMP sum reduction over all available processors. */
int main(){
    /* BUGFIX: sum started at 1, adding a spurious constant term; the
       series documented below has no x^0 term, so it must start at 0. */
    int x, n, i, sum = 0;
    omp_set_dynamic(0);                 /* force the thread count we request */
    printf("Enter the values\nx = ");
    scanf("%d", &x);
    printf("N = ");
    scanf("%d", &n);
    int m = omp_get_num_procs();
    omp_set_num_threads(m);
    // x^1 + x^2 + x^3 + ... + x^n
    #pragma omp parallel for reduction(+:sum)
    for(i = 1; i <= n; i++){
        /* note: pow() returns double; the result is truncated to int */
        sum += pow(x, i);
    }
    printf("Sum = %d\n", sum);
    return 0;
}
intersectionFunctor.h | // Copyright 2014 Nicolas Mellado
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -------------------------------------------------------------------------- //
//
// Authors: Nicolas Mellado
//
// An implementation of the Super 4-points Congruent Sets (Super 4PCS)
// algorithm presented in:
//
// Super 4PCS: Fast Global Pointcloud Registration via Smart Indexing
// Nicolas Mellado, Dror Aiger, Niloy J. Mitra
// Symposium on Geometry Processing 2014.
//
// Data acquisition in large-scale scenes regularly involves accumulating
// information across multiple scans. A common approach is to locally align scan
// pairs using Iterative Closest Point (ICP) algorithm (or its variants), but
// requires static scenes and small motion between scan pairs. This prevents
// accumulating data across multiple scan sessions and/or different acquisition
// modalities (e.g., stereo, depth scans). Alternatively, one can use a global
// registration algorithm allowing scans to be in arbitrary initial poses. The
// state-of-the-art global registration algorithm, 4PCS, however has a quadratic
// time complexity in the number of data points. This vastly limits its
// applicability to acquisition of large environments. We present Super 4PCS for
// global pointcloud registration that is optimal, i.e., runs in linear time (in
// the number of data points) and is also output sensitive in the complexity of
// the alignment problem based on the (unknown) overlap across scan pairs.
// Technically, we map the algorithm as an ‘instance problem’ and solve it
// efficiently using a smart indexing data organization. The algorithm is
// simple, memory-efficient, and fast. We demonstrate that Super 4PCS results in
// significant speedup over alternative approaches and allows unstructured
// efficient acquisition of scenes at scales previously not possible. Complete
// source code and datasets are available for research use at
// http://geometry.cs.ucl.ac.uk/projects/2014/super4PCS/.
#pragma once
#include "gr/accelerators/pairExtraction/intersectionNode.h"
#include <list>
#include <iostream>
namespace gr{
//! \brief Round \p epsilon down to the conservative power of two 2^-lvlMax
//!        and optionally report the matching subdivision level in \p lvl.
template <typename Scalar>
static Scalar GetRoundedEpsilonValue(Scalar epsilon, int* lvl = nullptr) {
    // Deepest grid level reachable with this accuracy.
    const int lvlMax = -std::log2(epsilon);
    if (lvl != nullptr) *lvl = lvlMax;
    // 2^-lvlMax: the closest conservative epsilon value.
    return Scalar(std::ldexp(double(1), -lvlMax));
}
//! \brief Extract pairs of points by rasterizing primitives and collect points
/*!
* Acceleration technique used in Super4PCS
* \todo Use Traits to allow custom parameters but similar API between variants
* \see BruteForceFunctor
*/
template <class _Primitive, class _Point, int _dim, typename _Scalar>
struct IntersectionFunctor{
  typedef _Point Point;
  typedef _Primitive Primitive;
  typedef _Scalar Scalar;
  enum { dim = _dim };  // ambient space dimension

  //! Rasterize the primitives over an implicit 2^lvl grid of Q and emit
  //! every (primitive, point) pair within epsilon (defined below).
  template <class PrimitiveContainer,
            class PointContainer,
            class ProcessingFunctor> //!< Process the extracted pairs
  void
  process(
      const PrimitiveContainer& M, //!< Input primitives to intersect with Q
      const PointContainer & Q, //!< Normalized input point set \in [0:1]^d
      Scalar &epsilon, //!< Intersection accuracy, refined (rounded to 2^-k)
      unsigned int minNodeSize, //!< Min number of points in nodes
      ProcessingFunctor& functor
      );
};
/*!
\return Pairs< PointId, PrimitiveId>
*/
/*!
  Two-pass rasterization: an octree-like top-down subdivision keeps only the
  grid nodes intersecting at least one primitive, then every surviving leaf
  (and every node frozen early because it held <= minNodeSize points) is
  tested point-by-point against each primitive.
  \return Pairs< PointId, PrimitiveId> reported through \p functor
 */
template <class Primitive, class Point, int dim, typename Scalar>
template <class PrimitiveContainer,
          class PointContainer,
          class ProcessingFunctor>
void
IntersectionFunctor<Primitive, Point, dim, Scalar>::process(
    const PrimitiveContainer& M, //!< Input primitives to intersect with Q
    const PointContainer & Q, //!< Normalized input point set \in [0:1]^d
    Scalar &epsilon, //!< Intersection accuracy in [0:1]
    unsigned int minNodeSize, //!< Min number of points in nodes
    ProcessingFunctor& functor
    )
{
  using std::pow;

  // types definitions
  typedef NdNode<Point, dim, Scalar, PointContainer> Node;
  typedef typename std::vector<Node> NodeContainer;

  typedef typename std::pair<unsigned int, unsigned int> ResPair;
  typedef typename std::vector<ResPair> ResContainer;

  // Global variables
  const unsigned int nbPoint = Q.size(); //!< Number of points
  int lvlMax = 0;
  // epsilon is rounded to 2^-lvlMax so grid cells align with it exactly
  epsilon = GetRoundedEpsilonValue(epsilon, &lvlMax);
  int clvl = 0; //!< Current level

  // Use local array and manipulate references to avoid array copies
  NodeContainer ping, pong;
  NodeContainer* nodes = &ping; //!< Nodes of the current level
  NodeContainer* childNodes = &pong; //!< Child nodes for the next level

  //! Nodes too small for split (frozen with their query radius)
  std::vector< std::pair<Node, Scalar> > earlyNodes;

  //
  // // Fill the idContainer with identity values
  if (functor.ids.size() != nbPoint){
    std::cout << "[IntersectionFunctor] Init id array" << std::endl;
    functor.ids.clear();
    for(unsigned int i = 0; i < nbPoint; i++)
      functor.ids.push_back(i);
  }

  // Buid root node in the child node, will be copied to the current nodes
  childNodes->push_back(Node::buildUnitRootNode(Q, functor.ids));

  Scalar edgeLength { 0 };
  Scalar edgeHalfLength { 0 };

  // First Loop: subdivide down to level lvlMax-1, keeping only nodes that
  // intersect at least one primitive (within the node radius + epsilon)
  while (clvl != lvlMax-1){
    // Stop if we not have any nodes to checks
    if (childNodes->empty())
      break;

    edgeLength = Scalar(1)/pow(2, clvl);
    edgeHalfLength = edgeLength/Scalar(2);

    // swap pointers: last level's children become the current nodes
    std::swap(nodes, childNodes);
    childNodes->clear();

    //#pragma omp parallel
    for(typename NodeContainer::iterator nit = nodes->begin();
        nit != nodes->end(); nit++){
      Node &n = *nit;

      // Check if the current node intersect one of the primitives
      // In this case, subdivide, store new nodes and stop the loop
      for(typename PrimitiveContainer::const_iterator pit = M.begin();
          pit != M.end(); pit++){

        if ((*pit).intersect(n.center(), edgeHalfLength+epsilon)){
          // There is two options now: either there is already few points in the
          // current node, in that case we stop splitting it, or we split.
          if (n.rangeLength() > int(minNodeSize)){
            //#pragma omp critical
            n.split(*childNodes, edgeHalfLength);
          }else{
            //#pragma omp critical
            earlyNodes.emplace_back(n, edgeHalfLength+epsilon);
          }
          break;  // one intersecting primitive is enough to keep the node
        }
      }
    }
    clvl++;
  }

  // Second Loop: exact point-level tests against each primitive
  ResContainer results;
  results.reserve(childNodes->size());

  unsigned int pId = 0;
  for(typename PrimitiveContainer::const_iterator itP = M.begin();
      itP != M.end(); itP++, pId++){

    // add childs (leaves of the subdivision)
    for(typename NodeContainer::const_iterator itN = childNodes->begin();
        itN != childNodes->end(); itN++){

      if ((*itP).intersect((*itN).center(), epsilon*Scalar(2))){

        functor.beginPrimitiveCollect(pId);
        for(unsigned int j = 0; j!= (unsigned int)((*itN).rangeLength()); j++){
          // pId > id: only report each unordered pair once
          if(pId>(*itN).idInRange(j))
            if((*itP).intersectPoint((*itN).pointInRange(j),epsilon))
              functor.process(pId, (*itN).idInRange(j));
        }
        functor.endPrimitiveCollect(pId);
      }
    }

    // add other leafs (nodes frozen early because they were small)
    for(typename std::vector< std::pair<Node, Scalar> >::const_iterator itPairs =
        earlyNodes.begin();
        itPairs != earlyNodes.end();
        itPairs++){

      if((*itP).intersect((*itPairs).first.center(), (*itPairs).second)){

        // Notice the functor we are collecting points for the current primitive
        functor.beginPrimitiveCollect(pId);
        for(unsigned int j = 0; j!= (unsigned int)((*itPairs).first.rangeLength()); j++){
          if(pId>(*itPairs).first.idInRange(j))
            if((*itP).intersectPoint((*itPairs).first.pointInRange(j),epsilon))
              functor.process(pId, (*itPairs).first.idInRange(j));
        }
        functor.endPrimitiveCollect(pId);
      }
    }
  }
}
} // namespace gr
|
/* Doubles every element of `in` into `out` (out[i] = 2 * in[i]),
 * offloaded to the target device with OpenMP; `in` is mapped to the
 * device and `out` mapped back. */
void func_1v(float* in, float* out, unsigned n){
    unsigned idx;
    #pragma omp target teams distribute parallel for map(to: in[0:n]) map(from: out[0:n])
    for(idx = 0; idx < n; ++idx){
        out[idx] = in[idx] * 2.0f;
    }
}
GB_unaryop__ainv_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_uint64
// op(A') function: GB_tran__ainv_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (uint32_t) (-Ax [p]) for all p in [0, anz), statically
   parallelized over nthreads. Auto-generated: the per-type behavior is
   supplied by the GB_* macros defined above (do not edit by hand). */
GrB_Info GB_unop__ainv_uint32_uint64
(
    uint32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    /* operator/type combination compiled out via GxB_NO_* flags */
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual transpose kernel is textually included from
// GB_unaryop_transpose.c, which expands the GB_* macros defined above for
// this particular type/op combination.  Rowcounts/Iter/A_slice/naslice
// describe the precomputed slicing of A across threads (set up by the
// caller; see GB_iterator.h).
GrB_Info GB_tran__ainv_uint32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
for-6.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
extern void bar(int);
/* An orphaned OpenMP worksharing loop over n iterations.  With
   schedule(runtime) the compiler must lower this to calls to
   GOMP_loop_runtime_start / GOMP_loop_runtime_next, which the dg-final
   tree-dump scans below verify -- do not change the loop shape. */
void foo (int n)
{
int i;
#pragma omp for schedule(runtime)
for (i = 0; i < n; ++i)
bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_loop_runtime_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_runtime_next" 1 "ompexp" } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
|
transpose.c | /*
*
*/
#include <stdlib.h>
#include <complex.h>
#include "np_helper.h"
/*
 * Transpose helpers: `a` is a row-major n x m matrix; `at` receives its
 * m x n transpose (no conjugation in the complex variant).
 */
/*
 * Out-of-place transpose: at (m x n) = transpose of the row-major
 * n x m matrix a.  The source-row index is processed in stripes of
 * BLOCK_DIM so both the reads of a and the writes of at stay within
 * a small, cache-friendly working set.
 */
void NPdtranspose(int n, int m, double *a, double *at)
{
        size_t col, row, blk, blk_end;
        for (blk = 0; blk < n; blk += BLOCK_DIM) {
                blk_end = MIN(blk + BLOCK_DIM, n);
                for (col = 0; col < m; col++) {
                        for (row = blk; row < blk_end; row++) {
                                at[col*n+row] = a[row*m+col];
                        }
                }
        }
}
/*
 * Complex analogue of NPdtranspose: at (m x n) = transpose of the
 * row-major n x m matrix a (plain transpose, no conjugation).
 * Blocked over the source-row index by BLOCK_DIM for cache locality.
 */
void NPztranspose(int n, int m, double complex *a, double complex *at)
{
        size_t col, row, blk, blk_end;
        for (blk = 0; blk < n; blk += BLOCK_DIM) {
                blk_end = MIN(blk + BLOCK_DIM, n);
                for (col = 0; col < m; col++) {
                        for (row = blk; row < blk_end; row++) {
                                at[col*n+row] = a[row*m+col];
                        }
                }
        }
}
/*
 * Batched (0,2,1) transpose: a has shape [shape[0]][shape[1]][shape[2]];
 * each of the shape[0] matrices is transposed independently into at.
 * Batches are distributed statically over the OpenMP threads.
 */
void NPdtranspose_021(int *shape, double *a, double *at)
{
#pragma omp parallel default(none) \
        shared(shape, a, at)
{
        int batch;
        size_t mat_size = shape[1] * shape[2];
#pragma omp for schedule (static)
        for (batch = 0; batch < shape[0]; batch++) {
                NPdtranspose(shape[1], shape[2], a+batch*mat_size, at+batch*mat_size);
        }
}
}
/*
 * Complex batched (0,2,1) transpose: each of the shape[0] matrices of
 * size shape[1] x shape[2] in a is transposed independently into at.
 * Batches are distributed statically over the OpenMP threads.
 */
void NPztranspose_021(int *shape, double complex *a, double complex *at)
{
#pragma omp parallel default(none) \
        shared(shape, a, at)
{
        int batch;
        size_t mat_size = shape[1] * shape[2];
#pragma omp for schedule (static)
        for (batch = 0; batch < shape[0]; batch++) {
                NPztranspose(shape[1], shape[2], a+batch*mat_size, at+batch*mat_size);
        }
}
}
/*
 * Fold the n x n matrix `a` with its transpose into `out`:
 *   hermi == HERMITIAN or SYMMETRIC:  out = a + a^T  (out symmetric)
 *   otherwise:                        out = a - a^T  (out antisymmetric)
 * NOTE(review): j0/j1 look unused here, but TRIU_LOOP is a macro from
 * np_helper.h that presumably expands to a blocked upper-triangle loop
 * referencing j0/j1 (and n) -- confirm before removing them.
 */
void NPdsymm_sum(int n, double *a, double *out, int hermi)
{
size_t i, j, j0, j1;
double tmp;
if (hermi == HERMITIAN || hermi == SYMMETRIC) {
TRIU_LOOP(i, j) {
tmp = a[i*n+j] + a[j*n+i];
out[i*n+j] = tmp;
out[j*n+i] = tmp;
}
} else {
TRIU_LOOP(i, j) {
tmp = a[i*n+j] - a[j*n+i];
out[i*n+j] = tmp;
out[j*n+i] =-tmp;
}
}
}
/*
 * Fold the n x n complex matrix `a` with its (conjugate) transpose:
 *   hermi == HERMITIAN:  out = a + a^H  (out Hermitian)
 *   hermi == SYMMETRIC:  out = a + a^T  (out symmetric, no conjugation)
 *   otherwise:           out = a - a^H  (out anti-Hermitian)
 * NOTE(review): j0/j1 appear unused, but TRIU_LOOP is a macro from
 * np_helper.h that presumably expands to a blocked triangle loop
 * referencing them -- confirm before removing.
 */
void NPzhermi_sum(int n, double complex *a, double complex *out, int hermi)
{
size_t i, j, j0, j1;
double complex tmp;
if (hermi == HERMITIAN) {
TRIU_LOOP(i, j) {
tmp = a[i*n+j] + conj(a[j*n+i]);
out[i*n+j] = tmp;
out[j*n+i] = conj(tmp);
}
} else if (hermi == SYMMETRIC) {
TRIU_LOOP(i, j) {
tmp = a[i*n+j] + a[j*n+i];
out[i*n+j] = tmp;
out[j*n+i] = tmp;
}
} else {
TRIU_LOOP(i, j) {
tmp = a[i*n+j] - conj(a[j*n+i]);
out[i*n+j] = tmp;
out[j*n+i] =-conj(tmp);
}
}
}
/*
 * Batched real symmetrization: for each of the shape[0] square matrices
 * of order shape[1], compute NPdsymm_sum (a +/- a^T depending on hermi).
 * Batches are distributed statically over the OpenMP threads.
 */
void NPdsymm_021_sum(int *shape, double *a, double *out, int hermi)
{
#pragma omp parallel default(none) \
        shared(shape, a, out, hermi)
{
        int batch;
        size_t mat_size = shape[1] * shape[1];
#pragma omp for schedule (static)
        for (batch = 0; batch < shape[0]; batch++) {
                NPdsymm_sum(shape[1], a+batch*mat_size, out+batch*mat_size, hermi);
        }
}
}
/*
 * Batched complex (anti-)hermitization: for each of the shape[0] square
 * matrices of order shape[1], compute NPzhermi_sum with the given hermi
 * mode.  Batches are distributed statically over the OpenMP threads.
 */
void NPzhermi_021_sum(int *shape, double complex *a, double complex *out, int hermi)
{
#pragma omp parallel default(none) \
        shared(shape, a, out, hermi)
{
        int batch;
        size_t mat_size = shape[1] * shape[1];
#pragma omp for schedule (static)
        for (batch = 0; batch < shape[0]; batch++) {
                NPzhermi_sum(shape[1], a+batch*mat_size, out+batch*mat_size, hermi);
        }
}
}
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal
  * Stores or retrieves the user-requested maximum thread count.
  * - SetAction: records *v as the requested maximum.
  * - GetAction: writes the effective maximum into *v -- the stored value
  *   if positive, otherwise omp_get_max_threads() (or 1 when OpenMP is
  *   not available).
  * Any other action is a programming error.
  */
inline void manage_multi_threading(Action action, int* v)
{
  static int stored_max_threads = -1;
  EIGEN_UNUSED_VARIABLE(stored_max_threads);
  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    stored_max_threads = *v;
    return;
  }
  if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
    *v = (stored_max_threads>0) ? stored_max_threads : omp_get_max_threads();
#else
    *v = 1;
#endif
    return;
  }
  eigen_internal_assert(false);
}
}
/** Must be called first when calling Eigen from multiple threads:
  * queries the thread count and the cache sizes once (discarding the
  * results) while presumably still single-threaded, so the underlying
  * static state is initialized before concurrent use. */
inline void initParallel()
{
  int thread_count_dummy;
  internal::manage_multi_threading(GetAction, &thread_count_dummy);
  std::ptrdiff_t l1_size, l2_size, l3_size;
  internal::manage_caching_sizes(GetAction, &l1_size, &l2_size, &l3_size);
}
/** \returns the max number of threads reserved for Eigen
  * (the value set via setNbThreads(), or the OpenMP default).
  * \sa setNbThreads */
inline int nbThreads()
{
  int count;
  internal::manage_multi_threading(GetAction, &count);
  return count;
}
/** Sets the max number of threads reserved for Eigen.
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  // SetAction only reads through the pointer, so a local copy is fine.
  int requested = v;
  internal::manage_multi_threading(SetAction, &requested);
}
namespace internal {
// Per-thread bookkeeping for a multi-threaded GEMM.
// NOTE(review): the semantics live in the GEMM kernel that consumes this
// struct (not visible here); sync/users are declared volatile because they
// are presumably written concurrently by cooperating threads -- confirm
// against the product kernel before relying on this.
template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
// Concurrently-updated coordination fields (hence volatile).
Index volatile sync;
int volatile users;
// Row range of the lhs assigned to this thread (set in parallelize_gemm).
Index lhs_start;
Index lhs_length;
};
// Runs func over the full rows x cols product, either serially or split
// across OpenMP threads.  `Condition` statically enables threading;
// `depth` feeds the work heuristic; `transpose` indicates a row-major
// destination, in which case the split is done over rows instead of
// columns.  func's interface: func(r0, nrows, c0, ncols [, info]).
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redisigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
// For a row-major destination, split over rows by swapping the roles of
// rows and cols from here on.
if(transpose)
std::swap(rows,cols);
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads();
// Column blocks are rounded down to a multiple of 4 and row blocks to a
// multiple of Traits::mr; the last thread picks up the remainders.
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
explicit.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
#include <omp.h>
// OMPT test: two threads each increment x, meet at an explicit barrier,
// then increment again.  print_current_address() (from callback.h) emits
// an address that the CHECK lines below match against the barrier
// callbacks' codeptr_ra.  Do not reshape the region: the FileCheck
// patterns depend on exactly this barrier/atomic placement.
int main()
{
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp atomic
x++;
#pragma omp barrier
print_current_address();
#pragma omp atomic
x++;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// master thread explicit barrier
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
// master thread implicit barrier at parallel end
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
// worker thread explicit barrier
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[THREAD_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
// worker thread implicit barrier at parallel end
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[WRA:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[WRA]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
return 0;
}
|
DRB041-3mm-parallel-no.c | /**
* 3mm.c: This file is part of the PolyBench/C 3.2 test suite.
* three steps of matrix multiplication to multiply four matrices.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "polybench/3mm.h"
/* Array initialization. */
static void init_array(int ni,int nj,int nk,int nl,int nm,double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0])
{
/*
 * Machine-generated (polyhedral) initializer.  Logically it fills:
 *   A[i][j] = i*j/ni,  B[i][j] = i*(j+1)/nj,
 *   C[i][j] = i*(j+3)/nl,  D[i][j] = i*(j+2)/nk
 * over their index domains, but the iteration space has been split into
 * many guarded loop nests, one per relative ordering of ni..nm, so each
 * nest writes exactly the subset of {A,B,C,D} valid on that region.
 * Do not hand-edit the min/max ternary bounds.
 * NOTE(review): the nested "#pragma omp parallel for" directives create
 * nested parallel regions; benign here (DRB041 is a race-free kernel),
 * as every iteration writes distinct (c1,c2) cells.
 */
//int i;
//int j;
{
int c2;
int c1;
if (nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) < nm + -1?((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) : nm + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (nl > nm?nl : nm); c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= ((((nk + -1 < nl + -1?nk + -1 : nl + -1)) < nm + -1?((nk + -1 < nl + -1?nk + -1 : nl + -1)) : nm + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nl?nj : nl); c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nm?nj : nm); c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (nk > nl?nk : nl); c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = (nk > nm?nk : nm); c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nk?nj : nk); c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nm?nj : nm); c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = (nk > nm?nk : nm); c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nk?nj : nk); c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nm >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = nm; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nm + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
}
}
if (nm <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = 0; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
}
}
if (nj >= 1 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= nj + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nl?nj : nl); c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nj >= 1 && nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nj + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
}
}
if (nj >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nj + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
}
}
if (nj <= 0 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nj <= 0 && nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
}
}
if (nj <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (0 > nm?0 : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
}
}
if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (nk > nl?nk : nl); c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nk >= 1 && nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nk >= 1 && nm >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (nk > nm?nk : nm); c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nk >= 1 && nm <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = nk; c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
}
}
if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
#pragma omp parallel for private(c2 )
for (c2 = nk; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nk >= 1 && nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
}
}
if (nk >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c1 <= ni + -1; c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nk + -1; c2++) {
A[c1][c2] = ((double )c1) * c2 / ni;
}
}
}
if (nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (nl > nm?nl : nm); c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nl?nj : nl); c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = (nj > nm?nj : nm); c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nm >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (ni > nm?ni : nm); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nm + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
}
}
if (nm <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (0 > ni?0 : ni); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
}
}
if (nj >= 1 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
#pragma omp parallel for private(c2 )
for (c2 = nj; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nj >= 1 && nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
}
}
if (nj >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c1 <= nk + -1; c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nj + -1; c2++) {
B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
}
}
}
if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nk >= 1 && nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nk >= 1 && nm >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c1 <= nj + -1; c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nk <= 0 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
#pragma omp parallel for private(c2 )
for (c2 = nl; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
#pragma omp parallel for private(c2 )
for (c2 = nm; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nk <= 0 && nl <= 0) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nk <= 0 && nm >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = nm; c1 <= nj + -1; c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nm + -1; c2++) {
C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
}
}
}
if (nj <= 0 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (0 > ni?0 : ni); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c1 <= nm + -1; c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
if (nk <= 0 && nl >= 1) {
#pragma omp parallel for private(c1 ,c2 )
for (c1 = (0 > nj?0 : nj); c1 <= nm + -1; c1++) {
#pragma omp parallel for private(c2 )
for (c2 = 0; c2 <= nl + -1; c2++) {
D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int ni,int nl,double G[128 + 0][128 + 0])
{
  /* Stream every element of the live-out matrix G to stderr so the
     compiler cannot dead-code-eliminate the kernel; a newline is
     emitted every 20 values to keep the dump readable. */
  int row, col;
  for (row = 0; row < ni; row++) {
    for (col = 0; col < nl; col++) {
      fprintf(stderr,"%0.2lf ",G[row][col]);
      if ((row * ni + col) % 20 == 0)
        fprintf(stderr,"\n");
    }
  }
  fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_3mm(int ni,int nj,int nk,int nl,int nm,double E[128 + 0][128 + 0],double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double F[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0],double G[128 + 0][128 + 0])
{
  /* 3mm kernel: E = A*B, F = C*D, G = E*F (all matrices 128x128).
     The ni..nm extents are part of the generated interface, but the
     polyhedral generator specialized every loop bound to 127.

     BUG FIX: the generator also emitted `#pragma omp parallel for` on
     the inner loops, including the c5 reduction loops that accumulate
     into E[c1][c2] and F[c1][c2].  With nested parallelism enabled
     (OMP_NESTED=true) those inner regions race on the `+=`; with it
     disabled they only add region-creation overhead.  The inner
     pragmas are removed; each outer loop over rows c1 is safely
     parallel because rows are independent. */
  //int i;
  //int j;
  //int k;
  //#pragma scop
{
    int c1;
    int c2;
    int c5;
    /* Zero the two accumulators F and G. */
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= 127; c1++) {
      for (c2 = 0; c2 <= 127; c2++) {
        G[c1][c2] = 0;
        F[c1][c2] = 0;
      }
    }
    /* F = C * D */
#pragma omp parallel for private(c2, c5)
    for (c1 = 0; c1 <= 127; c1++) {
      for (c2 = 0; c2 <= 127; c2++) {
        for (c5 = 0; c5 <= 127; c5++) {
          F[c1][c2] += C[c1][c5] * D[c5][c2];
        }
      }
    }
    /* E = 0 */
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= 127; c1++) {
      for (c2 = 0; c2 <= 127; c2++) {
        E[c1][c2] = 0;
      }
    }
    /* Fused phase: compute E = A*B and immediately fold each finished
       E[c1][c2] into row c1 of G = E*F.  F is fully computed above and
       E[c1][c2] is complete before it is used, so the serial order is
       preserved within each independent row c1. */
#pragma omp parallel for private(c2, c5)
    for (c1 = 0; c1 <= 127; c1++) {
      for (c2 = 0; c2 <= 127; c2++) {
        for (c5 = 0; c5 <= 127; c5++) {
          E[c1][c2] += A[c1][c5] * B[c5][c2];
        }
        for (c5 = 0; c5 <= 127; c5++) {
          G[c1][c5] += E[c1][c2] * F[c2][c5];
        }
      }
    }
}
  //#pragma endscop
}
int main(int argc,char **argv)
{
  /* Driver for the 3mm benchmark: allocate the seven 128x128 matrices,
     initialize the inputs, time the kernel, and optionally dump the
     live-out data to defeat dead-code elimination. */
  /* Retrieve problem size (fixed at 128 for this generated variant). */
  int ni = 128;
  int nj = 128;
  int nk = 128;
  int nl = 128;
  int nm = 128;
  /* Variable declaration/allocation. */
  double (*E)[128][128] = ((double (*)[128][128])(polybench_alloc_data(((128) * (128)),(sizeof(double )))));
  double (*A)[128][128] = ((double (*)[128][128])(polybench_alloc_data(((128) * (128)),(sizeof(double )))));
  double (*B)[128][128] = ((double (*)[128][128])(polybench_alloc_data(((128) * (128)),(sizeof(double )))));
  double (*F)[128][128] = ((double (*)[128][128])(polybench_alloc_data(((128) * (128)),(sizeof(double )))));
  double (*C)[128][128] = ((double (*)[128][128])(polybench_alloc_data(((128) * (128)),(sizeof(double )))));
  double (*D)[128][128] = ((double (*)[128][128])(polybench_alloc_data(((128) * (128)),(sizeof(double )))));
  double (*G)[128][128] = ((double (*)[128][128])(polybench_alloc_data(((128) * (128)),(sizeof(double )))));
  /* Initialize the input arrays. */
  init_array(ni,nj,nk,nl,nm, *A, *B, *C, *D);
  /* Time the kernel, including call and return. */
  polybench_timer_start();
  kernel_3mm(ni,nj,nk,nl,nm, *E, *A, *B, *F, *C, *D, *G);
  polybench_timer_stop();
  polybench_timer_print();
  /* Prevent dead-code elimination.  All live-out data must be printed
     by the function call in argument (the guard is never true in a
     normal run, but the compiler cannot prove that). */
  if (argc > 42 && !strcmp(argv[0],"")) {
    print_array(ni,nl, *A);
    print_array(ni,nl, *B);
    print_array(ni,nl, *C);
    print_array(ni,nl, *D);
    print_array(ni,nl, *E);
    print_array(ni,nl, *F);
    print_array(ni,nl, *G);
  }
  /* Be clean. */
  free(((void *)E));
  free(((void *)A));
  free(((void *)B));
  free(((void *)F));
  free(((void *)C));
  free(((void *)D));
  free(((void *)G));
  return 0;
}
|
GB_binop__bxnor_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint64)
// A*D function (colscale): GB (_AxD__bxnor_uint64)
// D*A function (rowscale): GB (_DxB__bxnor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint64)
// C=scalar+B GB (_bind1st__bxnor_uint64)
// C=scalar+B' GB (_bind1st_tran__bxnor_uint64)
// C=A+scalar GB (_bind2nd__bxnor_uint64)
// C=A'+scalar GB (_bind2nd_tran__bxnor_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_UINT64 || GxB_NO_BXNOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// dead code: this definition sits inside "#if 0" because bxnor is not one
// of the accumulating ops listed above, so the template is never compiled
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bxnor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B where all three matrices are dense; all work is in the template,
// which applies GB_BINOP (cij = ~(aij ^ bij)) elementwise
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxnor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// accumulate matrix B into the dense matrix C; returns GrB_NO_VALUE when
// this operator/type combination is compiled out (GB_DISABLE)
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxnor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned; this
// duplicate return is emitted by the code generator and is harmless
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bxnor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typecast view of C's numeric values; the colscale template writes Cx [p]
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bxnor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typecast view of C's numeric values; the rowscale template writes Cx [p]
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxnor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces for M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only initialized (and only read) for eWiseUnion
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bxnor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult producing a sparse/hypersparse C; the meta-template dispatches
// on the mask and sparsity cases
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxnor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult where A is sparse/hyper and B is bitmap/full; GB_BINOP_FLIP is
// 0 for bxnor (commutative), so only the non-flipped branch is compiled
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxnor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult with a sparse/hyper mask M and bitmap/full A and B
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxnor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult where the result C is held in bitmap form
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxnor_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
// Cx [p] = bxnor (x, Bx [p]) for all bnz entries, with x bound as the
// first operand; parallelized statically over nthreads
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// GBB tests presence in the bitmap Bb; absent entries are skipped
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxnor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
// Cx [p] = bxnor (Ax [p], y) for all anz entries, with y bound as the
// second operand; parallelized statically over nthreads
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests presence in the bitmap Ab; absent entries are skipped
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB (_bind1st_tran__bxnor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// the directives below restore GB_ATYPE; although this point is never
// reached at run time, the preprocessor still processes them
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = bxnor (A', y): the transpose template applies GB_CAST_OP (defined
// just above) to each entry while transposing A
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Compute *result = *x - *y with tv_usec normalized into
     [0, 1000000).  *y is used as scratch and is modified.
     Returns 1 when the difference is negative (x earlier than y),
     otherwise 0. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int chunks = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * chunks;
    y->tv_sec -= chunks;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  /* Driver for the order-4 3D 25-point variable-coefficient stencil:
     allocate two ping-pong field buffers and 13 coefficient volumes,
     initialize with fixed-seed random data, run the kernel TESTS times,
     and report the best time. */
  int t, i, j, k, m, test;
  /* Grid extents (+8 = a 4-deep halo on each side) and time steps.
     BUG FIX: these were read uninitialized when fewer than 3 (or 4)
     arguments were supplied; defaults make that case well defined
     (halo-only grid, zero time steps => compute loop is a no-op). */
  int Nx = 8, Ny = 8, Nz = 8, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1]) + 8;
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Two time buffers A[0]/A[1], each Nz x Ny x Nx. */
  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  for (m = 0; m < 2; m++) {
    A[m] = (double ***) malloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
      A[m][i] = (double **) malloc(sizeof(double *) * Ny);
      for (j = 0; j < Ny; j++) {
        A[m][i][j] = (double *) malloc(sizeof(double) * Nx);
      }
    }
  }
  /* 13 coefficient volumes: center + one per axis-symmetric offset 1..4. */
  double ****coef = (double ****) malloc(sizeof(double ***) * 13);
  for (m = 0; m < 13; m++) {
    coef[m] = (double ***) malloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
      coef[m][i] = (double **) malloc(sizeof(double *) * Ny);
      for (j = 0; j < Ny; j++) {
        coef[m][i][j] = (double *) malloc(sizeof(double) * Nx);
      }
    }
  }
  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 256;
  tile_size[4] = -1;
  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;
  /* Deterministic pseudo-random initialization (fixed seed). */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 13; m++) {
    for (i = 1; i < Nz; i++) {
      for (j = 1; j < Ny; j++) {
        for (k = 1; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz - 4; i++) {
        for (j = 4; j < Ny - 4; j++) {
          for (k = 4; k < Nx - 4; k++) {
            /* Axis-symmetric 25-point update into the other time buffer. */
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: this file only defines MIN/MAX; the lowercase min() used
       before was undefined and would not compile. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free allocated arrays -- including the top-level pointer arrays and
     tile_size, which previously leaked. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m = 0; m < 13; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Subtract the timeval *y from *x, storing the difference in *result
     with a normalized (non-negative) microsecond field.  Note that *y
     is clobbered in the process.  The return value is 1 when x < y
     (negative difference), 0 otherwise. */
  if (x->tv_usec < y->tv_usec) {
    /* Borrow whole seconds from x by pushing them into y. */
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    /* Carry excess microseconds into whole seconds. */
    int steps = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * steps;
    y->tv_sec -= steps;
  }
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
/* Driver for the tiled (PLUTO/CLooG-generated) order-1 3D 7-point stencil:
allocate two ping-pong buffers, initialize with fixed-seed random data,
run the tiled kernel TESTS times, and report the best time. */
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx, Ny, Nz, Nt stay uninitialized when fewer than 4 size
arguments are supplied -- the loops below would then read indeterminate
values; confirm the benchmark is always invoked with all 4 arguments. */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A[0]/A[1]: the two time levels, each Nz x Ny x Nx (one-cell halo). */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
/* Tile sizes consumed by the generated loop nest below; -1 terminates. */
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
/* Stencil weights: center (alpha) and the six face neighbors (beta). */
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables with a fixed seed for reproducibility
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
/* Time-skewed, tiled loop nest generated by PLUTO/CLooG: t1 iterates over
time tiles, t2 (OpenMP-parallel) over skewed z tiles, t3/t4 over y/x
tiles, and t5..t8 are the intra-tile time/z/y/x coordinates.  The
(-t5+t6), (-t5+t7), (-t5+t8) indices undo the time skewing, and
A[(t5+1)%2] / A[t5%2] implement double buffering between time steps. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(8*t1+Nx+13,32)),floord(16*t2+Nx+12,32)),floord(8*t3+Nx+4,32)),floord(16*t1-16*t2+Nz+Nx+11,32));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),32*t4+30),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
/* Deallocation is intentionally skipped (see note below): freeing here
perturbed the measured performance, and the process exits anyway. */
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Iterative integer factorial: fact(0) == fact(1) == 1.
   Used only for generating small Binomial kernels, so overflow of
   size_t is not a practical concern here. */
static inline size_t fact(size_t n)
{
  size_t
    product,
    term;

  product=1;
  for (term=n; term > 1; term--)
    product*=term;
  return(product);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Walk the singly-linked kernel list and return its final entry.
   The argument must be non-NULL. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  for ( ; kernel->next != (KernelInfo *) NULL; kernel=kernel->next)
    ;
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
**
** Parses either "WxH[+X+Y][@><]:v1,v2,..." or an old-style plain list of
** numbers forming an odd-sized square kernel.  Returns a newly allocated
** KernelInfo (free with DestroyKernelInfo()), or NULL on any parse or
** allocation error.
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;
  char
    token[MagickPathExtent];
  const char
    *p,
    *end;
  register ssize_t
    i;
  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */
  MagickStatusType
    flags;
  GeometryInfo
    args;
  /* Allocate and zero-initialize an empty user-defined kernel */
  kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) ResetMagickMemory(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);
  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');
  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;
  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* BUGFIX: a geometry prefix as long as the token buffer would
         overflow 'token' in the memcpy() below -- reject such input. */
      if ( (size_t) (p-kernel_string) >= MagickPathExtent )
        return(DestroyKernelInfo(kernel));
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);
      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;
      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      /* origin must fall inside the kernel's rectangle */
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));
      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }
  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  /* seed min/max so the first real value always replaces them */
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      GetNextToken(p,&p,MagickPathExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      if ( kernel->values[i] < 0 )
        kernel->negative_range += kernel->values[i];
      else
        kernel->positive_range += kernel->values[i];
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }
  /* sanity check -- no more values in kernel definition */
  GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
  /* check that we received at least one real (non-nan) value! */
  if ( kernel->minimum == MagickMaximumValue )
    return(DestroyKernelInfo(kernel));
  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */
  return(kernel);
}
/* Parse a 'named' built-in kernel specification "name[:args][@><]".
** Returns the generated kernel (possibly an expanded rotated list), or
** NULL if the name is not a valid built-in kernel or generation fails.
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];
  const char
    *p,
    *end;
  GeometryInfo
    args;
  KernelInfo
    *kernel;
  MagickStatusType
    flags;
  ssize_t
    type;
  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */
  /* skip the separator(s) between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;
  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');
  /* BUGFIX: an argument string as long as the token buffer would
     overflow 'token' in the memcpy() below -- reject such input. */
  if ( (size_t) (end-p) >= MagickPathExtent )
    return((KernelInfo *) NULL);
  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);
#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif
  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;       /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }
  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);
  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }
  return(kernel);
}
/* AcquireKernelInfo() - parse a kernel string into a (possibly multi-)
** kernel list.  A leading '@' means "read the definition from this file".
** Returns NULL on error; free the result with DestroyKernelInfo().
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;
  char
    *kernel_cache,
    token[MagickPathExtent];
  const char
    *p;
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* "@file" form: slurp the kernel definition(s) from the named file */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* Error handling -- release any partial results */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            /* BUGFIX: the file cache was leaked on this error path */
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usally much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimentional Pascel's Triangle
% of values. Used for special forma of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
%    | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
%       is a value of one to any neighbour, orthogonal or diagonal. One way
%       of thinking of it is the number of squares a 'King' or 'Queen' in
%       chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleving of Manhatten and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunatally we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
/*
  CloneKernelInfo() returns a deep copy of the given kernel list, or NULL on
  a memory allocation failure.  The clone (including any chained kernels on
  the 'next' list) must be released with DestroyKernelInfo().
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /*
    The structure copy above aliased the source kernel's 'values' and 'next'
    pointers.  Clear them immediately so that the failure paths below, which
    call DestroyKernelInfo(new_kernel), cannot free memory that is still
    owned by the original kernel list.
  */
  new_kernel->values=(MagickRealType *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if (kernel->next != (KernelInfo *) NULL)
    {
      new_kernel->next=CloneKernelInfo(kernel->next);
      if (new_kernel->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(new_kernel));
    }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
/*
  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
  kernel, including every kernel chained on its 'next' list, and returns
  NULL for convenient pointer resetting by the caller.
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *node;

  assert(kernel != (KernelInfo *) NULL);
  /* walk the list iteratively, releasing each node's values then the node */
  node=kernel;
  while (node != (KernelInfo *) NULL)
  {
    KernelInfo
      *successor;

    successor=node->next;
    node->values=(MagickRealType *) RelinquishAlignedMemory(node->values);
    node=(KernelInfo *) RelinquishMagickMemory(node);
    node=successor;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  FlopKernelInfo() -- disabled (never compiled): reflects a kernel
  horizontally by reversing each row of values in place and mirroring the
  x origin.
  NOTE(review): references an undeclared 'angle' variable, so this will not
  compile if the surrounding '#if 0' is ever removed without fixing it.
*/
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register double
*k,t;
/* swap value pairs from both ends of every row, meeting in the middle */
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
/* mirror the kernel origin about the vertical axis */
kernel->x = kernel->width - kernel->x - 1;
angle = fmod(angle+180.0, 360.0);
}
#endif
/*
  ExpandMirrorKernelInfo() expands a single kernel into a list of four:
  the original, its reflected (180 degree) form, the transpose (90), and
  the flop (180 of the transpose).  This mirror ordering gives a more
  symmetrical thinning than plain incremental rotation.
  On a memory allocation failure the list is left valid but shorter than
  four kernels (previously a NULL clone was dereferenced and crashed).
*/
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *clone,
    *last;

  last=kernel;
  clone=CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;  /* allocation failed: keep whatever list we already have */
  RotateKernelInfo(clone,180);   /* flip */
  LastKernelInfo(last)->next=clone;
  last=clone;
  clone=CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone,90);    /* transpose */
  LastKernelInfo(last)->next=clone;
  last=clone;
  clone=CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone,180);   /* flop */
  LastKernelInfo(last)->next=clone;
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels,
% while 90 degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
/*
  SameKernelInfo() returns MagickTrue when the two kernels have identical
  geometry, origin, and values.  NaN entries ("don't care" positions) are
  considered equal only to other NaN entries.
*/
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    n;

  /* geometry and origin must agree before values are even considered */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  for (n=0; n < (kernel1->width*kernel1->height); n++)
  {
    /* a NaN in one kernel must be matched by a NaN in the other */
    if ((!IsNaN(kernel1->values[n])) != (!IsNaN(kernel2->values[n])))
      return MagickFalse;
    /* numeric values must agree to within MagickEpsilon
       (NaN-NaN compares false here, so matched NaNs pass through) */
    if (fabs(kernel1->values[n]-kernel2->values[n]) >= MagickEpsilon)
      return MagickFalse;
  }
  return MagickTrue;
}
/*
  ExpandRotateKernelInfo() expands a kernel list by repeatedly rotating the
  last kernel by 'angle' degrees and appending it, until the rotation cycles
  back to the original kernel.
  On a memory allocation failure the expansion simply stops, leaving the
  list valid (previously a NULL clone was dereferenced and crashed).
*/
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *clone,
    *last;

  last=kernel;
  DisableMSCWarning(4127)
  while(1) {
  RestoreMSCWarning
    clone=CloneKernelInfo(last);
    if (clone == (KernelInfo *) NULL)
      return;  /* allocation failed: keep the kernels appended so far */
    RotateKernelInfo(clone,angle);
    if (SameKernelInfo(kernel,clone) != MagickFalse)
      break;
    LastKernelInfo(last)->next=clone;
    last=clone;
  }
  clone=DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
/*
  CalcKernelMetaData() recomputes the minimum, maximum, negative_range and
  positive_range meta-data of the given kernel (only - not its 'next' list)
  directly from its values.  Values within MagickEpsilon of zero are
  clamped to exactly zero as a side effect.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /* reset the meta-data, then accumulate it from each kernel value */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    double
      value;

    value=kernel->values[n];
    if (fabs(value) < MagickEpsilon)
      value=kernel->values[n]=0.0;  /* snap near-zero entries to zero */
    if (value < 0)
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum,value);
    Maximize(kernel->maximum,value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls. This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine, with the
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies a single low-level morphology primitive
  ('method', using 'kernel') to 'image', writing the result into the
  pre-allocated 'morphology_image'.  It returns the number of pixels that
  changed, or a non-positive value on failure.  Convolve/Dilate style
  methods use the kernel reflected about its origin, per the standard
  definition of those operations; Erode/Thinning style methods do not.
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
const MorphologyMethod method,const KernelInfo *kernel,const double bias,
ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*image_view,
*morphology_view;
OffsetInfo
offset;
register ssize_t
j,
y;
size_t
*changes,
changed,
width;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(morphology_image != (Image *) NULL);
assert(morphology_image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
width=image->columns+kernel->width-1;
offset.x=0;
offset.y=0;
/* pick the kernel origin offset for the chosen method */
switch (method)
{
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
{
/*
Kernel needs to used with reflection about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
default:
{
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
}
changed=0;
/* one change counter per OpenMP thread, summed after the loops */
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changes[j]=0;
if ((method == ConvolveMorphology) && (kernel->width == 1))
{
register ssize_t
x;
/*
Special handling (for speed) of vertical (blur) kernels. This performs
its handling in columns rather than in rows. This is only done
for convolve as it is the only method that generates very large 1-D
vertical kernels (such as a 'BlurKernel')
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,morphology_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
r;
ssize_t
center;
if (status == MagickFalse)
continue;
/* read one column plus the kernel overlap; write one column */
p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
kernel->height-1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
morphology_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*offset.y;
for (r=0; r < (ssize_t) image->rows; r++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
v;
size_t
count;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
/* apply the 1-D kernel in reflected order (convolution) */
k=(&kernel->values[kernel->height-1]);
pixels=p;
pixel=bias;
gamma=0.0;
count=0;
if ((morphology_traits & BlendPixelTrait) == 0)
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
else
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_image->type=image->type;
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
/* NOTE(review): this fast path reports 0 on failure while the general
** path below reports -1 -- confirm callers treat both as failure */
return(status ? (ssize_t) changed : 0);
}
/*
Normal handling of horizontal or rectangular kernels (row by row).
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,morphology_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
kernel->height,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* offset (in quantum units) of the kernel origin pixel within 'p' */
center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
GetPixelChannels(image)*offset.x);
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
intensity,
maximum,
minimum,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
size_t
count;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
pixels=p;
maximum=0.0;
minimum=(double) QuantumRange;
count=kernel->width*kernel->height;
/* per-method starting value for the result pixel */
switch (method)
{
case ConvolveMorphology: pixel=bias; break;
case DilateMorphology:
case ErodeIntensityMorphology:
{
pixel=0.0;
break;
}
default:
{
pixel=(double) p[center+i];
break;
}
}
gamma=1.0;
switch (method)
{
case ConvolveMorphology:
{
/*
Weighted Average of pixels using reflected kernel
For correct working of this operation for asymmetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values needs to be reversed.
Correlation is actually the same as this but without reflecting
the kernel, and thus 'lower-level' than Convolution. However as
Convolution is the more common method used, and it does not
really cost us much in terms of processing to use a reflected
kernel, so it is Convolution that is implemented.
Correlation will have its kernel reflected before calling this
function to do a Convolve.
For more details of Correlation vs Convolution see
http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
count=0;
if ((morphology_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
/*
Alpha blending.
*/
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case ErodeMorphology:
{
/*
Minimum value within kernel neighbourhood.
The kernel is not reflected for this operation. In normal
Greyscale Morphology, the kernel value should be added
to the real value, this is currently not done, due to the
nature of the boolean kernels being used.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateMorphology:
{
/*
Maximum value within kernel neighbourhood.
For correct working of this operation for asymmetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values needs to be reversed.
In normal Greyscale Morphology, the kernel value should be
added to the real value, this is currently not done, due to the
nature of the boolean kernels being used.
*/
count=0;
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k > 0.5))
{
if ((double) pixels[i] > pixel)
pixel=(double) pixels[i];
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
/*
Minimum of foreground pixel minus maximum of background pixels.
The kernel is not reflected for this operation, and consists
of both foreground and background pixel neighbourhoods, 0.0 for
background, and 1.0 for foreground with either Nan or 0.5 values
for don't care.
This never produces a meaningless negative result. Such results
cause Thinning/Thicken to not work correctly when used against a
greyscale image.
*/
count=0;
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if (*k > 0.7)
{
/* foreground position: take the minimum */
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
else
if (*k < 0.3)
{
/* background position: take the maximum */
if ((double) pixels[i] > maximum)
maximum=(double) pixels[i];
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
pixel-=maximum;
if (pixel < 0.0)
pixel=0.0;
if (method == ThinningMorphology)
pixel=(double) p[center+i]-pixel;
else
if (method == ThickenMorphology)
/* NOTE(review): '+=' here yields 2*pixel+origin; confirm
** '=' (origin+pixel) was not intended for Thicken */
pixel+=(double) p[center+i]+pixel;
break;
}
case ErodeIntensityMorphology:
{
/*
Select pixel with minimum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
count=0;
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity < minimum)
{
pixel=(double) pixels[i];
minimum=intensity;
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateIntensityMorphology:
{
/*
Select pixel with maximum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
count=0;
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity > maximum)
{
pixel=(double) pixels[i];
maximum=intensity;
}
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case IterativeDistanceMorphology:
{
/*
Compute the iterative distance from black edge of a white image
shape. Essentially white values are decreased to the smallest
'distance from edge' it can find.
It works by adding kernel values to the neighbourhood, and
select the minimum value found. The kernel is rotated before
use, so kernel distances match resulting distances, when a user
provided asymmetric kernel is applied.
This code is nearly identical to True GrayScale Morphology but
not quite.
GreyDilate Kernel values added, maximum value found Kernel is
rotated before use.
GrayErode: Kernel values subtracted and minimum value found No
kernel rotation used.
Note that the Iterative Distance method is essentially a
GrayErode, but with negative kernel values, and kernel rotation
applied.
*/
count=0;
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case UndefinedMorphology:
default:
break;
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
/* -1 signals failure; otherwise the number of pixels changed */
return(status ? (ssize_t) changed : -1);
}
/*
This is almost identical to the MorphologyPrimitive() function above, but
applies the primitive directly to the actual image using two passes, once in
each direction, with the results of the previous (and current) row being
re-used.
That is, after each row is 'Sync'ed back into the image, the next row makes
use of those values as part of its own calculation. The process repeats,
but going in the opposite (bottom-up) direction.
Because of this 're-use of results' this function cannot make use of
multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;  /* kernel origin offset (reflected for Distance/Voronoi) */

  size_t
    width,   /* virtual pixel window width: columns + kernel width - 1 */
    changed; /* count of pixels whose value changed by > MagickEpsilon */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  width=image->columns+kernel->width-1;
  /* Forward (top-down) pass. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image! We read
      using virtual to get virtual pixel handling, but write back into the same
      image.
      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* channel offset of the origin pixel inside the virtual pixel window */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          continue;
        pixels=p;
        pixel=(double) QuantumRange;  /* start from the maximum distance */
        switch (method)
        {
          case DistanceMorphology:
          {
            /*
              Scan the kernel rows above (and including) the origin row;
              'k' walks the kernel values backwards, i.e. the reflected
              kernel.  Accumulate the minimum of neighbour + kernel value.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /*
              Then the already-updated pixels to the left on the current
              output row (read via 'q', the authentic pixels).
            */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /*
              Same minimum-distance scan as DistanceMorphology above.
              NOTE(review): this loop uses 'v < offset.y' where Distance
              uses 'v <= offset.y' (origin row excluded here) -- confirm
              this asymmetry is intended.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image. We
      read using virtual to get virtual pixel handling, but write back
      into the same image.
      Only the bottom half of the kernel is processed as we up the image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* start at the last column; this pass walks right-to-left */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    center=(ssize_t) (offset.x*GetPixelChannels(image));
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          continue;
        pixels=p;
        pixel=(double) QuantumRange;  /* start from the maximum distance */
        switch (method)
        {
          case DistanceMorphology:
          {
            /*
              Scan the kernel rows below (and including) the origin row.
            */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /*
              Then the already-updated pixels to the right on the current
              output row (read via 'q').
            */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /*
              NOTE(review): horizontal 'k' start differs from the Distance
              case above (width*(y+1)-1 vs width*y+x-1) -- confirm the
              intended kernel row is addressed here.
            */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /* -1 signals a failure; otherwise the number of pixels changed */
  return(status ? (ssize_t) changed : -1);
}
/*
  Apply a Morphology by calling one of the above low-level primitive
  application functions.  This function handles any iteration loops,
  composition or re-iteration of results, and compound morphology methods
  that are based on multiple low-level (staged) morphology methods.
  Basically this provides the complex glue between the requested morphology
  method and the raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */
  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;
  verbose = IsStringTrue(GetImageArtifact(image,"debug"));
  /* initialise for cleanup */
  curr_image = (Image *) image;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;
  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALLTHROUGH */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }
  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);
      if ( changed < 0 )
        goto error_cleanup;
      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }
  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp;  /* still not defined! Then re-iterate */
  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }
  /* Loops around more primitive morphology methods
  ** erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1: iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;
    /* Loop 2: iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;
    kernel_number = 0;
    while ( norm_kernel != NULL ) {
      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;   /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */
        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel;  /* default use unreflected kernel */
        primitive = method;         /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:     /* open, close */
            switch ( stage_loop ) {
              case 1:  /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:       /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;   /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );
        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }
        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;    /* the iteration of this kernel */
          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }
          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;
          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */
        } /* End Loop 4: Iterate the kernel with primitive */
        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, "   Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, "      curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, "      work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, "      save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, "      union=0x%lx\n", (unsigned long)rslt_image);
#endif
      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */
      /* Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }
      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");
      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */
  } /* End Loop 1: compound method iteration */
  goto exit_cleanup;
  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showkernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showkernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CompositeOperator
    compose;

  const char
    *artifact;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *local_kernel;

  /*
    Start with the caller's kernel; clone it only if scaling must modify it.
  */
  local_kernel=(KernelInfo *) kernel;
  bias=0.0;
  compose=UndefinedCompositeOp;  /* use the method's default composition */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /*
        Collect the output bias; the convolution primitives need it.
      */
      artifact=GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        {
          if (IsGeometry(artifact) != MagickFalse)
            bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
          else
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:bias",artifact);
        }
      /*
        Scale/normalize the kernel as the user requested, working on a
        private clone so the caller's kernel is left untouched.
      */
      artifact=GetImageArtifact(image,"convolve:scale");
      if (artifact != (const char *) NULL)
        {
          if (IsGeometry(artifact) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:scale",artifact);
          else
            {
              if (local_kernel == kernel)
                local_kernel=CloneKernelInfo(kernel);
              if (local_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(local_kernel,artifact);
            }
        }
    }
  /*
    Dump the (possibly normalized) kernel to stderr when requested.  This
    happens after scaling so the user sees the kernel actually applied.
  */
  artifact=GetImageArtifact(image,"morphology:showkernel");
  if (IsStringTrue(artifact) != MagickFalse)
    ShowKernelInfo(local_kernel);
  /*
    Let the user override how multi-kernel results are merged:
    'Undefined' keeps the method default, 'None' re-iterates the previous
    result, anything else composes the per-kernel results.
  */
  {
    ssize_t
      option;

    artifact=GetImageArtifact(image,"morphology:compose");
    if (artifact != (const char *) NULL)
      {
        option=ParseCommandOption(MagickComposeOptions,MagickFalse,artifact);
        if (option >= 0)
          compose=(CompositeOperator) option;
        else
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
            "morphology:compose",artifact);
      }
  }
  /*
    Apply the morphology, then release the clone if one was made.
  */
  morphology_image=MorphologyApply(image,method,iterations,local_kernel,
    compose,bias,exception);
  if (local_kernel != kernel)
    local_kernel=DestroyKernelInfo(local_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first (recurse down the multi-kernel list) */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);
  /* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */
  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;
  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */
  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;
    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;
    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;
    default:
      break;
  }
  /* Attempt rotations by 45 degrees  -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle: cycle the 8 border
             values one step around the center (index 4 is untouched) */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin (coordinates relative to center) */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        /* NOTE(review): perror() appends strerror(errno), which is
           unrelated here -- fprintf(stderr,...) would be clearer. */
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0)  && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);    /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees:
             four-way element cycle, ring by ring toward the center */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        /* NOTE(review): see perror note above -- same misuse here */
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origon
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i,
        j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      /* swap ends inward: reverses the flattened kernel array */
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;
      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, posibily with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
%     void ScaleGeometryKernelInfo(KernelInfo *kernel,
%       const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  /*
    Parse the user-supplied geometry string: first argument (and any
    normalization flags) scale/normalize the kernel, the second blends in
    a scaled unity kernel.
  */
  SetGeometryInfo(&geometry_info);
  flags=ParseGeometry(geometry,&geometry_info);
  if ((flags & PercentValue) != 0)
    {
      /* arguments were given as percentages */
      geometry_info.rho*=0.01;
      geometry_info.sigma*=0.01;
    }
  /* Supply defaults for any missing arguments */
  if ((flags & RhoValue) == 0)
    geometry_info.rho=1.0;      /* default scaling factor */
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=0.0;    /* no unity-kernel blend by default */
  /* Scale/Normalize the input kernel */
  ScaleKernelInfo(kernel,geometry_info.rho,(GeometryFlags) flags);
  /* Add Unity Kernel, for blending with original */
  if ((flags & SigmaValue) != 0)
    UnityAddKernelInfo(kernel,geometry_info.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) the
% kernel will be scaled by just the sum of the positive values, so that its
% output range will again fall into the  +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register double
    pos_scale,
    neg_scale;

  register ssize_t
    i;

  /* Process the rest of a multi-kernel list first (recursion over ->next). */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive part only */
      pos_scale = kernel->positive_range;
  }

  /* Force kernel into a normalized zero-summing kernel: positive and
   * negative values are scaled separately so they cancel exactly. */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                   ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                   ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* Scale every non-NaN kernel value (NaN entries mark "don't care"). */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUG FIX: complete the swap (was hard-coded 1) */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'morphology:showkernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* Print a human-readable dump of every kernel in the (possibly multi-kernel)
 * list to standard error: type, rotation angle, geometry, value range,
 * output range classification, and the full value grid (NaN cells shown as
 * "nan").  Used for the user's 'morphology:showkernel' request. */
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *k;

  size_t
    c, i, u, v;

  /* c counts kernels so multi-kernel lists can be numbered "#0", "#1", ... */
  for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {

    (void) FormatLocaleFile(stderr, "Kernel");
    /* Only number the kernels when the list has more than one entry. */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, k->type) );
    /* Append the rotation angle only when it is non-zero. */
    if ( fabs(k->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", k->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
         k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), k->minimum,
          GetMagickPrecision(), k->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), k->negative_range,
          GetMagickPrecision(), k->positive_range);
    /* Classify the sum of the kernel values for the user's benefit. */
    if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), k->positive_range+k->negative_range);

    /* Dump the value grid, one row of the kernel per output line; i is the
     * running flat index into k->values. */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if (IsNaN(k->values[i]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), (double) k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     U n i t y A d d K e r n e l I n f o                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
%      void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Walk the whole multi-kernel list and add the scaled unity kernel (a
   * single impulse at the kernel origin) to each member.  This blends the
   * original image into the result of the convolution. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    {
      k->values[k->x + k->y*k->width] += scale;
      CalcKernelMetaData(k);  /* refresh the cached range/extrema data */
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  register size_t
    i;

  /* Replace every special 'nan' placeholder with 0.0, in each kernel of the
   * multi-kernel list.  Done when the kernel is destined for hardware (GPU)
   * convolution that cannot handle NaN entries. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    for (i = 0; i < (k->width*k->height); i++)
      if (IsNaN(k->values[i]))
        k->values[i] = 0.0;
}
|
3D.ref.c | #include <sys/time.h>
#include <time.h>
#include <stdio.h>
/* Return a monotonically increasing timestamp in nanoseconds, used only for
 * the instrumentation printfs in this benchmark.  On Mach (macOS) the host
 * calendar clock service is used; elsewhere CLOCK_MONOTONIC.
 * NOTE(review): the __MACH__ branch needs <mach/clock.h>/<mach/mach.h>,
 * which are not included here -- confirm the macOS build. */
static unsigned long long current_time_ns() {
#ifdef __MACH__
    /* Mach absolute time via the calendar clock service. */
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec;
    return (unsigned long long)mts.tv_nsec + s;
#else
    /* POSIX: seconds*1e9 + nanoseconds from the monotonic clock. */
    struct timespec t ={0,0};
    clock_gettime(CLOCK_MONOTONIC, &t);
    unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec;
    return (((unsigned long long)t.tv_nsec)) + s;
#endif
}
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <string.h>
#define STR_SIZE (256)
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016; float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
/* Report an unrecoverable error on stderr and terminate the program.
 * BUG FIX: previously this only printed and returned, so callers such as
 * readinput() continued with a NULL FILE* (or missing data) and crashed
 * later; a function named "fatal" used for "file was not opened" /
 * "not enough lines in file" is clearly meant to abort. */
void fatal(char *s)
{
    fprintf(stderr, "Error: %s\n", s);
    exit(1);
}
/* Read grid_rows*grid_cols*layers float values, one per text line, from
 * <file> into vect.  Storage layout is row-major over (row, col) with the
 * layer stride grid_rows*grid_cols: vect[i*grid_cols + j + k*rows*cols].
 * NOTE(review): fatal() as defined above does not terminate, so a failed
 * fopen() leaves fp == NULL and the fgets() below dereferences it --
 * confirm fatal() is meant to abort. */
void readinput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {

    int i,j,k;
    FILE *fp;
    char str[STR_SIZE];
    float val;

    if( (fp  = fopen(file, "r" )) ==0 )
      fatal( "The file was not opened" );

    for (i=0; i <= grid_rows-1; i++)
      for (j=0; j <= grid_cols-1; j++)
        for (k=0; k <= layers-1; k++)
          {
            /* One value per line; reject short or malformed files. */
            if (fgets(str, STR_SIZE, fp) == NULL) fatal("Error reading file\n");
            if (feof(fp))
              fatal("not enough lines in file");
            if ((sscanf(str, "%f", &val) != 1))
              fatal("invalid file format");
            vect[i*grid_cols+j+k*grid_rows*grid_cols] = val;
          }

    fclose(fp);
}
/* Dump every cell of the (rows x cols x layers) grid to <file>, one
 * "<running-index>\t<value>" line per cell.  Iteration is row-major over
 * (row, col) with the layer index innermost, matching readinput()'s layout
 * vect[row*cols + col + layer*rows*cols]. */
void writeoutput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {

    FILE *fp = fopen(file, "w");
    if (fp == 0)
        printf("The file was not opened\n");

    int row, col, layer;
    int index = 0;
    for (row = 0; row < grid_rows; row++)
        for (col = 0; col < grid_cols; col++)
            for (layer = 0; layer < layers; layer++) {
                fprintf(fp, "%d\t%g\n", index,
                        vect[row*grid_cols + col + layer*grid_rows*grid_cols]);
                index++;
            }

    fclose(fp);
}
/* Sequential reference solver for the 3D heat-diffusion stencil.
 *
 *   pIn        per-cell dissipated power (nx*ny*nz, x fastest, z slowest)
 *   tIn/tOut   temperature grids; the two buffers are ping-ponged locally
 *              between iterations (the caller's pointers are unchanged)
 *   Cap,R*,dt  thermal RC model parameters and time step
 *   numiter    number of steps; the do-while runs at least once even for
 *              numiter <= 0
 *
 * Boundary cells clamp missing neighbours to themselves (adiabatic edges);
 * the vertical coefficient also couples each cell to the file-scope
 * amb_temp ambient temperature.
 */
void computeTempCPU(float *pIn, float* tIn, float *tOut,
        int nx, int ny, int nz, float Cap,
        float Rx, float Ry, float Rz,
        float dt, int numiter)
{
    float coef_ew, coef_ns, coef_tb, coef_center;
    float stepDivCap = dt / Cap;
    coef_ew = stepDivCap / Rx;   /* east/west share one coefficient */
    coef_ns = stepDivCap / Ry;   /* north/south likewise */
    coef_tb = stepDivCap / Rz;   /* top/bottom likewise */
    coef_center = 1.0 - (2.0*coef_ew + 2.0*coef_ns + 3.0*coef_tb);

    int iter = 0;
    do {
        int ix, iy, iz;
        for (iz = 0; iz < nz; iz++)
            for (iy = 0; iy < ny; iy++)
                for (ix = 0; ix < nx; ix++) {
                    int center = ix + iy*nx + iz*nx*ny;
                    int west   = (ix == 0)      ? center : center - 1;
                    int east   = (ix == nx - 1) ? center : center + 1;
                    int north  = (iy == 0)      ? center : center - nx;
                    int south  = (iy == ny - 1) ? center : center + nx;
                    int below  = (iz == 0)      ? center : center - nx*ny;
                    int above  = (iz == nz - 1) ? center : center + nx*ny;
                    tOut[center] = tIn[center]*coef_center
                        + tIn[north]*coef_ns + tIn[south]*coef_ns
                        + tIn[east]*coef_ew + tIn[west]*coef_ew
                        + tIn[above]*coef_tb + tIn[below]*coef_tb
                        + (dt/Cap) * pIn[center] + coef_tb*amb_temp;
                }
        /* Swap the local buffer pointers for the next time step. */
        float *swap = tIn;
        tIn = tOut;
        tOut = swap;
        iter++;
    } while (iter < numiter);
}
float accuracy(float *arr1, float *arr2, int len)
{
float err = 0.0;
int i;
for(i = 0; i < len; i++)
{
err += (arr1[i]-arr2[i]) * (arr1[i]-arr2[i]);
}
return (float)sqrt(err/len);
}
/* OpenMP version of the 3D heat-diffusion stencil: same numerics as
 * computeTempCPU(), with the outermost z loop parallelized and the
 * parallel region wall-clock time printed via current_time_ns() (benchmark
 * instrumentation).  tIn/tOut are ping-ponged through local copies, so the
 * caller's pointers are unchanged; after an odd number of iterations the
 * result is in the caller's tOut buffer, after an even number in tIn. */
void computeTempOMP(float *pIn, float* tIn, float *tOut,
        int nx, int ny, int nz, float Cap,
        float Rx, float Ry, float Rz,
        float dt, int numiter)
{
    float ce, cw, cn, cs, ct, cb, cc;

    /* Stencil coefficients: opposite faces share one value per axis. */
    float stepDivCap = dt / Cap;
    ce = cw =stepDivCap/ Rx;
    cn = cs =stepDivCap/ Ry;
    ct = cb =stepDivCap/ Rz;

    cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct);

    {
        int count = 0;
        float *tIn_t = tIn;
        float *tOut_t = tOut;

        do {
            int z;
 { const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for
            /* Each thread takes whole z-slabs; iterations are independent
             * because only tOut_t is written. */
            for (z = 0; z < nz; z++) {
                int y;
                for (y = 0; y < ny; y++) {
                    int x;
                    for (x = 0; x < nx; x++) {
                        int c, w, e, n, s, b, t;
                        c = x + y * nx + z * nx * ny;
                        /* Boundary cells clamp missing neighbours to self. */
                        w = (x == 0) ? c : c - 1;
                        e = (x == nx-1) ? c : c + 1;
                        n = (y == 0) ? c : c - nx;
                        s = (y == ny-1) ? c : c + nx;
                        b = (z == 0) ? c : c - nx * ny;
                        t = (z == nz-1) ? c : c + nx * ny;
                        tOut_t[c] = cc * tIn_t[c] + cw * tIn_t[w] + ce * tIn_t[e]
                            + cs * tIn_t[s] + cn * tIn_t[n] + cb * tIn_t[b] + ct * tIn_t[t]+(dt/Cap) * pIn[c] + ct*amb_temp;
                    }
                }
            } ;
            const unsigned long long parallel_for_end = current_time_ns();
            printf("pragma157_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
            /* Swap buffers for the next time step. */
            float *t = tIn_t;
            tIn_t = tOut_t;
            tOut_t = t;
            count++;
        } while (count < numiter);
    }
    return;
}
/* Print the command-line synopsis to stderr and exit with status 1.
 * argc is accepted for interface compatibility but unused. */
void usage(int argc, char **argv)
{
    fprintf(stderr, "Usage: %s <rows/cols> <layers> <iterations> <powerFile> <tempFile> <outputFile>\n", argv[0]);
    fprintf(stderr, "\t<rows/cols>  - number of rows/cols in the grid (positive integer)\n");
    fprintf(stderr, "\t<layers>  - number of layers in the grid (positive integer)\n");
    fprintf(stderr, "\t<iteration> - number of iterations\n");
    fprintf(stderr, "\t<powerFile>  - name of the file containing the initial power values of each cell\n");
    fprintf(stderr, "\t<tempFile>  - name of the file containing the initial temperature values of each cell\n");
    /* BUG FIX: the closing '>' was missing from <outputFile>. */
    fprintf(stderr, "\t<outputFile> - output file\n");
    exit(1);
}
/* Driver: parse <rows/cols> <layers> <iterations> <powerFile> <tempFile>
 * <outputFile>, run the OpenMP solver, compare against the sequential
 * reference, report timing/accuracy, and write the OpenMP result. */
int main(int argc, char** argv)
{
    if (argc != 7)
    {
        usage(argc,argv);
    }

    char *pfile, *tfile, *ofile;// *testFile;
    int iterations = atoi(argv[3]);

    pfile = argv[4];
    tfile = argv[5];
    ofile = argv[6];
    //testFile = argv[7];
    int numCols = atoi(argv[1]);
    int numRows = atoi(argv[1]);
    int layers = atoi(argv[2]);

    /* calculating parameters*/

    float dx = chip_height/numRows;
    float dy = chip_width/numCols;
    float dz = t_chip/layers;

    float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * dx * dy;
    float Rx = dy / (2.0 * K_SI * t_chip * dx);
    float Ry = dx / (2.0 * K_SI * t_chip * dy);
    float Rz = dz / (K_SI * dx * dy);

    // cout << Rx << " " << Ry << " " << Rz << endl;
    float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
    float dt = PRECISION / max_slope;

    float *powerIn, *tempOut, *tempIn, *tempCopy;// *pCopy;
    //    float *d_powerIn, *d_tempIn, *d_tempOut;
    int size = numCols * numRows * layers;

    powerIn = (float*)calloc(size, sizeof(float));
    tempCopy = (float*)malloc(size * sizeof(float));
    tempIn = (float*)calloc(size,sizeof(float));
    tempOut = (float*)calloc(size, sizeof(float));
    //pCopy = (float*)calloc(size,sizeof(float));
    float* answer = (float*)calloc(size, sizeof(float));

    // outCopy = (float*)calloc(size, sizeof(float));
    readinput(powerIn,numRows, numCols, layers,pfile);
    readinput(tempIn, numRows, numCols, layers, tfile);

    /* Keep an untouched copy of the initial temperatures for the
     * sequential reference run. */
    memcpy(tempCopy,tempIn, size * sizeof(float));

    const unsigned long long full_program_start = current_time_ns();
    {
        struct timeval start, stop;
        float time;
        gettimeofday(&start,NULL);
        computeTempOMP(powerIn, tempIn, tempOut, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,iterations);
        gettimeofday(&stop,NULL);
        time = (stop.tv_usec-start.tv_usec)*1.0e-6 + stop.tv_sec - start.tv_sec;
        computeTempCPU(powerIn, tempCopy, answer, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,iterations);

        float acc = accuracy(tempOut,answer,numRows*numCols*layers);
        printf("Time: %.3f (s)\n",time);
        printf("Accuracy: %e\n",acc);
    } ;
    const unsigned long long full_program_end = current_time_ns();
    printf("full_program %llu ns\n", full_program_end - full_program_start);

    /* NOTE(review): computeTempOMP ping-pongs its local buffer pointers, so
     * after an even iteration count the final temperatures are in tempIn,
     * not tempOut -- confirm tempOut is the intended buffer to write. */
    writeoutput(tempOut,numRows, numCols, layers, ofile);

    free(tempIn);
    free(tempOut); free(powerIn);
    /* BUG FIX: tempCopy and answer were leaked. */
    free(tempCopy);
    free(answer);
    return 0;
}
|
GB_unaryop__lnot_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_uint64
// op(A') function: GB_tran__lnot_uint16_uint64
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
// input (A) scalar type
#define GB_ATYPE \
    uint64_t

// output (C) scalar type
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

// the pC-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: logical NOT (of a nonzero test)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the input type to the output type
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT operator entrywise: Cx [p] = !(((uint16_t) Ax [p]) != 0),
// for anz entries, parallelized over nthreads OpenMP threads.  This file is
// auto-generated; the GB_* macros above define the types and the operator.
GrB_Info GB_unop__lnot_uint16_uint64
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back
    // to the generic implementation
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast uint64_t -> uint16_t, and apply
// the logical-NOT operator.  The actual loop body lives in the shared
// template GB_unaryop_transpose.c, specialized by the GB_* macros above.
GrB_Info GB_tran__lnot_uint16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
/* Binary-weight GEMM: A holds sign bits (nonzero char => +1, zero => -1),
 * so each row of B is either added to or subtracted from the corresponding
 * row of C.  ALPHA is accepted for signature parity with the other gemm_*
 * routines but is not used, as in the original. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char  *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, inner;
    for (row = 0; row < M; ++row) {
        for (inner = 0; inner < K; ++inner) {
            float *c_row = C + row*ldc;
            float *b_row = B + inner*ldb;
            if (A[row*lda + inner]) {
                for (col = 0; col < N; ++col) c_row[col] += b_row[col];
            } else {
                for (col = 0; col < N; ++col) c_row[col] -= b_row[col];
            }
        }
    }
}
/* Allocate and return a rows*cols matrix of uniform random floats in [0,1].
 * Caller owns the returned buffer (free()).  BUG FIX: the calloc result was
 * used unchecked; on allocation failure every benchmark caller would
 * dereference NULL, so abort with a message instead. */
float *random_matrix(int rows, int cols)
{
    int i;
    float *m = (float*)calloc(rows*cols, sizeof(float));
    if (!m) {
        fprintf(stderr, "random_matrix: allocation of %dx%d floats failed\n", rows, cols);
        exit(1);
    }
    for (i = 0; i < rows*cols; ++i) {
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}
/* Benchmark helper: build random operands of the requested shapes and time
 * ten gemm_cpu() calls, printing elapsed CPU time.  BUG FIX: the value
 * printed is (clock ticks / CLOCKS_PER_SEC), i.e. seconds, but the message
 * said "ms"; the label now matches time_gpu_random_matrix() below. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (double)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/* General matrix multiply: C = ALPHA*op(A)*op(B) + BETA*C, where op() is
 * transpose when TA/TB are non-zero and matrices are row-major with leading
 * dimensions lda/ldb/ldc.  Thin public entry point that always dispatches
 * to the CPU implementation. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA,  TB,  M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
/* C += ALPHA * A * B, both operands non-transposed (row-major).
 * The k loop is hoisted outside the column loop so the scaled A element is
 * computed once per (row, inner) pair and B is streamed row by row. */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, inner;
    for (row = 0; row < M; ++row) {
        for (inner = 0; inner < K; ++inner) {
            float scaled_a = ALPHA*A[row*lda + inner];
            float *b_row = B + inner*ldb;
            float *c_row = C + row*ldc;
            for (col = 0; col < N; ++col) {
                c_row[col] += scaled_a*b_row[col];
            }
        }
    }
}
/* C += ALPHA * A * B^T.  B is stored N x K, so row `col` of B is the
 * column of op(B); each output element is a dot product of two rows. */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, inner;
    for (row = 0; row < M; ++row) {
        for (col = 0; col < N; ++col) {
            float acc = 0;
            for (inner = 0; inner < K; ++inner) {
                acc += ALPHA*A[row*lda + inner]*B[col*ldb + inner];
            }
            C[row*ldc + col] += acc;
        }
    }
}
/* C += ALPHA * A^T * B.  A is stored K x M, so element (row, inner) of
 * op(A) lives at A[inner*lda + row]; B is streamed row by row as in
 * gemm_nn. */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, inner;
    for (row = 0; row < M; ++row) {
        for (inner = 0; inner < K; ++inner) {
            float scaled_a = ALPHA*A[inner*lda + row];
            float *b_row = B + inner*ldb;
            float *c_row = C + row*ldc;
            for (col = 0; col < N; ++col) {
                c_row[col] += scaled_a*b_row[col];
            }
        }
    }
}
/* C += ALPHA * A^T * B^T.  Both operands are transposed, so op(A)(row,k)
 * is A[row + k*lda] and op(B)(k,col) is B[k + col*ldb]. */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, inner;
    for (row = 0; row < M; ++row) {
        for (col = 0; col < N; ++col) {
            float acc = 0;
            for (inner = 0; inner < K; ++inner) {
                acc += ALPHA*A[row + inner*lda]*B[inner + col*ldb];
            }
            C[row*ldc + col] += acc;
        }
    }
}
/* CPU GEMM dispatcher: C = ALPHA*op(A)*op(B) + BETA*C.  First scales C by
 * BETA, then parallelizes over output rows with OpenMP, calling the
 * appropriate single-row kernel (gemm_nn/tn/nt/tt) for each row.  Rows are
 * independent (each thread writes only C + t*ldc), so the parallel loop is
 * race-free. */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    int i, j;
    /* Pre-scale the output by BETA (the row kernels only accumulate). */
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    int t;
    #pragma omp parallel for
    for (t = 0; t < M; ++t) {
        /* For transposed A the t-th row of op(A) starts at A + t (column
         * t of the stored matrix); otherwise at A + t*lda. */
        if (!TA && !TB)
            gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
        else if (TA && !TB)
            gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
        else if (!TA && TB)
            gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
        else
            gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
    }
}
#ifdef GPU
#include <math.h>
/* GPU GEMM on device pointers via cuBLAS.  cuBLAS is column-major, so the
 * operands and transpose flags are swapped (computing C^T) to produce the
 * row-major result the rest of the code expects. */
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    /* NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t --
     * confirm check_error() tolerates this value. */
    cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
}
/* Convenience wrapper over gemm_ongpu() for host-resident matrices: copies
 * A, B, C to the device, multiplies, pulls C back, and frees the device
 * buffers.  Sizes account for the transpose flags (lda*K vs lda*M, etc.). */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
    float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
    float *C_gpu = cuda_make_array(C, ldc*M);

    gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);

    cuda_pull_array(C_gpu, C, ldc*M);
    cuda_free(A_gpu);
    cuda_free(B_gpu);
    cuda_free(C_gpu);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* GPU counterpart of time_random_matrix(): times 32 gemm_gpu() calls on
 * random operands and prints CPU-clock seconds (includes host<->device
 * transfer cost on every iteration). */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/* Device-only GEMM benchmark: operands are uploaded once, then `iter`
 * multiplications are timed (with a device sync per call) and reported as
 * seconds and GFLOPS.  Uses sec() to convert clock ticks to seconds. */
void time_ongpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        /* Force completion so the timing covers the actual kernel work. */
        cudaThreadSynchronize();
    }
    /* 2k multiply-adds per output element, plus the BETA update. */
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}
/* Cross-check GPU against CPU GEMM: run both on identical random operands
 * (seeded for reproducibility) and print the mean squared difference. */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    /* Zero both outputs so BETA=1 accumulates onto identical baselines. */
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);

    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
/* Manual benchmark driver for the GPU path: runs a fixed set of layer-sized
 * GEMM timings (the commented block keeps alternative accuracy/timing
 * configurations for ad-hoc use).  Always returns 0. */
int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,192,729,1600);
       time_ongpu(0,0,384,196,1728);
       time_ongpu(0,0,256,196,3456);
       time_ongpu(0,0,256,196,2304);
       time_ongpu(0,0,128,4096,12544);
       time_ongpu(0,0,128,4096,4096);
     */
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,576,12544);
    time_ongpu(0,0,256,2304,784);
    time_ongpu(1,1,2304,256,784);
    time_ongpu(0,0,512,4608,196);
    time_ongpu(1,1,4608,512,196);

    return 0;
}
#endif
|
fill_nr_s8.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "cvhf.h"
#include "nr_direct.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int GTOmax_shell_dim(int *ao_loc, int *shls, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas, double *env);
/*
* 8-fold symmetry, k>=l, k>=i>=j,
*/
/* Compute all integral blocks (lsh ksh | jsh ish) for one fixed (ish, jsh)
 * shell pair, over ksh <= ish and lsh <= ksh (the 8-fold symmetry noted
 * above).  Results are written into `eri`, laid out as [ij, k0, l0] with
 * dims {nao, nao, dj, di}; the integral engine's scratch cache lives
 * directly after the di*dj*nao^2 data region.  Blocks rejected by the
 * prescreening function are explicitly zero-filled. */
static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri,
                      int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs)
{
    const int *atm = envs->atm;
    const int *bas = envs->bas;
    const double *env = envs->env;
    const int natm = envs->natm;
    const int nbas = envs->nbas;
    const int *ao_loc = envs->ao_loc;
    const CINTOpt *cintopt = envs->cintopt;
    const int nao = ao_loc[nbas];
    const size_t nao2 = nao * nao;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    /* Scratch for the integral engine, placed after the data region. */
    double *cache = eri + di * dj * nao2;
    int dims[4] = {nao, nao, dj, di};
    int ksh, lsh, ij, k, l;
    int shls[4];
    double *peri;

    /* Shell ordering passed to the engine is (lsh, ksh, jsh, ish). */
    shls[2] = jsh;
    shls[3] = ish;

    for (ksh = 0; ksh <= ish; ksh++) {
    for (lsh = 0; lsh <= ksh; lsh++) {
        shls[0] = lsh;
        shls[1] = ksh;
        /* Destination corner for this (ksh, lsh) block within each ij. */
        peri = eri + ao_loc[ksh] * nao + ao_loc[lsh];
        if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
            (*intor)(peri, dims, shls, atm, natm, bas, nbas, env,
                     cintopt, cache);
        } else {
            /* Screened out: zero the k x l tile for every ij component. */
            for (ij = 0; ij < di*dj; ij++) {
                for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) {
                for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) {
                    peri[k*nao+l] = 0;
                } }
                peri += nao2;
            }
        }
    } }
}
/* Compute the (ish, jsh) block via fillnr_s8() into `buf`, then compress it
 * into the triangularly-packed output `eri`: for each AO pair i0 >= j0 the
 * pair index is ij0 = i0*(i0+1)/2 + j0, and only the kl pairs with
 * kl0 <= ij0 are stored at eri[ij0*(ij0+1)/2 + kl], giving the full 8-fold
 * symmetric (ij|kl) storage. */
static void store_ij(int (*intor)(), double *eri, double *buf, int ish, int jsh,
                     CVHFOpt *vhfopt, IntorEnvs *envs)
{
    const int nbas = envs->nbas;
    const int *ao_loc = envs->ao_loc;
    const int nao = ao_loc[nbas];
    const size_t nao2 = nao * nao;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    int i, j, k, l, i0, j0, kl;
    size_t ij0;
    double *peri, *pbuf;

    fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs);
    for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) {
    for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) {
        /* Only the lower triangle of (i0, j0) pairs is stored. */
        if (i0 >= j0) {
            ij0 = i0*(i0+1)/2 + j0;
            peri = eri + ij0*(ij0+1)/2;
            /* buf holds a full nao x nao matrix per (i, j) component. */
            pbuf = buf + nao2 * (i*dj+j);
            for (kl = 0, k = 0; k < i0; k++) {
            for (l = 0; l <= k; l++, kl++) {
                peri[kl] = pbuf[k*nao+l];
            } }
            /* Final partial row: k == i0 runs only up to l == j0. */
            for (l = 0; l <= j0; l++, kl++) {
                peri[kl] = pbuf[k*nao+l];
            }
        }
    } }
}
/* Driver: fill the full 8-fold-symmetric 2-electron integral array `eri`
 * (triangularly packed) using Schwarz-inequality prescreening.  The shell
 * pairs ij = ish*(ish+1)/2 + jsh are distributed dynamically over OpenMP
 * threads, each with its own di*di*nao^2 + cache_size scratch buffer. */
void GTO2e_cart_or_sph(int (*intor)(), CINTOpt *cintopt, double *eri, int *ao_loc,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
    const int nao = ao_loc[nbas];
    IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL,
            cintopt, 1};
    CVHFOpt *vhfopt;
    CVHFnr_optimizer(&vhfopt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env);
    /* Schwarz screening: skip blocks bounded below the accuracy target. */
    vhfopt->fprescreen = CVHFnr_schwarz_cond;
    int shls_slice[] = {0, nbas};
    const int di = GTOmax_shell_dim(ao_loc, shls_slice, 1);
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                             atm, natm, bas, nbas, env);
#pragma omp parallel
{
    int i, j, ij;
    /* Per-thread scratch: largest shell-pair block plus engine cache.
     * NOTE(review): malloc result is unchecked -- confirm allocation
     * failure is impossible/acceptable here. */
    double *buf = malloc(sizeof(double) * (di*di*nao*nao + cache_size));
#pragma omp for nowait schedule(dynamic, 2)
    for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
        /* Decode the triangular pair index: i = floor((sqrt(8ij+1)-1)/2). */
        i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
        j = ij - (i*(i+1)/2);
        store_ij(intor, eri, buf, i, j, vhfopt, &envs);
    }
    free(buf);
}
    CVHFdel_optimizer(&vhfopt);
}
|
mgl.h | /***************************************************************************
* mgl.h is part of Math Graphic Library
* Copyright (C) 2007-2016 Alexey Balakin <mathgl.abalakin@gmail.ru> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#ifndef _MGL_H_
#define _MGL_H_
#include "mgl2/mgl_cf.h"
#ifdef __cplusplus
#include "mgl2/data.h"
#include "mgl2/datac.h"
#include <sys/stat.h>
//-----------------------------------------------------------------------------
/// Wrapper class for all graphics
class MGL_EXPORT mglGraph
{
// NOTE(review): copy construction/assignment are intentionally private no-ops,
// so an mglGraph cannot be copied (it reference-counts the underlying HMGL).
mglGraph(const mglGraph &) {} // copying is not allowed
const mglGraph &operator=(const mglGraph &t) { return t; }
protected:
HMGL gr; ///< Underlying C-level graphics handle all wrappers delegate to
public:
HMPR pr; ///< Pointer to associated MGL parser
/// Create a canvas: kind=-1 gives no canvas (gr=NULL); kind=1 gives an OpenGL
/// canvas when MGL_HAVE_OPENGL, otherwise falls back to a bitmap canvas and
/// records a global warning; any other kind gives a width*height bitmap canvas.
mglGraph(int kind=0, int width=600, int height=400)
{ pr = NULL;
if(kind==-1) gr=NULL;
#if MGL_HAVE_OPENGL
else if(kind==1) gr=mgl_create_graph_gl();
#else
else if(kind==1)
{ gr=mgl_create_graph(width, height);
SetGlobalWarn("OpenGL support was disabled. Please, enable it and rebuild MathGL."); }
#endif
else gr=mgl_create_graph(width, height);
}
/// Wrap an existing HMGL handle; mgl_use_graph(gr,1) presumably increments
/// its use counter -- confirm against mgl_use_graph documentation.
mglGraph(HMGL graph)
{ pr = NULL; gr = graph; mgl_use_graph(gr,1); }
/// Decrement the use counter and delete the handle once no user remains.
virtual ~mglGraph()
{ if(mgl_use_graph(gr,-1)<1) mgl_delete_graph(gr); }
/// Get pointer to internal HMGL object
inline HMGL Self() { return gr; }
/// Set default parameters for plotting
inline void DefaultPlotParam() { mgl_set_def_param(gr); }
/// Set name of plot for saving filename
inline void SetPlotId(const char *id) { mgl_set_plotid(gr,id); }
/// Get name of plot for saving filename
inline const char *GetPlotId() { return mgl_get_plotid(gr); }
/// Ask to stop drawing
inline void Stop(bool stop=true) { mgl_ask_stop(gr, stop); }
/// Check if plot termination is asked
inline bool NeedStop() { return mgl_need_stop(gr); }
/// Set callback function for event processing (par is forwarded unchanged to func)
inline void SetEventFunc(void (*func)(void *), void *par=NULL)
{ mgl_set_event_func(gr, func, par); }
/// Set the transparency on/off.
inline void Alpha(bool enable) { mgl_set_alpha(gr, enable); }
/// Set the gray-scale mode on/off.
inline void Gray(bool enable) { mgl_set_gray(gr, enable); }
/// Set default value of alpha-channel
inline void SetAlphaDef(double alpha) { mgl_set_alpha_default(gr, alpha); }
/// Set the transparency type (0 - usual, 1 - glass, 2 - lamp)
inline void SetTranspType(int type) { mgl_set_transp_type(gr, type); }
/// Set the size of semi-transparent area around lines, marks, glyphs, ... Default is 1.
inline void SetPenDelta(double d) { mgl_pen_delta(gr,d); }
/// Set the using of light on/off.
inline void Light(bool enable) { mgl_set_light(gr, enable); }
/// Switch on/off the specified light source.
inline void Light(int n,bool enable) { mgl_set_light_n(gr, n, enable); }
/// Use diffusive light (only for local light sources) -- OBSOLETE
inline void SetDifLight(bool dif) { mgl_set_light_dif(gr, dif); }
/// Set to attach light settings to inplot.
inline void AttachLight(bool enable) { mgl_set_attach_light(gr, enable); }
/// Add a light source.
inline void AddLight(int n, mglPoint p, char col='w', double bright=0.5, double ap=0)
{ mgl_add_light_ext(gr, n, p.x, p.y, p.z, col, bright, ap); }
/// Add a local light source -- NOTE(review): r is presumably the light position
/// and p its direction; confirm against mgl_add_light_loc documentation.
inline void AddLight(int n, mglPoint r, mglPoint p, char col='w', double bright=0.5, double ap=0)
{ mgl_add_light_loc(gr, n, r.x, r.y, r.z, p.x, p.y, p.z, col, bright, ap); }
/// Set ambient light brightness
inline void SetAmbient(double i) { mgl_set_ambbr(gr, i); }
/// Set diffusive light brightness
inline void SetDiffuse(double i) { mgl_set_difbr(gr, i); }
/// Set the fog distance or switch it off (if d=0).
inline void Fog(double d, double dz=0.25) { mgl_set_fog(gr, d, dz); }
/// Set relative width of rectangles in Bars, Barh, BoxPlot, Candle, OHLC (default is 0.7)
inline void SetBarWidth(double width) { mgl_set_bar_width(gr, width); }
/// Set default size of marks (locally you can use "size" option)
inline void SetMarkSize(double size) { mgl_set_mark_size(gr, size); }
/// Set default size of arrows (locally you can use "size" option)
inline void SetArrowSize(double size) { mgl_set_arrow_size(gr, size); }
/// Set number of mesh lines (use 0 to draw all of them)
inline void SetMeshNum(int num) { mgl_set_meshnum(gr, num); }
/// Set number of visible faces (use 0 to draw all of them)
inline void SetFaceNum(int num) { mgl_set_facenum(gr, num); }
/// Set cutting for points outside of bounding box
inline void SetCut(bool cut) { mgl_set_cut(gr, cut); }
/// Set additional cutting box
inline void SetCutBox(mglPoint p1, mglPoint p2)
{ mgl_set_cut_box(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z); }
/// Set the cutting off condition (formula)
inline void CutOff(const char *EqC) { mgl_set_cutoff(gr, EqC); }
/// Set default font size
inline void SetFontSize(double size) { mgl_set_font_size(gr, size); }
/// Set default font style and color
inline void SetFontDef(const char *fnt) { mgl_set_font_def(gr, fnt); }
/// Set FontSize by size in pt and picture DPI (default is 16 pt for dpi=72)
virtual void SetFontSizePT(double pt, int dpi=72) { SetFontSize(pt*27.f/dpi); }
/// Set FontSize by size in centimeters and picture DPI (default is 0.56 cm = 16 pt)
inline void SetFontSizeCM(double cm, int dpi=72) { SetFontSizePT(cm*28.45f,dpi); }
/// Set FontSize by size in inch and picture DPI (default is 0.22 in = 16 pt)
inline void SetFontSizeIN(double in, int dpi=72) { SetFontSizePT(in*72.27f,dpi); }
/// Load font from file
inline void LoadFont(const char *name, const char *path=NULL)
{ mgl_load_font(gr, name, path); }
/// Copy font from another mglGraph instance
inline void CopyFont(const mglGraph *GR) { mgl_copy_font(gr, GR->gr);}
/// Restore font (load default font for new HMGL objects)
inline void RestoreFont() { mgl_restore_font(gr); }
/// Set to use or not text rotation
inline void SetRotatedText(bool enable) { mgl_set_rotated_text(gr, enable); }
/// Set to scale text in relative subplots too
inline void SetScaleText(bool enable) { mgl_set_scale_text(gr, enable); }
/// Set default font for all new HMGL and mglGraph objects
static inline void SetDefFont(const char *name, const char *path=NULL) { mgl_def_font(name,path); }
/// Add user-defined glyph for symbol and set its optional id
inline void DefineSymbol(char id, const mglDataA &x, const mglDataA &y)
{ mgl_define_symbol(gr, id, &x, &y); }
/// Set default palette
inline void SetPalette(const char *colors) { mgl_set_palette(gr, colors); }
/// Set default color scheme
inline void SetDefScheme(const char *sch) { mgl_set_def_sch(gr, sch); }
/// Sets RGB values for color with given id
static inline void SetColor(char id, double r, double g, double b) { mgl_set_color(id, r, g, b); }
/// Set mask for face coloring as array of type 'unsigned char[8]'
static inline void SetMask(char id, const char *mask) { mgl_set_mask(id, mask); }
/// Set mask for face coloring as uint64_t number
static inline void SetMask(char id, uint64_t mask) { mgl_set_mask_val(id, mask); }
/// Set default mask rotation angle
inline void SetMaskAngle(int angle) { mgl_set_mask_angle(gr, angle); }
/// Get last warning code
inline int GetWarn() { return mgl_get_warn(gr);}
/// Set warning code and fill message
inline void SetWarn(int code, const char *info) { mgl_set_warn(gr,code,info); }
/// Get text of warning message(s)
inline const char *Message() { return mgl_get_mess(gr); }
/// Set global warning message
static inline void SetGlobalWarn(const char *text) { mgl_set_global_warn(text); }
/// Get text of global warning message(s)
static inline const char *GlobalWarn() { return mgl_get_global_warn(); }
/// Suppress printing warnings to stderr
static inline void SuppressWarn(bool on) { mgl_suppress_warn(on); }
/// Check if MathGL version is valid (return false) or not (return true)
static inline bool CheckVersion(const char *ver) { return mgl_check_version(ver); }
/// Display progress of something.
inline void Progress(int value, int maximal) { mgl_progress(value, maximal, gr); }
/// Set axis range scaling -- simplified way to shift/zoom axis range -- need to replot whole image!
inline void ZoomAxis(mglPoint p1=mglPoint(0,0,0,0), mglPoint p2=mglPoint(1,1,1,1))
{ mgl_zoom_axis(gr, p1.x,p1.y,p1.z,p1.c, p2.x,p2.y,p2.z,p2.c); }
/// Add [v1, v2] to the current range in direction dir
inline void AddRange(char dir, double v1, double v2)
{ mgl_add_range_val(gr, dir, v1, v2); }
/// Set range in direction dir as [v1, v2]
inline void SetRange(char dir, double v1, double v2)
{ mgl_set_range_val(gr, dir, v1, v2); }
/// Set range in direction dir as minimal and maximal values of data a
inline void SetRange(char dir, const mglDataA &dat, bool add=false)
{ mgl_set_range_dat(gr, dir, &dat, add); }
/// Set values of axis range as minimal and maximal values of corresponding data
inline void SetRanges(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz, const mglDataA &cc)
{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0);
mgl_set_range_dat(gr,'z',&zz,0); mgl_set_range_dat(gr,'c',&cc,0); }
/// Set values of axis range as minimal and maximal values of corresponding data
/// (the color range 'c' is taken from the z-data here).
inline void SetRanges(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz)
{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0);
mgl_set_range_dat(gr,'z',&zz,0); mgl_set_range_dat(gr,'c',&zz,0); }
/// Set values of axis range as minimal and maximal values of corresponding data
inline void SetRanges(const mglDataA &xx, const mglDataA &yy)
{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0); }
/// Set values of axis ranges
inline void SetRanges(double x1, double x2, double y1, double y2, double z1=0, double z2=0)
{ mgl_set_ranges(gr, x1, x2, y1, y2, z1, z2); }
/// Set values of axis ranges
inline void SetRanges(mglPoint p1, mglPoint p2)
{ mgl_set_ranges(gr, p1.x, p2.x, p1.y, p2.y, p1.z, p2.z); }
/// Set ranges for automatic variables
inline void SetAutoRanges(double x1, double x2, double y1=0, double y2=0, double z1=0, double z2=0, double c1=0, double c2=0)
{ mgl_set_auto_ranges(gr, x1, x2, y1, y2, z1, z2, c1, c2); }
/// Set ranges for automatic variables
inline void SetAutoRanges(mglPoint p1, mglPoint p2)
{ mgl_set_auto_ranges(gr, p1.x, p2.x, p1.y, p2.y, p1.z, p2.z, p1.c, p2.c); }
/// Set axis origin
inline void SetOrigin(mglPoint p)
{ mgl_set_origin(gr, p.x, p.y, p.z); }
/// Set axis origin; z0=mglNaN by default -- NOTE(review): presumably lets the
/// library auto-select the z-origin; confirm against mgl_set_origin docs.
inline void SetOrigin(double x0, double y0, double z0=mglNaN)
{ mgl_set_origin(gr, x0, y0, z0); }
/// Set the transformation formulas for coordinate. Use "" or NULL for built-in ones
inline void SetFunc(const char *EqX, const char *EqY, const char *EqZ=NULL, const char *EqA=NULL)
{ mgl_set_func(gr, EqX, EqY, EqZ, EqA); }
/// Set one of predefined transformation rule
inline void SetCoor(int how) { mgl_set_coor(gr, how); }
/// Set to draw Ternary axis (triangle like axis, grid and so on)
/** val=1 for Ternary axis (a+b+c=1, z=z),
* val=2 for Quaternary axis (a+b+c+d=1),
* val|4 for projections. */
inline void Ternary(int val) { mgl_set_ternary(gr, val); }
/// Set to use or not tick labels rotation
inline void SetTickRotate(bool val) { mgl_set_tick_rotate(gr,val); }
/// Set to use or not tick labels skipping
inline void SetTickSkip(bool val) { mgl_set_tick_skip(gr,val); }
/// Set tick length
inline void SetTickLen(double len, double stt=1)
{ mgl_set_tick_len(gr, len, stt); }
/// Set axis and ticks style
inline void SetAxisStl(const char *stl="k", const char *tck=0, const char *sub=0)
{ mgl_set_axis_stl(gr, stl, tck, sub); }
/// Set time templates for ticks
inline void SetTicksTime(char dir, double d=0, const char *t="")
{ mgl_set_ticks_time(gr,dir,d,t); }
/// Set ticks text (\n separated). Use "" to disable this feature.
inline void SetTicksVal(char dir, const char *lbl, bool add=false)
{ mgl_set_ticks_str(gr,dir,lbl,add); }
/// Wide-character overload of the above.
inline void SetTicksVal(char dir, const wchar_t *lbl, bool add=false)
{ mgl_set_ticks_wcs(gr,dir,lbl,add); }
/// Set ticks position and text (\n separated). Use "" to disable this feature.
inline void SetTicksVal(char dir, const mglDataA &v, const char *lbl, bool add=false)
{ mgl_set_ticks_val(gr,dir,&v,lbl,add); }
/// Wide-character overload of the above.
inline void SetTicksVal(char dir, const mglDataA &v, const wchar_t *lbl, bool add=false)
{ mgl_set_ticks_valw(gr,dir,&v,lbl,add); }
/// Add manual tick at given position. Use "" to disable this feature.
inline void AddTick(char dir, double val, const char *lbl)
{ mgl_add_tick(gr,dir,val,lbl); }
/// Wide-character overload of the above.
inline void AddTick(char dir, double val, const wchar_t *lbl)
{ mgl_add_tickw(gr,dir,val,lbl); }
/// Set the ticks parameters and string for its factor
inline void SetTicks(char dir, double d=0, int ns=0, double org=mglNaN, const char *factor="")
{ mgl_set_ticks_fact(gr, dir, d, ns, org, factor); }
/// Wide-character overload of the above.
inline void SetTicks(char dir, double d, int ns, double org, const wchar_t *factor)
{ mgl_set_ticks_factw(gr, dir, d, ns, org, factor); }
/// Auto adjust ticks
inline void Adjust(const char *dir="xyzc")
{ mgl_adjust_ticks(gr, dir); }
/// Set templates for ticks
inline void SetTickTempl(char dir, const char *t)
{ mgl_set_tick_templ(gr,dir,t); }
/// Wide-character overload of the above.
inline void SetTickTempl(char dir, const wchar_t *t)
{ mgl_set_tick_templw(gr,dir,t); }
/// Tune ticks (tune|1 for common multiplier, tune|2 for common component)
inline void SetTuneTicks(int tune, double fact_pos=1.15)
{ mgl_tune_ticks(gr, tune, fact_pos); }
/// Set additional shift of tick labels
inline void SetTickShift(mglPoint p)
{ mgl_set_tick_shift(gr,p.x,p.y,p.z,p.c); }
/// Set to use UTC time instead of local time
inline void SetTimeUTC(bool enable)
{ mgl_set_flag(gr,enable, MGL_USE_GMTIME); }
/// Set to draw tick labels at axis origin
/// (the flag stores the negation: MGL_NO_ORIGIN is set when enable is false).
inline void SetOriginTick(bool enable=true)
{ mgl_set_flag(gr,!enable, MGL_NO_ORIGIN); }
/// Set bit-value flag of HMGL state (for advanced users only)
inline void SetFlagAdv(int val, uint32_t flag)
{ mgl_set_flag(gr, val, flag); }
/// Put further plotting in m-th cell of nx*ny grid of the image.
/** String \a style may contain:
* '<' for reserving space at left
* '>' for reserving space at right
* '^' for reserving space at top
* '_' for reserving space at bottom
* '#' for using whole region. */
inline void SubPlot(int nx,int ny,int m,const char *style="<>_^", double dx=0, double dy=0)
{ mgl_subplot_d(gr, nx, ny, m, style, dx, dy); }
/// Put further plotting in rectangle of dx*dy cells starting from m-th cell of nx*ny grid of the image and shift it by distance {sx,sy}.
/** String \a style may contain:
* '<' for reserving space at left
* '>' for reserving space at right
* '^' for reserving space at top
* '_' for reserving space at bottom
* '#' for using whole region. */
inline void MultiPlot(int nx,int ny,int m, int dx, int dy, const char *style="<>_^", double sx=0, double sy=0)
{ mgl_multiplot_d(gr, nx, ny, m, dx, dy, style, sx, sy); }
/// Put further plotting in a region [x1,x2]*[y1,y2] of the image or subplot (x1,x2,y1,y2 in range [0, 1]).
/// rel=true positions relative to the current subplot, false relative to the whole image.
inline void InPlot(double x1,double x2,double y1,double y2, bool rel=true)
{ if(rel) mgl_relplot(gr, x1, x2, y1, y2);
else mgl_inplot(gr, x1, x2, y1, y2); }
/// Put further plotting in column cell of previous subplot
/// (d is an extra displacement of the cell -- TODO confirm against mgl_columnplot docs).
inline void ColumnPlot(int num, int ind, double d=0)
{ mgl_columnplot(gr,num,ind,d); }
/// Put further plotting in matrix cell of previous subplot
inline void GridPlot(int nx, int ny, int ind, double d=0)
{ mgl_gridplot(gr,nx,ny,ind,d); }
/// Put further plotting in cell of stick rotated on angles tet, phi
inline void StickPlot(int num, int i, double tet, double phi)
{ mgl_stickplot(gr,num,i,tet,phi); }
/// Put further plotting in cell of stick sheared on sx, sy.
inline void ShearPlot(int num, int i, mreal sx, mreal sy, mreal xd=1, mreal yd=0)
{ mgl_shearplot(gr,num,i,sx,sy,xd,yd); }
/// Set factor of plot size
inline void SetPlotFactor(double val)
{ mgl_set_plotfactor(gr,val); }
/// Push transformation matrix into stack
inline void Push() { mgl_mat_push(gr); }
/// Pop transformation matrix from stack
inline void Pop() { mgl_mat_pop(gr); }
/// Add title for current subplot/inplot
/** Style '#' draw box around the title. */
inline void Title(const char *title,const char *stl="",double size=-2)
{ mgl_title(gr,title,stl,size); }
/// Add title for current subplot/inplot
/** Style '#' draw box around the title. */
inline void Title(const wchar_t *title,const char *stl="",double size=-2)
{ mgl_titlew(gr,title,stl,size); }
/// Set aspect ratio for further plotting.
inline void Aspect(double Ax,double Ay,double Az=1)
{ mgl_aspect(gr, Ax, Ay, Az); }
/// Shear a further plotting.
inline void Shear(double Sx,double Sy)
{ mgl_shear(gr, Sx, Sy); }
/// Rotate a further plotting.
inline void Rotate(double TetX,double TetZ=0,double TetY=0)
{ mgl_rotate(gr, TetX, TetZ, TetY); }
/// Rotate a further plotting around vector {x,y,z}.
inline void RotateN(double Tet,double x,double y,double z)
{ mgl_rotate_vector(gr, Tet, x, y, z); }
/// Set perspective (in range [0,1)) for plot. Set to zero for switching off.
inline void Perspective(double val)
{ mgl_perspective(gr, val); }
/// Set angle of view independently from Rotate().
inline void View(double TetX,double TetZ=0,double TetY=0)
{ mgl_view(gr, TetX, TetZ, TetY); }
/// Set angle of view independently from Rotate()
/// (same as View() but with the Rotate() argument order and negated angles).
inline void ViewAsRotate(double TetZ,double TetX,double TetY=0)
{ mgl_view(gr, -TetX, -TetZ, -TetY); }
/// Zoom in/out a part of picture (use Zoom(0, 0, 1, 1) for restore default)
inline void Zoom(double x1, double y1, double x2, double y2)
{ mgl_zoom(gr, x1, y1, x2, y2); }
/// Set size of frame in pixels. Normally this function is called internally.
/** With clf=true the canvas is recreated (mgl_set_size); with clf=false the
* existing picture is presumably rescaled (mgl_scale_size) -- confirm in docs. */
inline void SetSize(int width, int height, bool clf=true)
{ if(clf) mgl_set_size(gr, width, height);
else mgl_scale_size(gr, width, height); }
/// Scaling for all further set size calls.
static inline void SetSizeScl(double scl) { mgl_set_size_scl(scl); }
/// Set plot quality
/** qual=0 -- no face drawing (fastest),
* qual=1 -- no color interpolation (fast),
* qual=2 -- high quality (normal),
* qual|4 -- direct bitmap drawing (low memory usage);
* qual|8 for dots drawing instead of primitives (extremely fast). */
inline void SetQuality(int qual=MGL_DRAW_NORM) { mgl_set_quality(gr, qual); }
/// Get plot quality
inline int GetQuality() { return mgl_get_quality(gr); }
/// Set drawing region for Quality&4
inline void SetDrawReg(long nx=1, long ny=1, long m=0) { mgl_set_draw_reg(gr,nx,ny,m); }
/// Start group of objects
inline void StartGroup(const char *name) { mgl_start_group(gr, name); }
/// End group of objects
inline void EndGroup() { mgl_end_group(gr); }
/// Highlight objects with given id
inline void Highlight(int id) { mgl_highlight(gr, id); }
/// Set boundary box for export graphics into 2D file formats.
/** If x2<0 (y2<0) then full width (height) will be used.
* If x1<0 or y1<0 or x1>=x2|Width or y1>=y2|Height then cropping will be disabled. */
inline void SetBBox(int x1=0, int y1=0, int x2=-1, int y2=-1)
{ mgl_set_bbox(gr,x1,y1,x2,y2); }
/// Show current image
inline void ShowImage(const char *viewer, bool keep=0)
{ mgl_show_image(gr, viewer, keep); }
/// Write the frame in file (depending extension, write current frame if fname is empty)
inline void WriteFrame(const char *fname=0,const char *descr="")
{ mgl_write_frame(gr, fname, descr); }
/// Write the frame in file using JPEG format
inline void WriteJPEG(const char *fname,const char *descr="")
{ mgl_write_jpg(gr, fname, descr); }
/// Write the frame in file using PNG format with transparency
/// (alpha=false writes a solid, non-transparent PNG instead).
inline void WritePNG(const char *fname,const char *descr="", bool alpha=true)
{ if(alpha) mgl_write_png(gr, fname, descr);
else mgl_write_png_solid(gr, fname, descr); }
/// Write the frame in file using BMP format
inline void WriteBMP(const char *fname,const char *descr="")
{ mgl_write_bmp(gr, fname, descr); }
/// Write the frame in file using TGA format
inline void WriteTGA(const char *fname,const char *descr="")
{ mgl_write_tga(gr, fname, descr); }
/// Write the frame in file using PostScript format
inline void WriteEPS(const char *fname,const char *descr="")
{ mgl_write_eps(gr, fname, descr); }
/// Write the frame in file using LaTeX format
inline void WriteTEX(const char *fname,const char *descr="")
{ mgl_write_tex(gr, fname, descr); }
/// Write the frame in file using PostScript format as bitmap
inline void WriteBPS(const char *fname,const char *descr="")
{ mgl_write_bps(gr, fname, descr); }
/// Write the frame in file using SVG format
inline void WriteSVG(const char *fname,const char *descr="")
{ mgl_write_svg(gr, fname, descr); }
/// Write the frame in file using GIF format (only for current frame!)
inline void WriteGIF(const char *fname,const char *descr="")
{ mgl_write_gif(gr, fname, descr); }
/// Write the frame in file using OBJ format
inline void WriteOBJ(const char *fname,const char *descr="",bool use_png=true)
{ mgl_write_obj(gr, fname, descr, use_png); }
/// Write the frame in file using OBJ format - Balakin way
inline void WriteOBJold(const char *fname,const char *descr="",bool use_png=true)
{ mgl_write_obj_old(gr, fname, descr, use_png); }
/// Write the frame in file using XYZ format
inline void WriteXYZ(const char *fname,const char *descr="")
{ mgl_write_xyz(gr, fname, descr); }
/// Write the frame in file using STL format (faces only)
inline void WriteSTL(const char *fname,const char *descr="")
{ mgl_write_stl(gr, fname, descr); }
/// Write the frame in file using OFF format
inline void WriteOFF(const char *fname,const char *descr="", bool colored=false)
{ mgl_write_off(gr, fname, descr,colored); }
// /// Write the frame in file using X3D format
// inline void WriteX3D(const char *fname,const char *descr="")
// { mgl_write_x3d(gr, fname, descr); }
/// Write the frame in file using PRC format
inline void WritePRC(const char *fname,const char *descr="",bool make_pdf=true)
{ mgl_write_prc(gr, fname, descr, make_pdf); }
/// Export in JSON format suitable for later drawing by JavaScript
inline void WriteJSON(const char *fname,const char *descr="",bool force_z=false)
{ if(force_z) mgl_write_json_z(gr, fname, descr);
else mgl_write_json(gr, fname, descr); }
/// Return string of JSON data suitable for later drawing by JavaScript
inline const char *GetJSON() { return mgl_get_json(gr); }
/// Force preparing the image. It can be useful for OpenGL mode mostly.
inline void Finish() { mgl_finish(gr); }
/// Create new frame.
inline void NewFrame() { mgl_new_frame(gr); }
/// Finish frame drawing
inline void EndFrame() { mgl_end_frame(gr); }
/// Get the number of created frames
inline int GetNumFrame() { return mgl_get_num_frame(gr); }
/// Reset frames counter (start it from zero)
inline void ResetFrames() { mgl_reset_frames(gr); }
/// Delete primitives for i-th frame (work if MGL_VECT_FRAME is set on)
inline void DelFrame(int i) { mgl_del_frame(gr, i); }
/// Get drawing data for i-th frame (work if MGL_VECT_FRAME is set on)
inline void GetFrame(int i) { mgl_get_frame(gr, i); }
/// Set drawing data for i-th frame (work if MGL_VECT_FRAME is set on). Work as EndFrame() but don't add frame to GIF image.
inline void SetFrame(int i) { mgl_set_frame(gr, i); }
/// Append drawing data from i-th frame (work if MGL_VECT_FRAME is set on)
inline void ShowFrame(int i){ mgl_show_frame(gr, i); }
/// Clear list of primitives for current drawing
inline void ClearFrame() { mgl_clear_frame(gr); }
/// Start write frames to cinema using GIF format
inline void StartGIF(const char *fname, int ms=100)
{ mgl_start_gif(gr, fname,ms); }
/// Stop writing cinema using GIF format
inline void CloseGIF() { mgl_close_gif(gr); }
/// Export points and primitives in file using MGLD format
inline void ExportMGLD(const char *fname, const char *descr=0)
{ mgl_export_mgld(gr, fname, descr); }
/// Import points and primitives from file using MGLD format
inline void ImportMGLD(const char *fname, bool add=false)
{ mgl_import_mgld(gr, fname, add); }
/// Copy RGB values into array which is allocated by user
/** Position of element {i,j} is [3*i + 3*Width*j].
* Copies only when imglen >= 3*Width*Height; returns whether it fit. */
inline bool GetRGB(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=3*w*h) memcpy(imgdata, mgl_get_rgb(gr),3*w*h);
return imglen>=3*w*h;
}
/// Get RGB values of current bitmap
/** Position of element {i,j} is [3*i + 3*Width*j]. */
inline const unsigned char *GetRGB() { return mgl_get_rgb(gr); }
/// Copy RGBA values into array which is allocated by user
/** Position of element {i,j} is [4*i + 4*Width*j].
* Copies only when imglen >= 4*Width*Height; returns whether it fit. */
inline bool GetRGBA(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=4*w*h) memcpy(imgdata, mgl_get_rgba(gr),4*w*h);
return imglen>=4*w*h;
}
/// Get RGBA values of current bitmap
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline const unsigned char *GetRGBA() { return mgl_get_rgba(gr); }
/// Copy BGRN values into array which is allocated by user
inline bool GetBGRN(unsigned char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr), i;
const unsigned char *buf=mgl_get_rgb(gr);
if(imglen>=4*w*h) for(i=0;i<w*h;i++)
{
imgdata[4*i] = buf[3*i+2];
imgdata[4*i+1] = buf[3*i+1];
imgdata[4*i+2] = buf[3*i];
imgdata[4*i+3] = 255;
}
return imglen>=4*w*h;
}
/// Copy RGBA values of background image into array which is allocated by user
/** Position of element {i,j} is [4*i + 4*Width*j].
* Copies only when imglen >= 4*Width*Height; returns whether it fit. */
inline bool GetBackground(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=4*w*h) memcpy(imgdata, mgl_get_background(gr),4*w*h);
return imglen>=4*w*h;
}
/// Get RGBA values of background image
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline const unsigned char *GetBackground() { return mgl_get_background(gr); }
/// Get width of the image
inline int GetWidth() { return mgl_get_width(gr); }
/// Get height of the image
inline int GetHeight() { return mgl_get_height(gr);}
/// Calculate 3D coordinate {x,y,z} for screen point {xs,ys}
inline mglPoint CalcXYZ(int xs, int ys)
{
mreal x,y,z;
mgl_calc_xyz(gr,xs,ys,&x,&y,&z);
return mglPoint(x,y,z);
}
/// Calculate screen point {xs,ys} for 3D coordinate {x,y,z}
inline mglPoint CalcScr(mglPoint p)
{
int xs,ys;
mgl_calc_scr(gr,p.x,p.y,p.z,&xs,&ys);
return mglPoint(xs,ys);
}
/// Set object/subplot id
inline void SetObjId(int id) { mgl_set_obj_id(gr,id); }
/// Get object id
inline int GetObjId(long x,long y) { return mgl_get_obj_id(gr,x,y); }
/// Get subplot id
inline int GetSplId(long x,long y) { return mgl_get_spl_id(gr,x,y); }
/// Check if {\a xs,\a ys} is close to active point with accuracy d, and return its position or -1
inline long IsActive(int xs, int ys, int d=1) { return mgl_is_active(gr,xs,ys,d); }
/// Combine plots from 2 canvases. Result will be saved into this
inline void Combine(const mglGraph *g) { mgl_combine_gr(gr,g->gr); }
/// Clear up the frame and fill background by specified color
inline void Clf(double r, double g, double b) { mgl_clf_rgb(gr, r, g, b); }
/// Clear up the frame and fill background by specified color with manual transparency
inline void Clf(const char *col) { mgl_clf_str(gr, col); }
/// Clear up the frame and fill background by specified color
inline void Clf(char col) { mgl_clf_chr(gr, col); }
/// Clear up the frame
inline void Clf() { mgl_clf(gr); }
/// Clear unused points and primitives. Useful only in combination with SetFaceNum().
inline void ClearUnused() { mgl_clear_unused(gr); }
/// Load background image
inline void LoadBackground(const char *fname, double alpha=1)
{ mgl_load_background(gr,fname,alpha); }
/// Force drawing the image and use it as background one
inline void Rasterize() { mgl_rasterize(gr); }
/// Draws the point (ball) at position {x,y,z} with color c
/// (builds the two-character mark string ".c" and delegates to Mark).
inline void Ball(mglPoint p, char c='r')
{ char s[3]={'.',c,0}; mgl_mark(gr, p.x, p.y, p.z, s); }
/// Draws the mark at position p
inline void Mark(mglPoint p, const char *mark)
{ mgl_mark(gr, p.x, p.y, p.z, mark); }
/// Draws the line between points by specified pen
/** Large \a n (for example, n=100) should be used for geodesic line in curved coordinates */
inline void Line(mglPoint p1, mglPoint p2, const char *pen="B",int n=2)
{ mgl_line(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, pen, n); }
/// Draws the spline curve between points by specified pen
inline void Curve(mglPoint p1, mglPoint d1, mglPoint p2, mglPoint d2, const char *pen="B", int n=100)
{ mgl_curve(gr, p1.x, p1.y, p1.z, d1.x, d1.y, d1.z, p2.x, p2.y, p2.z, d2.x, d2.y, d2.z, pen, n); }
/// Draws the 3d error box e for point p
inline void Error(mglPoint p, mglPoint e, const char *pen="k")
{ mgl_error_box(gr, p.x, p.y, p.z, e.x, e.y, e.z, pen); }
/// Draws Lamerey diagram for mapping x_new = f(x_old)
/** String \a stl may contain: ‘v’ for drawing arrows; ‘~’ for disable 1st segment.
* Option value set the number of segments (default is 20).*/
inline void Lamerey(double x0, const mglDataA &f, const char *stl="", const char *opt="")
{ mgl_lamerey_dat(gr,x0,&f,stl,opt); }
/// Same as above but the mapping is given by a formula string.
inline void Lamerey(double x0, const char *func, const char *stl="", const char *opt="")
{ mgl_lamerey_str(gr,x0,func,stl,opt); }
/// Draws Bifurcation diagram for mapping x_new = f(x_old) in x-axis range
/** Option value set the number of stationary points (default is 1024).*/
inline void Bifurcation(double dx, const mglDataA &f, const char *stl="", const char *opt="")
{ mgl_bifurcation_dat(gr,dx,&f,stl,opt); }
/// Same as above but the mapping is given by a formula string.
inline void Bifurcation(double dx, const char *func, const char *stl="", const char *opt="")
{ mgl_bifurcation_str(gr,dx,func,stl,opt); }
/// Draws Iris plots for determining cross-dependences of data arrays
/** NOTE: using the same ranges and empty ids will not draw axis. This will add data to existing Iris plot.
* Option value set the size of data labels ids, separated by ';'.*/
inline void Iris(mglDataA &dats, const char *ids, const char *stl="", const char *opt="")
{ mgl_iris_1(gr,&dats,ids,stl,opt); }
/// Wide-character overload of the above.
inline void Iris(mglDataA &dats, const wchar_t *ids, const char *stl="", const char *opt="")
{ mgl_irisw_1(gr,&dats,ids,stl,opt); }
/// Iris plot with explicit per-variable ranges.
inline void Iris(mglDataA &dats, mglDataA &ranges, const char *ids, const char *stl="", const char *opt="")
{ mgl_iris(gr,&dats,&ranges,ids,stl,opt); }
/// Wide-character overload of the above.
inline void Iris(mglDataA &dats, mglDataA &ranges, const wchar_t *ids, const char *stl="", const char *opt="")
{ mgl_irisw(gr,&dats,&ranges,ids,stl,opt); }
/// Draws the face between points with color stl (include interpolation up to 4 colors).
inline void Face(mglPoint p1, mglPoint p2, mglPoint p3, mglPoint p4, const char *stl="r")
{ mgl_face(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, p3.x, p3.y, p3.z, p4.x, p4.y, p4.z, stl); }
/// Draws the face in y-z plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceX(mglPoint p, double wy, double wz, const char *stl="w", double dx=0, double dy=0)
{ mgl_facex(gr, p.x, p.y, p.z, wy, wz, stl, dx, dy); }
/// Draws the face in x-z plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceY(mglPoint p, double wx, double wz, const char *stl="w", double dx=0, double dy=0)
{ mgl_facey(gr, p.x, p.y, p.z, wx, wz, stl, dx, dy); }
/// Draws the face in x-y plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceZ(mglPoint p, double wx, double wy, const char *stl="w", double dx=0, double dy=0)
{ mgl_facez(gr, p.x, p.y, p.z, wx, wy, stl, dx, dy); }
/// Draws the drop at point p in direction d with color col and radius r
/** Parameter \a shift set the degree of drop oblongness: ‘0’ is sphere, ‘1’ is maximally oblongness drop. Parameter \a ap set relative width of the drop (this is analogue of “ellipticity” for the sphere).*/
inline void Drop(mglPoint p, mglPoint d, double r, const char *col="r", double shift=1, double ap=1)
{ mgl_drop(gr, p.x, p.y, p.z, d.x, d.y, d.z, r, col, shift, ap); }
/// Draws the sphere at point p with color col and radius r
inline void Sphere(mglPoint p, double r, const char *col="r")
{ mgl_sphere(gr, p.x, p.y, p.z, r, col); }
/// Draws the cone between points p1,p2 with radius r1,r2 and with style stl
/** Parameter \a stl can contain:
* ‘@’ for drawing edges;
* ‘#’ for wired cones;
* ‘t’ for drawing tubes/cylinder instead of cones/prisms;
* ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones.*/
inline void Cone(mglPoint p1, mglPoint p2, double r1, double r2=-1, const char *stl="r@")
{ mgl_cone(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z,r1,r2,stl); }
/// Draws the ellipse between points p1,p2 with color stl and width r
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Ellipse(mglPoint p1, mglPoint p2, double r, const char *stl="r")
{ mgl_ellipse(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, r,stl); }
/// Draws the circle at point p with color stl and radius r
/// (implemented as an ellipse with both focal points at p).
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Circle(mglPoint p, double r, const char *stl="r")
{ mgl_ellipse(gr, p.x, p.y, p.z, p.x, p.y, p.z, r,stl); }
/// Draws the rhomb between points p1,p2 with color stl and width r
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Rhomb(mglPoint p1, mglPoint p2, double r, const char *stl="r")
{ mgl_rhomb(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, r,stl); }
/// Draws the polygon based on points p1,p2 with color stl
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Polygon(mglPoint p1, mglPoint p2, int n, const char *stl="r")
{ mgl_polygon(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, n,stl); }
/// Draws the arc around axis pr with center at p0 and starting from p1, by color stl and angle a (in degrees)
inline void Arc(mglPoint p0, mglPoint pa, mglPoint p1, double a, const char *stl="r")
{ mgl_arc_ext(gr, p0.x,p0.y,p0.z, pa.x,pa.y,pa.z, p1.x,p1.y,p1.z, a,stl); }
/// Draws the arc around axis 'z' with center at p0 and starting from p1, by color stl and angle a (in degrees)
/// NOTE(review): p1.z is deliberately ignored -- the start point uses p0.z,
/// keeping the arc in the center's z-plane.
inline void Arc(mglPoint p0, mglPoint p1, double a, const char *stl="r")
{ mgl_arc_ext(gr, p0.x,p0.y,p0.z, 0,0,1, p1.x,p1.y,p0.z, a,stl); }
/// Draws bitmap (logo) which is stretched along whole axis range
inline void Logo(long w, long h, const unsigned char *rgba, bool smooth=false, const char *opt="")
{ mgl_logo(gr, w, h, rgba, smooth, opt); }
/// Draws bitmap (logo) from file fname, stretched along whole axis range
inline void Logo(const char *fname, bool smooth=false, const char *opt="")
{ mgl_logo_file(gr, fname, smooth, opt); }
/// Draw user-defined symbol in position p
inline void Symbol(mglPoint p, char id, const char *how="", double size=-1)
{ mgl_symbol(gr, p.x, p.y, p.z, id, how, size); }
/// Draw user-defined symbol in position p along direction d
inline void Symbol(mglPoint p, mglPoint d, char id, const char *how="", double size=-1)
{ mgl_symbol_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, id, how, size); }
/// Print wide-character text in position p with specified font
inline void Putsw(mglPoint p,const wchar_t *text,const char *font=":C",double size=-1)
{ mgl_putsw(gr, p.x, p.y, p.z, text, font, size); }
/// Print text in position p with specified font
inline void Puts(mglPoint p,const char *text,const char *font=":C",double size=-1)
{ mgl_puts(gr, p.x, p.y, p.z, text, font, size); }
/// Print wide-character text in position {x,y} (z=0 is used) with specified font
inline void Putsw(double x, double y,const wchar_t *text,const char *font=":AC",double size=-1)
{ mgl_putsw(gr, x, y, 0, text, font, size); }
/// Print text in position {x,y} (z=0 is used) with specified font
inline void Puts(double x, double y,const char *text,const char *font=":AC",double size=-1)
{ mgl_puts(gr, x, y, 0, text, font, size); }
/// Print wide-character text in position p along direction d with specified font
inline void Putsw(mglPoint p, mglPoint d, const wchar_t *text, const char *font=":L", double size=-1)
{ mgl_putsw_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, text, font, size); }
/// Print text in position p along direction d with specified font
inline void Puts(mglPoint p, mglPoint d, const char *text, const char *font=":L", double size=-1)
{ mgl_puts_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, text, font, size); }
/// Print text along the curve {x,y,z}
inline void Text(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *text, const char *font="", const char *opt="")
{ mgl_text_xyz(gr, &x, &y, &z, text, font, opt); }
/// Print text along the curve {x,y}
inline void Text(const mglDataA &x, const mglDataA &y, const char *text, const char *font="", const char *opt="")
{ mgl_text_xy(gr, &x, &y, text, font, opt); }
/// Print text along the curve {x,y} with x in x-axis range
inline void Text(const mglDataA &y, const char *text, const char *font="", const char *opt="")
{ mgl_text_y(gr, &y, text, font, opt); }
/// Print wide-character text along the curve {x,y,z}
inline void Text(const mglDataA &x, const mglDataA &y, const mglDataA &z, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_xyz(gr, &x, &y, &z, text, font, opt); }
/// Print wide-character text along the curve {x,y}
inline void Text(const mglDataA &x, const mglDataA &y, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_xy(gr, &x, &y, text, font, opt); }
/// Print wide-character text along the curve {x,y} with x in x-axis range
inline void Text(const mglDataA &y, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_y(gr, &y, text, font, opt); }
/// Draws bounding box outside the plotting volume with color col.
/** Style ‘@’ produce filled back faces. */
inline void Box(const char *col="", bool ticks=true)
{ mgl_box_str(gr, col, ticks); }
/// Draw axes with ticks in direction(s) dir.
/** Parameter \a dir may contain:
 * ‘xyzt’ for drawing axis in corresponding direction;
 * ‘XYZT’ for drawing axis in corresponding direction but with inverted positions of labels;
 * ‘~’, ‘_’ for disabling tick labels;
 * ‘U’ for disabling rotation of tick labels;
 * ‘^’ for inverting default axis origin;
 * ‘!’ for disabling ticks tuning;
 * ‘AKDTVISO’ for drawing arrow at the end of axis;
 * ‘a’ for forced adjusting of axis ticks;
 * ‘f’ for printing ticks labels in fixed format;
 * ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
 * ‘F’ for printing ticks labels in LaTeX format;
 * ‘+’ for printing ‘+’ for positive ticks;
 * ‘-’ for printing usual ‘-’ in ticks labels;
 * ‘0123456789’ for precision at printing ticks labels.
 * Option "value" set the manual rotation angle for the ticks. */
inline void Axis(const char *dir="xyzt", const char *stl="", const char *opt="")
{ mgl_axis(gr, dir,stl,opt); }
/// Draw grid lines perpendicular to direction(s) dir.
inline void Grid(const char *dir="xyzt",const char *pen="B", const char *opt="")
{ mgl_axis_grid(gr, dir, pen, opt); }
/// Print the label text for axis dir.
/** Option "value" set additional shifting of the label. */
inline void Label(char dir, const char *text, double pos=+1, const char *opt="")
{ mgl_label(gr, dir, text, pos, opt); }
/// Print the wide-character label text for axis dir.
/** Option "value" set additional shifting of the label. */
inline void Label(char dir, const wchar_t *text, double pos=+1, const char *opt="")
{ mgl_labelw(gr, dir, text, pos, opt); }
/// Draw colorbar at edge of axis
/** Parameter \a sch may contain:
 * ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
 * ‘I’ for positioning near bounding (by default, at edges of subplot);
 * ‘A’ for using absolute coordinates;
 * ‘~’ for disabling tick labels;
 * ‘!’ for disabling ticks tuning;
 * ‘f’ for printing ticks labels in fixed format;
 * ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
 * ‘F’ for printing ticks labels in LaTeX format;
 * ‘+’ for printing ‘+’ for positive ticks;
 * ‘-’ for printing usual ‘-’ in ticks labels;
 * ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const char *sch="")
{ mgl_colorbar(gr, sch); }
/// Draw colorbar at manual position {x,y} with size {w,h}
/** Parameter \a sch may contain:
 * ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
 * ‘I’ for positioning near bounding (by default, at edges of subplot);
 * ‘A’ for using absolute coordinates;
 * ‘~’ for disabling tick labels;
 * ‘!’ for disabling ticks tuning;
 * ‘f’ for printing ticks labels in fixed format;
 * ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
 * ‘F’ for printing ticks labels in LaTeX format;
 * ‘+’ for printing ‘+’ for positive ticks;
 * ‘-’ for printing usual ‘-’ in ticks labels;
 * ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const char *sch,double x,double y,double w=1,double h=1)
{ mgl_colorbar_ext(gr, sch, x,y,w,h); }
/// Draw colorbar with manual colors val at edge of axis
/** Parameter \a sch may contain:
 * ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
 * ‘I’ for positioning near bounding (by default, at edges of subplot);
 * ‘A’ for using absolute coordinates;
 * ‘~’ for disabling tick labels;
 * ‘!’ for disabling ticks tuning;
 * ‘f’ for printing ticks labels in fixed format;
 * ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
 * ‘F’ for printing ticks labels in LaTeX format;
 * ‘+’ for printing ‘+’ for positive ticks;
 * ‘-’ for printing usual ‘-’ in ticks labels;
 * ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const mglDataA &val, const char *sch="")
{ mgl_colorbar_val(gr, &val, sch); }
/// Draw colorbar with manual colors val at manual position {x,y} with size {w,h}
/** Parameter \a sch may contain:
 * ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
 * ‘I’ for positioning near bounding (by default, at edges of subplot);
 * ‘A’ for using absolute coordinates;
 * ‘~’ for disabling tick labels;
 * ‘!’ for disabling ticks tuning;
 * ‘f’ for printing ticks labels in fixed format;
 * ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
 * ‘F’ for printing ticks labels in LaTeX format;
 * ‘+’ for printing ‘+’ for positive ticks;
 * ‘-’ for printing usual ‘-’ in ticks labels;
 * ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const mglDataA &val, const char *sch,double x,double y,double w=1,double h=1)
{ mgl_colorbar_val_ext(gr, &val, sch, x,y,w,h); }
/// Add string to legend
inline void AddLegend(const char *text,const char *style)
{ mgl_add_legend(gr, text, style); }
/// Add wide-character string to legend
inline void AddLegend(const wchar_t *text,const char *style)
{ mgl_add_legendw(gr, text, style); }
/// Clear saved legend string
inline void ClearLegend()
{ mgl_clear_legend(gr); }
/// Draw legend of accumulated strings at position {x,y}
/** Parameter fnt may contain:
 * font style for legend text;
 * colors for background (first one), border (second one) and text (last one);
 * ‘A’ for positioning in absolute coordinates;
 * ‘^’ for positioning outside of specified point;
 * ‘-’ for arranging entries horizontally;
 * ‘#’ for drawing box around legend.
 * Option value set the space between line samples and text (default is 0.1).*/
inline void Legend(double x, double y, const char *font="#", const char *opt="")
{ mgl_legend_pos(gr, x, y, font, opt); }
/// Draw legend of accumulated strings
/** Parameter fnt may contain:
 * font style for legend text;
 * colors for background (first one), border (second one) and text (last one);
 * ‘A’ for positioning in absolute coordinates;
 * ‘^’ for positioning outside of specified point;
 * ‘-’ for arranging entries horizontally;
 * ‘#’ for drawing box around legend.
 * Option value set the space between line samples and text (default is 0.1).
 * Parameter \a where sets position: 0 at bottom-left, 1 at bottom-right, 2 at top-left, 3 at top-right (default).*/
inline void Legend(int where=3, const char *font="#", const char *opt="")
{ mgl_legend(gr, where, font, opt); }
/// Set number of marks in legend sample
inline void SetLegendMarks(int num) { mgl_set_legend_marks(gr, num); }
/// Draw usual curve {x,y,z}
inline void Plot(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_plot_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw usual curve {x,y}
inline void Plot(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_plot_xy(gr, &x, &y, pen,opt); }
/// Draw usual curve {x,y} with x in x-axis range
inline void Plot(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_plot(gr, &y, pen,opt); }
/// Draw tapes which rotate as (bi-)normals of curve {x,y,z}
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_tape_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw tapes which rotate as (bi-)normals of curve {x,y}
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_tape_xy(gr, &x, &y, pen,opt); }
/// Draw tapes which rotate as (bi-)normals of curve {x,y} with x in x-axis range
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_tape(gr, &y, pen,opt); }
/// Draw radar chart (plot in curved coordinates)
/** Option "value" set the additional shift of data (i.e. the data a+value is used instead of a).*/
inline void Radar(const mglDataA &a, const char *pen="", const char *opt="")
{ mgl_radar(gr, &a, pen, opt); }
/// Draw stairs for points in arrays {x,y,z}
inline void Step(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_step_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw stairs for points in arrays {x,y}
inline void Step(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_step_xy(gr, &x, &y, pen, opt); }
/// Draw stairs for points in arrays {x,y} with x in x-axis range
inline void Step(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_step(gr, &y, pen, opt); }
/// Draw curve {x,y,z} which is colored by c (like tension plot)
inline void Tens(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens_xyz(gr, &x, &y, &z, &c, pen, opt); }
/// Draw curve {x,y} which is colored by c (like tension plot)
inline void Tens(const mglDataA &x, const mglDataA &y, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens_xy(gr, &x, &y, &c, pen, opt); }
/// Draw curve {x,y} with x in x-axis range which is colored by c (like tension plot)
inline void Tens(const mglDataA &y, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens(gr, &y, &c, pen, opt); }
/// Fill area between curve {x,y,z} and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_area_xyz(gr, &x, &y, &z, pen, opt); }
/// Fill area between curve {x,y} and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_area_xy(gr, &x, &y, pen, opt); }
/// Fill area between curve {x,y} with x in x-axis range and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_area(gr, &y, pen, opt); }
/// Fill area between curves {x,y1} and {x,y2} with x in x-axis range
/** Style 'i' will fill area only if y1 < y2.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region(gr, &y1, &y2, pen, opt); }
/// Fill area between curves {x,y1} and {x,y2}
/** Style 'i' will fill area only if y1 < y2.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region_xy(gr, &x, &y1, &y2, pen, opt); }
/// Fill area (draw ribbon) between curves {x1,y1,z1} and {x2,y2,z2}
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x1, const mglDataA &y1, const mglDataA &z1, const mglDataA &x2, const mglDataA &y2, const mglDataA &z2, const char *pen="", const char *opt="")
{ mgl_region_3d(gr, &x1, &y1, &z1, &x2, &y2, &z2, pen, opt); }
/// Fill area (draw ribbon) between curves {x1,y1} and {x2,y2} (z coordinates are omitted, i.e. NULL is passed)
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x1, const mglDataA &y1, const mglDataA &x2, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region_3d(gr, &x1, &y1, NULL, &x2, &y2, NULL, pen, opt); }
/// Draw vertical lines from points {x,y,z} to axis plane
inline void Stem(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_stem_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw vertical lines from points {x,y} to axis plane
inline void Stem(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_stem_xy(gr, &x, &y, pen, opt); }
/// Draw vertical lines from points {x,y} with x in x-axis range to axis plane
inline void Stem(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_stem(gr, &y, pen, opt); }
/// Draw vertical bars from points {x,y,z} to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_bars_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw vertical bars from points {x,y} to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_bars_xy(gr, &x, &y, pen, opt); }
/// Draw vertical bars from points {x,y} with x in x-axis range to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_bars(gr, &y, pen, opt); }
/// Draw horizontal bars from points {v,y} to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Barh(const mglDataA &y, const mglDataA &v, const char *pen="", const char *opt="")
{ mgl_barh_yx(gr, &y, &v, pen, opt); }
/// Draw horizontal bars from points {v,y} with y in y-axis range to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Barh(const mglDataA &v, const char *pen="", const char *opt="")
{ mgl_barh(gr, &v, pen, opt); }
/// Draw chart for data a
/** Space denote transparent color. Style '#' draw black borders. */
inline void Chart(const mglDataA &a, const char *colors="", const char *opt="")
{ mgl_chart(gr, &a, colors,opt); }
/// Draw Open-High-Low-Close (OHLC) diagram
/** Different colors for up and down values are used if number of specified colors is equal to 2*number of curves. */
inline void OHLC(const mglDataA &x, const mglDataA &open, const mglDataA &high, const mglDataA &low, const mglDataA &close, const char *pen="", const char *opt="")
{ mgl_ohlc_x(gr, &x, &open,&high,&low,&close,pen,opt); }
/// Draw Open-High-Low-Close (OHLC) diagram with x in x-axis range
/** Different colors for up and down values are used if number of specified colors is equal to 2*number of curves. */
inline void OHLC(const mglDataA &open, const mglDataA &high, const mglDataA &low, const mglDataA &close, const char *pen="", const char *opt="")
{ mgl_ohlc(gr, &open,&high,&low,&close,pen,opt); }
/// Draw box-plot (special 5-value plot used in statistic)
/** String \a pen may contain ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.*/
inline void BoxPlot(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_boxplot_xy(gr, &x, &y, pen,opt); }
/// Draw box-plot (special 5-value plot used in statistic) with x in x-axis range
/** String \a pen may contain ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.*/
inline void BoxPlot(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_boxplot(gr, &y, pen,opt); }
/// Draw candle plot
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ force drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &x, const mglDataA &v1, const mglDataA &v2, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle_xyv(gr, &x, &v1, &v2, &y1, &y2, pen, opt); }
/// Draw candle plot with x in x-axis range
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ force drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &v1, const mglDataA &v2, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle_yv(gr, &v1, &v2, &y1, &y2, pen, opt); }
/// Draw candle plot with x in x-axis range and without lower/upper values (NULL is passed for y1,y2)
inline void Candle(const mglDataA &v1, const mglDataA &v2, const char *pen="", const char *opt="")
{ mgl_candle_yv(gr, &v1, &v2, NULL, NULL, pen, opt); }
/// Draw candle plot with v1=v[i], v2=v[i+1]
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ force drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &y, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle(gr, &y, &y1, &y2, pen, opt); }
/// Draw candle plot with v1=v[i], v2=v[i+1] and without lower/upper values (NULL is passed for y1,y2)
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ force drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_candle(gr, &y, NULL, NULL, pen, opt); }
/// Draw cones from points {x,y,z} to axis plane
/** String \a pen may contain:
 * ‘@’ for drawing edges;
 * ‘#’ for wired cones;
 * ‘t’ for drawing tubes/cylinders instead of cones/prisms;
 * ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones;
 * ‘<’, ‘^’ or ‘>’ for aligning cones left, right or centering them at its x-coordinates.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Cones(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw cones from points {x,z} to axis plane
/** String \a pen may contain:
 * ‘@’ for drawing edges;
 * ‘#’ for wired cones;
 * ‘t’ for drawing tubes/cylinders instead of cones/prisms;
 * ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones;
 * ‘<’, ‘^’ or ‘>’ for aligning cones left, right or centering them at its x-coordinates.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Cones(const mglDataA &x, const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones_xz(gr, &x, &z, pen, opt); }
/// Draw cones from points {x,z} with x in x-axis range to axis plane
/** String \a pen may contain:
 * ‘@’ for drawing edges;
 * ‘#’ for wired cones;
 * ‘t’ for drawing tubes/cylinders instead of cones/prisms;
 * ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones;
 * ‘<’, ‘^’ or ‘>’ for aligning cones left, right or centering them at its x-coordinates.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Cones(const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones(gr, &z, pen, opt); }
/// Draw error boxes {ey} at points {x,y} with x in x-axis range
/** Style ‘@’ set to draw large semitransparent mark instead of error box.*/
inline void Error(const mglDataA &y, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error(gr, &y, &ey, pen, opt); }
/// Draw error boxes {ey} at points {x,y}
/** Style ‘@’ set to draw large semitransparent mark instead of error box.*/
inline void Error(const mglDataA &x, const mglDataA &y, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error_xy(gr, &x, &y, &ey, pen, opt); }
/// Draw error boxes {ex,ey} at points {x,y}
/** Style ‘@’ set to draw large semitransparent mark instead of error box.*/
inline void Error(const mglDataA &x, const mglDataA &y, const mglDataA &ex, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error_exy(gr, &x, &y, &ex, &ey, pen, opt); }
/// Draw marks with size r at points {x,y,z}
inline void Mark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_xyz(gr, &x, &y, &z, &r, pen, opt); }
/// Draw marks with size r at points {x,y}
inline void Mark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_xy(gr, &x, &y, &r, pen, opt); }
/// Draw marks with size r at points {x,y} with x in x-axis range
inline void Mark(const mglDataA &y, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_y(gr, &y, &r, pen, opt); }
/// Draw Poincare map at condition s==0 for curve {x,y,z}
inline void Pmap(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap_xyz(gr, &x, &y, &z, &s, pen, opt); }
/// Draw Poincare map at condition s==0 for curve {x,y}
inline void Pmap(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap_xy(gr, &x, &y, &s, pen, opt); }
/// Draw Poincare map at condition s==0 for curve {x,y} with x in x-axis range
inline void Pmap(const mglDataA &y, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap(gr, &y, &s, pen, opt); }
/// Draw textual marks with size r at points {x,y,z}
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_xyzr(gr, &x, &y, &z, &r, text, fnt, opt); }
/// Draw textual marks with size r at points {x,y}
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_xyr(gr, &x, &y, &r, text, fnt, opt); }
/// Draw textual marks with size r at points {x,y} with x in x-axis range
inline void TextMark(const mglDataA &y, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_yr(gr, &y, &r, text, fnt, opt); }
/// Draw textual marks at points {x,y} with x in x-axis range
inline void TextMark(const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark(gr, &y, text, fnt, opt); }
/// Draw wide-character textual marks with size r at points {x,y,z}
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_xyzr(gr, &x, &y, &z, &r, text, fnt, opt); }
/// Draw wide-character textual marks with size r at points {x,y}
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_xyr(gr, &x, &y, &r, text, fnt, opt); }
/// Draw wide-character textual marks with size r at points {x,y} with x in x-axis range
inline void TextMark(const mglDataA &y, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_yr(gr, &y, &r, text, fnt, opt); }
/// Draw wide-character textual marks at points {x,y} with x in x-axis range
inline void TextMark(const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw(gr, &y, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y,z}
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_xyz(gr, &x, &y, &z, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y}
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_xy(gr, &x, &y, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y} with x in x-axis range
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_y(gr, &y, text, fnt, opt); }
/// Draw wide-character labels for points coordinate(s) at points {x,y,z}
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const mglDataA &z, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_xyz(gr, &x, &y, &z, text, fnt, opt); }
/// Draw wide-character labels for points coordinate(s) at points {x,y}
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_xy(gr, &x, &y, text, fnt, opt); }
/// Draw wide-character labels for points coordinate(s) at points {x,y} with x in x-axis range
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_y(gr, &y, text, fnt, opt); }
/// Draw table for values val along given direction with row labels text (at position {0,0})
/** String \a fnt may contain:
 * ‘#’ for drawing cell borders;
 * ‘|’ for limiting table width by subplot one (equal to option ‘value 1’);
 * ‘=’ for equal width of all cells;
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.
 * Option value set the width of the table (default is 1).*/
inline void Table(const mglDataA &val, const char *text, const char *fnt="#|", const char *opt="")
{ mgl_table(gr, 0, 0, &val, text, fnt, opt); }
/// Draw table for values val along given direction with wide-character row labels text (at position {0,0})
/** String \a fnt may contain:
 * ‘#’ for drawing cell borders;
 * ‘|’ for limiting table width by subplot one (equal to option ‘value 1’);
 * ‘=’ for equal width of all cells;
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.
 * Option value set the width of the table (default is 1).*/
inline void Table(const mglDataA &val, const wchar_t *text, const char *fnt="#|", const char *opt="")
{ mgl_tablew(gr, 0, 0, &val, text, fnt, opt); }
/// Draw table for values val along given direction with row labels text at given position {x,y}
/** String \a fnt may contain:
 * ‘#’ for drawing cell borders;
 * ‘|’ for limiting table width by subplot one (equal to option ‘value 1’);
 * ‘=’ for equal width of all cells;
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.
 * Option value set the width of the table (default is 1).*/
inline void Table(double x, double y, const mglDataA &val, const char *text, const char *fnt="#|", const char *opt="")
{ mgl_table(gr, x, y, &val, text, fnt, opt); }
/// Draw table for values val along given direction with row labels text at given position
/** String \a fnt may contain:
* ‘#’ for drawing cell borders;
* ‘|’ for limiting table widh by subplot one (equal to option ‘value 1’);
* ‘=’ for equal width of all cells;
* ‘f’ for fixed format of printed numbers;
* ‘E’ for using ‘E’ instead of ‘e’;
* ‘F’ for printing in LaTeX format;
* ‘+’ for printing ‘+’ for positive numbers;
* ‘-’ for printing usual ‘-’;
* ‘0123456789’ for precision at printing numbers.
* Option value set the width of the table (default is 1).*/
inline void Table(double x, double y, const mglDataA &val, const wchar_t *text, const char *fnt="#|", const char *opt="")
{ mgl_tablew(gr, x, y, &val, text, fnt, opt); }
/// Draw tube with radius r around curve {x,y,z} (r is a data array, so the radius may vary along the curve)
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_xyzr(gr, &x, &y, &z, &r, pen, opt); }
/// Draw tube with constant radius r around curve {x,y,z}
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &z, double r, const char *pen="", const char *opt="")
{ mgl_tube_xyz(gr, &x, &y, &z, r, pen, opt); }
/// Draw tube with radius r around curve {x,y} (r is a data array, so the radius may vary along the curve)
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_xyr(gr, &x, &y, &r, pen, opt); }
/// Draw tube with constant radius r around curve {x,y}
inline void Tube(const mglDataA &x, const mglDataA &y, double r, const char *pen="", const char *opt="")
{ mgl_tube_xy(gr, &x, &y, r, pen, opt); }
/// Draw tube with radius r around curve {x,y} with x in x-axis range (r is a data array)
inline void Tube(const mglDataA &y, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_r(gr, &y, &r, pen, opt); }
/// Draw tube with constant radius r around curve {x,y} with x in x-axis range
inline void Tube(const mglDataA &y, double r, const char *pen="", const char *opt="")
{ mgl_tube(gr, &y, r, pen, opt); }
/// Draw surface of curve {r,z} rotation around axis
/** Style ‘#’ produce wire plot. Style ‘.’ produce plot by dots.*/
inline void Torus(const mglDataA &r, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_torus(gr, &r, &z, pen,opt); }
/// Draw mesh lines for 2d data specified parametrically
inline void Mesh(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_mesh_xy(gr, &x, &y, &z, stl, opt); }
/// Draw mesh lines for 2d data
inline void Mesh(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_mesh(gr, &z, stl, opt); }
/// Draw waterfall plot for 2d data specified parametrically
/** Style ‘x’ draw lines in x-direction. */
inline void Fall(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_fall_xy(gr, &x, &y, &z, stl, opt); }
/// Draw waterfall plot for 2d data
/** Style ‘x’ draw lines in x-direction. */
inline void Fall(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_fall(gr, &z, stl, opt); }
/// Draw belts for 2d data specified parametrically
/** Style ‘x’ draw belts in x-direction. */
inline void Belt(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_belt_xy(gr, &x, &y, &z, stl, opt); }
/// Draw belts for 2d data
/** Style ‘x’ draw belts in x-direction. */
inline void Belt(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_belt(gr, &z, stl, opt); }
/// Draw belts for 2d data specified parametrically with color proportional to c
/** Style ‘x’ draw belts in x-direction. */
inline void BeltC(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_beltc_xy(gr, &x, &y, &z, &c, stl, opt); }
/// Draw belts for 2d data with color proportional to c
/** Style ‘x’ draw belts in x-direction. */
inline void BeltC(const mglDataA &z, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_beltc(gr, &z, &c, stl, opt); }
/// Draw surface for 2d data specified parametrically with color proportional to z
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Surf(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_surf_xy(gr, &x, &y, &z, stl, opt); }
/// Draw surface for 2d data with color proportional to z
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Surf(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_surf(gr, &z, stl, opt); }
/// Draw grid lines for density plot of 2d data specified parametrically
inline void Grid(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_grid_xy(gr, &x, &y, &z, stl, opt); }
/// Draw grid lines for density plot of 2d data
inline void Grid(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_grid(gr, &z, stl, opt); }
/// Draw vertical tiles with manual colors c for 2d data specified parametrically
inline void Tile(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_tile_xyc(gr, &x, &y, &z, &c, stl, opt); }
/// Draw vertical tiles for 2d data specified parametrically
inline void Tile(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_tile_xy(gr, &x, &y, &z, stl, opt); }
/// Draw vertical tiles for 2d data
inline void Tile(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_tile(gr, &z, stl, opt); }
/// Draw density plot for 2d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Dens(const mglDataA &x, const mglDataA &y, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_dens_xy(gr, &x, &y, &c, stl, opt); }
/// Draw density plot for 2d data
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Dens(const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_dens(gr, &c, stl, opt); }
/// Draw vertical boxes for 2d data specified parametrically
/** Style ‘#’ draw filled boxes. */
inline void Boxs(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_boxs_xy(gr, &x, &y, &z, stl, opt); }
/// Draw vertical boxes for 2d data
/** Style ‘#’ draw filled boxes. */
inline void Boxs(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_boxs(gr, &z, stl, opt); }
/// Draw contour lines on parametric surface at manual levels v for 2d data specified parametrically
/** Style ‘f’ to draw solid contours.
 * Style ‘t’/‘T’ draw contour labels below/above contours.*/
inline void ContP(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_contp_val(gr, &v, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines on parametric surface at automatic levels for 2d data specified parametrically
/** Style ‘f’ to draw solid contours.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContP(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_contp(gr, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines at manual levels v for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.*/
inline void Cont(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw contour lines at manual levels v for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.*/
inline void Cont(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_val(gr, &v, &z, sch, opt); }
/// Draw contour lines at automatic levels for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_xy(gr, &x, &y, &z, sch, opt); }
/// Draw contour lines for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont(gr, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContF(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContF(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_val(gr, &v, &z, sch, opt); }
/// Draw solid contours for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContF(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_xy(gr, &x, &y, &z, sch, opt); }
/// Draw solid contours for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContF(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf(gr, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data specified parametrically with specified colors
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContD(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data with specified colors
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContD(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_val(gr, &v, &z, sch, opt); }
/// Draw solid contours for 2d data specified parametrically with specified colors
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContD(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_xy(gr, &x, &y, &z, sch, opt); }
/// Draw solid contours for 2d data with specified colors
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContD(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd(gr, &z, sch, opt); }
/// Draw contour tubes between manual levels v for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContV(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw contour tubes between manual levels v for 2d data
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContV(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_val(gr, &v, &z, sch, opt); }
/// Draw contour tubes for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContV(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_xy(gr, &x, &y, &z, sch, opt); }
/// Draw contour tubes for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContV(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv(gr, &z, sch, opt); }
/// Draw axial-symmetric isosurfaces at manual levels v for 2d data specified parametrically
/** String \a sch may contain:
* ‘#’ for wired plot;
* ‘.’ for plot by dots;
* ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis). */
inline void Axial(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_xy_val(gr, &v, &x, &y, &z, sch,opt); }
/// Draw axial-symmetric isosurfaces at manual levels v for 2d data
/** String \a sch may contain:
* ‘#’ for wired plot;
* ‘.’ for plot by dots;
* ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis). */
inline void Axial(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_val(gr, &v, &z, sch, opt); }
/// Draw axial-symmetric isosurfaces for 2d data specified parametrically
/** String \a sch may contain:
* ‘#’ for wired plot;
* ‘.’ for plot by dots;
* ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis).
* Option "value" set the number of isosurfaces (default is 3). */
inline void Axial(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_xy(gr, &x, &y, &z, sch, opt); }
/// Draw axial-symmetric isosurfaces for 2d data
/** String \a sch may contain:
* ‘#’ for wired plot;
* ‘.’ for plot by dots;
* ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis).
* Option "value" set the number of isosurfaces (default is 3). */
inline void Axial(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial(gr, &z, sch, opt); }
/// Draw grid lines for density plot at slice for 3d data specified parametrically
/** Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * NOTE(review): \a sVal presumably selects the slice position — confirm against mgl_grid3 docs.*/
inline void Grid3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_grid3_xyz(gr, &x, &y, &z, &a, stl, sVal, opt); }
/// Draw grid lines for density plot at slice for 3d data
/** Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.*/
inline void Grid3(const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_grid3(gr, &a, stl, sVal, opt); }
/// Draw density plot at slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.*/
inline void Dens3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_dens3_xyz(gr, &x, &y, &z, &a, stl, sVal, opt); }
/// Draw density plot at slice for 3d data
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.*/
inline void Dens3(const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_dens3(gr, &a, stl, sVal, opt); }
/// Draw single isosurface at level Val for 3d data specified parametrically
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.*/
inline void Surf3(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_xyz_val(gr, Val, &x, &y, &z, &a, stl, opt); }
/// Draw single isosurface at level Val for 3d data
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.*/
inline void Surf3(double Val, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_val(gr, Val, &a, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_xyz(gr, &x, &y, &z, &a, stl, opt); }
/// Draw isosurfaces for 3d data
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3(const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3(gr, &a, stl, opt); }
/// Draw a semi-transparent cloud for 3d data specified parametrically
/** Style ‘.’ produce plot by dots. Style ‘i’ use inverted values for transparency. */
inline void Cloud(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_cloud_xyz(gr, &x, &y, &z, &a, stl, opt); }
/// Draw a semi-transparent cloud for 3d data
/** Style ‘.’ produce plot by dots. Style ‘i’ use inverted values for transparency. */
inline void Cloud(const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_cloud(gr, &a, stl, opt); }
/// Draw contour lines at manual levels v along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void Cont3(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_xyz_val(gr, &v, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw contour lines at manual levels v along slice for 3d data
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void Cont3(const mglDataA &v, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_val(gr, &v, &a, sch, sVal, opt); }
/// Draw contour lines along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_xyz(gr, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw contour lines along slice for 3d data
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont3(const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3(gr, &a, sch, sVal, opt); }
/// Draw solid contours at manual levels v along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly. */
inline void ContF3(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3_xyz_val(gr, &v, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw solid contours at manual levels v along slice for 3d data
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly. */
inline void ContF3(const mglDataA &v, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3_val(gr, &v, &a, sch, sVal, opt); }
/// Draw solid contours along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Option "value" set the number of contour levels (default is 7).*/
inline void ContF3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3_xyz(gr, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw solid contours along slice for 3d data
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Option "value" set the number of contour levels (default is 7).*/
inline void ContF3(const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3(gr, &a, sch, sVal, opt); }
/// Draw several isosurfaces for 3d beam in curvilinear coordinates
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.
 * Variable \a flag is bitwise:
 * ‘0x1’ - draw in accompanied (not laboratory) coordinates;
 * ‘0x2’ - draw projection to \rho-z plane;
 * ‘0x4’ - draw normalized in each slice field.*/
inline void Beam(const mglDataA &tr, const mglDataA &g1, const mglDataA &g2, const mglDataA &a, double r, const char *stl=0, int flag=0, int num=3)
{ mgl_beam(gr, &tr,&g1,&g2,&a,r,stl,flag,num); }
/// Draw isosurface at value \a val for 3d beam in curvilinear coordinates
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.
 * Variable \a flag is bitwise:
 * ‘0x1’ - draw in accompanied (not laboratory) coordinates;
 * ‘0x2’ - draw projection to \rho-z plane;
 * ‘0x4’ - draw normalized in each slice field.*/
// Default for stl changed from NULL to 0 for consistency with the overload above;
// both spell the same null pointer, so callers see identical behavior.
inline void Beam(double val, const mglDataA &tr, const mglDataA &g1, const mglDataA &g2, const mglDataA &a, double r, const char *stl=0, int flag=0)
{ mgl_beam_val(gr,val,&tr,&g1,&g2,&a,r,stl,flag); }
/// Draw vertical tiles with variable size r and manual colors c for 2d data specified parametrically
inline void TileS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_tiles_xyc(gr, &x, &y, &z, &r, &c, stl, opt); }
/// Draw vertical tiles with variable size r for 2d data specified parametrically
inline void TileS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *stl="", const char *opt="")
{ mgl_tiles_xy(gr, &x, &y, &z, &r, stl, opt); }
/// Draw vertical tiles with variable size r for 2d data
inline void TileS(const mglDataA &z, const mglDataA &r, const char *stl="", const char *opt="")
{ mgl_tiles(gr, &z, &r, stl, opt); }
/// Draw surface for 2d data specified parametrically with color proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfC(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfc_xy(gr, &x, &y, &z, &c, sch,opt); }
/// Draw surface for 2d data with color proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfC(const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfc(gr, &z, &c, sch,opt); }
/// Draw surface for 2d data specified parametrically with alpha (transparency) proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfa_xy(gr, &x, &y, &z, &c, sch,opt); }
/// Draw surface for 2d data with alpha (transparency) proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfA(const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfa(gr, &z, &c, sch,opt); }
/// Draw surface for 2d data specified parametrically with color proportional to c and alpha proportional to a
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfCA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_surfca_xy(gr, &x, &y, &z, &c, &a, sch,opt); }
/// Draw surface for 2d data with color proportional to c and alpha proportional to a
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfCA(const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_surfca(gr, &z, &c, &a, sch,opt); }
/// Color map of matrix a to matrix b, both matrices can parametrically depend on coordinates
/** Style ‘.’ produce plot by dots. */
inline void Map(const mglDataA &x, const mglDataA &y, const mglDataA &a, const mglDataA &b, const char *sch="", const char *opt="")
{ mgl_map_xy(gr, &x, &y, &a, &b, sch, opt); }
/// Color map of matrix a to matrix b
/** Style ‘.’ produce plot by dots. */
inline void Map(const mglDataA &a, const mglDataA &b, const char *sch="", const char *opt="")
{ mgl_map(gr, &a, &b, sch, opt); }
/// Draw density plot for spectrogram specified parametrically
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void STFA(const mglDataA &x, const mglDataA &y, const mglDataA &re, const mglDataA &im, int dn, const char *sch="", const char *opt="")
{ mgl_stfa_xy(gr, &x, &y, &re, &im, dn, sch, opt); }
/// Draw density plot for spectrogram
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void STFA(const mglDataA &re, const mglDataA &im, int dn, const char *sch="", const char *opt="")
{ mgl_stfa(gr, &re, &im, dn, sch, opt); }
/// Draw single isosurface at level Val for 3d data specified parametrically with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3A(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a_xyz_val(gr, Val, &x, &y, &z, &a, &b, stl, opt); }
/// Draw single isosurface at level Val for 3d data with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3A(double Val, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a_val(gr, Val, &a, &b, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3A(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a_xyz(gr, &x, &y, &z, &a, &b, stl, opt); }
/// Draw isosurfaces for 3d data with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3A(const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a(gr, &a, &b, stl, opt); }
/// Draw single isosurface at level Val for 3d data specified parametrically with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3C(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c_xyz_val(gr, Val, &x, &y, &z, &a, &c, stl,opt); }
/// Draw single isosurface at level Val for 3d data with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3C(double Val, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c_val(gr, Val, &a, &c, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3C(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c_xyz(gr, &x, &y, &z, &a, &c, stl, opt); }
/// Draw isosurfaces for 3d data with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3C(const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c(gr, &a, &c, stl, opt); }
/// Draw single isosurface at level Val for 3d data specified parametrically with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3CA(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca_xyz_val(gr, Val, &x, &y, &z, &a, &c, &b, stl,opt); }
/// Draw single isosurface at level Val for 3d data with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3CA(double Val, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca_val(gr, Val, &a, &c, &b, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3CA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca_xyz(gr, &x, &y, &z, &a, &c, &b, stl, opt); }
/// Draw isosurfaces for 3d data with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3CA(const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca(gr, &a, &c, &b, stl, opt); }
/// Plot dew drops for vector field {ax,ay} parametrically depending on coordinates {x,y}
inline void Dew(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_dew_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot dew drops for vector field {ax,ay}
inline void Dew(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_dew_2d(gr, &ax, &ay, sch, opt); }
/// Plot vectors at position {x,y} along {ax,ay} with length/color proportional to |a|
/** Option value set the vector length factor (if non-zero) or vector length to be proportional to the distance between curve points (if value=0). */
inline void Traj(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_traj_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot vectors at position {x,y,z} along {ax,ay,az} with length/color proportional to |a|
/** Option value set the vector length factor (if non-zero) or vector length to be proportional to the distance between curve points (if value=0). */
inline void Traj(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_traj_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot vector field {ax,ay} parametrically depended on coordinate {x,y} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_vect_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot vector field {ax,ay} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_vect_2d(gr, &ax, &ay, sch, opt); }
/// Plot vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_vect_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot vector field {ax,ay,az} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_vect_3d(gr, &ax, &ay, &az, sch, opt); }
/// Draw vector plot along slice for 3d data specified parametrically
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows,
 * ‘x’, ‘z’ for producing plot perpendicular to x- or z-direction correspondingly. */
inline void Vect3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_vect3_xyz(gr, &x, &y, &z, &ax,&ay,&az, stl, sVal, opt); }
/// Draw vector plot along slice for 3d data
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows,
 * ‘x’, ‘z’ for producing plot perpendicular to x- or z-direction correspondingly. */
inline void Vect3(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_vect3(gr, &ax,&ay,&az, stl, sVal, opt); }
/// Plot flows for vector field {ax,ay} parametrically depended on coordinate {x,y} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flow_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot flows for vector field {ax,ay} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flow_2d(gr, &ax, &ay, sch, opt); }
/// Plot flows for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flow_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot flows for vector field {ax,ay,az} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flow_3d(gr, &ax, &ay, &az, sch, opt); }
/// Plot flow from point p for vector field {ax,ay} parametrically depended on coordinate {x,y} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads. */
inline void FlowP(mglPoint p, const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flowp_xy(gr, p.x, p.y, p.z, &x, &y, &ax, &ay, sch, opt); }
/// Plot flow from point p for vector field {ax,ay} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads. */
inline void FlowP(mglPoint p, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flowp_2d(gr, p.x, p.y, p.z, &ax, &ay, sch, opt); }
/// Plot flow from point p for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly. */
inline void FlowP(mglPoint p, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flowp_xyz(gr, p.x, p.y, p.z, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot flow from point p for vector field {ax,ay,az} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly. */
inline void FlowP(mglPoint p, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flowp_3d(gr, p.x, p.y, p.z, &ax, &ay, &az, sch, opt); }
/// Plot flows from given plain for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color proportional to |a|
/** The slice position is given by \a sVal.
 * String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * 'v' for drawing arrows on the threads;
 * 't' for drawing tapes of normals in x-y and y-z planes.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_flow3_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, sVal, opt); }
/// Plot flows from given plain for vector field {ax,ay,az} with color proportional to |a|
/** The slice position is given by \a sVal.
 * String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * 'v' for drawing arrows on the threads;
 * 't' for drawing tapes of normals in x-y and y-z planes.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow3(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_flow3(gr, &ax, &ay, &az, sch, sVal, opt); }
/// Plot flows for gradient of scalar field phi parametrically depended on coordinate {x,y,z}
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &phi, const char *sch="", const char *opt="")
{ mgl_grad_xyz(gr,&x,&y,&z,&phi,sch,opt); }
/// Plot flows for gradient of scalar field phi parametrically depended on coordinate {x,y}
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &x, const mglDataA &y, const mglDataA &phi, const char *sch="", const char *opt="")
{ mgl_grad_xy(gr,&x,&y,&phi,sch,opt); }
/// Plot flows for gradient of scalar field phi
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &phi, const char *sch="", const char *opt="")
{ mgl_grad(gr,&phi,sch,opt); }
/// Plot flow pipes for vector field {ax,ay} parametrically depended on coordinate {x,y} with color and radius proportional to |a|
/** The base pipe radius is given by \a r0.
 * String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_xy(gr, &x, &y, &ax, &ay, sch, r0, opt); }
/// Plot flow pipes for vector field {ax,ay} with color and radius proportional to |a|
/** The base pipe radius is given by \a r0.
 * String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &ax, const mglDataA &ay, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_2d(gr, &ax, &ay, sch, r0, opt); }
/// Plot flow pipes for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color and radius proportional to |a|
/** The base pipe radius is given by \a r0.
 * String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, r0, opt); }
/// Plot flow pipes for vector field {ax,ay,az} with color and radius proportional to |a|
/** The base pipe radius is given by \a r0.
 * String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_3d(gr, &ax, &ay, &az, sch, r0, opt); }
/// Draw density plot for data at x = sVal
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots. */
inline void DensX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_dens_x(gr, &a, stl, sVal, opt); }
/// Draw density plot for data at y = sVal
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots. */
inline void DensY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_dens_y(gr, &a, stl, sVal, opt); }
/// Draw density plot for data at z = sVal
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots. */
inline void DensZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_dens_z(gr, &a, stl, sVal, opt); }
/// Draw contour lines for data at x = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_x(gr, &a, stl, sVal, opt); }
/// Draw contour lines at manual levels for data at x = sVal
/** Contour levels are given by \a v.
 * Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void ContX(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_x_val(gr, &v, &a, stl, sVal, opt); }
/// Draw contour lines for data at y = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_y(gr, &a, stl, sVal, opt); }
/// Draw contour lines at manual levels for data at y = sVal
/** Contour levels are given by \a v.
 * Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void ContY(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_y_val(gr, &v, &a, stl, sVal, opt); }
/// Draw contour lines for data at z = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_z(gr, &a, stl, sVal, opt); }
/// Draw contour lines at manual levels for data at z = sVal
/** Contour levels are given by \a v.
 * Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void ContZ(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_z_val(gr, &v, &a, stl, sVal, opt); }
/// Draw solid contours for data at x = sVal
/** Option "value" set the number of contour levels (default is 7). */
inline void ContFX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_x(gr, &a, stl, sVal, opt); }
/// Draw solid contours at manual levels for data at x = sVal
/** Contour levels are given by \a v. */
inline void ContFX(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_x_val(gr, &v, &a, stl, sVal, opt); }
/// Draw solid contours for data at y = sVal
/** Option "value" set the number of contour levels (default is 7). */
inline void ContFY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_y(gr, &a, stl, sVal, opt); }
/// Draw solid contours at manual levels for data at y = sVal
/** Contour levels are given by \a v. */
inline void ContFY(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_y_val(gr, &v, &a, stl, sVal, opt); }
/// Draw solid contours for data at z = sVal
/** Option "value" set the number of contour levels (default is 7). */
inline void ContFZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_z(gr, &a, stl, sVal, opt); }
/// Draw solid contours at manual levels for data at z = sVal
/** Contour levels are given by \a v. */
inline void ContFZ(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_z_val(gr, &v, &a, stl, sVal, opt); }
/// Draw curve for formula with x in x-axis range
/** Option "value" set initial number of points. */
inline void FPlot(const char *fy, const char *stl="", const char *opt="")
{ mgl_fplot(gr, fy, stl, opt); }
/// Draw curve for formulas parametrically depended on t in range [0,1]
/** Option "value" set initial number of points. */
// NOTE(review): stl has no default value here, unlike most other overloads — confirm intentional.
inline void FPlot(const char *fx, const char *fy, const char *fz, const char *stl, const char *opt="")
{ mgl_fplot_xyz(gr, fx, fy, fz, stl, opt); }
/// Draw surface by formula with x,y in axis range
/** Option "value" set initial number of points. */
inline void FSurf(const char *fz, const char *stl="", const char *opt="")
{ mgl_fsurf(gr, fz, stl, opt); }
/// Draw surface by formulas parametrically depended on u,v in range [0,1]
/** Option "value" set initial number of points. */
// NOTE(review): stl has no default value here, unlike most other overloads — confirm intentional.
inline void FSurf(const char *fx, const char *fy, const char *fz, const char *stl, const char *opt="")
{ mgl_fsurf_xyz(gr, fx, fy, fz, stl, opt); }
/// Draw triangle mesh for points in arrays {x,y,z} with specified color c.
/** Vertex indexes of triangles are given by \a nums.
 * Style ‘#’ produce wire plot. If nums.ny=c.nx then c set the triangle colors, else vertex colors. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_triplot_xyzc(gr, &nums, &x, &y, &z, &c, sch, opt); }
/// Draw triangle mesh for points in arrays {x,y,z}
/** Vertex indexes of triangles are given by \a nums.
 * Style ‘#’ produce wire plot. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_triplot_xyz(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw triangle mesh for points in arrays {x,y}
/** Vertex indexes of triangles are given by \a nums.
 * Style ‘#’ produce wire plot. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const char *sch="", const char *opt="")
{ mgl_triplot_xy(gr, &nums, &x, &y, sch, opt); }
/// Draw quad mesh for points in arrays {x,y,z} with specified color c
/** Vertex indexes of quadrangles are given by \a nums.
 * Style ‘#’ produce wire plot. If nums.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_quadplot_xyzc(gr, &nums, &x, &y, &z, &c, sch, opt); }
/// Draw quad mesh for points in arrays {x,y,z}
/** Vertex indexes of quadrangles are given by \a nums.
 * Style ‘#’ produce wire plot. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_quadplot_xyz(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw quad mesh for points in arrays {x,y}
/** Vertex indexes of quadrangles are given by \a nums.
 * Style ‘#’ produce wire plot. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const char *sch="", const char *opt="")
{ mgl_quadplot_xy(gr, &nums, &x, &y, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z}
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void TriCont(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricont_xyc(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw contour lines at manual levels \a v for triangle mesh for points in arrays {x,y,z}
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void TriContV(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricont_xycv(gr, &v, &nums, &x, &y, &z, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * If nums.ny=c.nx then c set the triangle colors, else vertex colors. */
inline void TriCont(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzc(gr, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines at manual levels \a v for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * If nums.ny=c.nx then c set the triangle colors, else vertex colors. */
inline void TriContV(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines at manual levels \a v for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Alias of TriContV with the same arguments.
 * Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * If nums.ny=c.nx then c set the triangle colors, else vertex colors. */
inline void TriCont(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z}
/** Option "value" set the number of contour levels (default is 7). */
inline void TriContVt(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricontv_xyc(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z} with specified color c
/** Option "value" set the number of contour levels (default is 7). */
inline void TriContVt(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricontv_xyzc(gr, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour tubes at manual levels \a v for triangle mesh for points in arrays {x,y,z} with specified color c
/** If nums.ny=c.nx then c set the triangle colors, else vertex colors. */
inline void TriContVt(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricontv_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw dots in points {x,y,z}.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_dots(gr, &x, &y, &z, sch, opt); }
/// Draw semitransparent dots in points {x,y,z} with specified alpha a.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_dots_a(gr, &x, &y, &z, &a, sch, opt); }
/// Draw semitransparent dots in points {x,y,z} with specified color c and alpha a.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_dots_ca(gr, &x, &y, &z, &c, &a, sch, opt); }
/// Draw surface reconstructed for points in arrays {x,y,z}.
/** Style ‘#’ produce wired plot. */
inline void Crust(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_crust(gr, &x, &y, &z, sch, opt); }
/// Fit data along x-direction for each data row. Return array with values for found formula.
inline mglData Fit(const mglDataA &y, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_1(gr, &y, eq,vars,0, opt)); }
/// Fit data along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &y, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_1(gr, &y, eq, vars, &ini, opt)); }
/// Fit data along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData Fit2(const mglDataA &z, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_2(gr, &z, eq, vars,0, opt)); }
/// Fit data along x-, y-direction for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData Fit2(const mglDataA &z, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_2(gr, &z, eq, vars, &ini, opt)); }
/// Fit data along all directions. Return array with values for found formula.
inline mglData Fit3(const mglDataA &a, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_3(gr, &a, eq, vars,0, opt)); }
/// Fit data along all directions starting from \a ini values. Return array with values for found formula.
inline mglData Fit3(const mglDataA &a, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_3(gr, &a, eq, vars, &ini, opt)); }
/// Fit data along x-direction for each data row. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xy(gr, &x, &y, eq, vars,0, opt)); }
/// Fit data along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xy(gr, &x, &y, eq, vars, &ini, opt)); }
/// Fit data along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyz(gr, &x, &y, &z, eq, vars,0, opt)); }
/// Fit data along x-, y-directions for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyz(gr, &x, &y, &z, eq, vars, &ini, opt)); }
/// Fit data along all directions. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyza(gr, &x, &y, &z, &a, eq, vars,0, opt)); }
/// Fit data along all directions starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyza(gr, &x, &y, &z, &a, eq,vars, &ini, opt)); }
/// Fit data with dispersion s along x-direction for each data row. Return array with values for found formula.
inline mglData FitS(const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_ys(gr, &y, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_ys(gr, &y, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along x-direction for each data row. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xys(gr, &x, &y, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xys(gr, &x, &y, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyzs(gr, &x, &y, &z, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-, y-directions for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyzs(gr, &x, &y, &z, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along all directions. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyzas(gr, &x, &y, &z, &a, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along all directions starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyzas(gr, &x, &y, &z, &a, &s, eq, vars, &ini, opt)); }
/// Print fitted last formula (with coefficients)
inline void PutsFit(mglPoint p, const char *prefix=0, const char *font="", double size=-1)
{ mgl_puts_fit(gr, p.x, p.y, p.z, prefix, font, size); }
/// Get last fitted formula
inline const char *GetFit() const
{ return mgl_get_fit(gr); }
/// Get chi for last fitted formula
static inline mreal GetFitChi()
{ return mgl_get_fit_chi(); }
/// Get covariance matrix for last fitted formula
static inline mglData GetFitCovar()
{ return mglData(mgl_get_fit_covar()); }
// ---- PDE solvers -------------------------------------------------------------
// dz is the step along the evolution direction; k0 looks like a reference
// wavenumber for the WKB-style solver -- confirm against mgl_pde_solve docs.
/// Solve PDE with x,y,z in range axis range
inline mglData PDE(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglData(true,mgl_pde_solve(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in range axis range (complex-valued result)
inline mglDataC PDEc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglDataC(true,mgl_pde_solve_c(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in range axis range using advanced (slow!!!) method (2d only)
inline mglData APDE(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglData(true,mgl_pde_adv(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in range axis range using advanced (slow!!!) method (2d only, complex-valued result)
inline mglDataC APDEc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglDataC(true,mgl_pde_adv_c(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Fill data by formula with x,y,z in range axis range.
/// Optional arrays v, w are passed as extra variables to the formula
/// (a literal 0 means "no extra variable").
inline void Fill(mglData &u, const char *eq, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, 0, 0, opt); }
inline void Fill(mglData &u, const char *eq, const mglDataA &v, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, &v, 0, opt); }
inline void Fill(mglData &u, const char *eq, const mglDataA &v, const mglDataA &w, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, &v, &w, opt); }
/// Fill complex data by formula with x,y,z in range axis range
inline void Fill(mglDataC &u, const char *eq, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, 0, 0, opt); }
inline void Fill(mglDataC &u, const char *eq, const mglDataA &v, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, &v, 0, opt); }
inline void Fill(mglDataC &u, const char *eq, const mglDataA &v, const mglDataA &w, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, &v, &w, opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat for x in axis range.
/// sl selects a single slice to refill (-1 refills all slices).
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,0,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat for x,y in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,&ydat,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,&ydat,&zdat,&vdat,-1,opt); }
/// Complex-data variants of Refill (same semantics as above)
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,0,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat for x,y in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,&ydat,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,&ydat,&zdat,&vdat,-1,opt); }
/// Set the data by triangulated surface values assuming x,y,z in range axis range
inline void DataGrid(mglData &d, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *opt="")
{ mgl_data_grid(gr,&d,&x,&y,&z,opt); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_x(gr, &x, &a, opt)); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &y, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_xy(gr, &x, &y, &a, opt)); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_xyz(gr, &x, &y, &z, &a, opt)); }
// Deliberate no-op placeholder for future IDTF export support.
inline void Compression(bool){} // NOTE: Add later -- IDTF
/// Set the preference for vertex color on/off (for formats that support it, now only PRC does).
inline void VertexColor(bool enable) { mgl_set_flag(gr,enable, MGL_PREFERVC); }
/// Render only front side of surfaces for debugging purposes (for formats that support it, now only PRC does).
/// Note the inversion: DoubleSided(true) clears the MGL_ONESIDED flag.
inline void DoubleSided(bool enable) { mgl_set_flag(gr,!enable, MGL_ONESIDED); }
// inline void TextureColor(bool){} // NOTE: Add later -- IDTF
};
//-----------------------------------------------------------------------------
/// Wrapper class for MGL parsing
class MGL_EXPORT mglParse
{
HMPR pr;
mglParse &operator=(mglParse &p)
{ pr = p.pr; mgl_use_parser(pr,1); return p; }
public:
mglParse(HMPR p) { pr = p; mgl_use_parser(pr,1); }
mglParse(mglParse &p) { pr = p.pr; mgl_use_parser(pr,1); }
mglParse(bool setsize=false)
{ pr=mgl_create_parser(); mgl_parser_allow_setsize(pr, setsize); }
virtual ~mglParse()
{
#pragma omp critical
if(mgl_use_parser(pr,-1)<1) mgl_delete_parser(pr);
}
/// Get pointer to internal mglParser object
inline HMPR Self() { return pr; }
/// Parse and draw single line of the MGL script
inline int Parse(mglGraph *gr, const char *str, int pos)
{ return mgl_parse_line(gr->Self(), pr, str, pos); }
inline int Parse(mglGraph *gr, const wchar_t *str, int pos)
{ return mgl_parse_linew(gr->Self(), pr, str, pos); }
/// Execute MGL script text with '\n' separated lines
inline void Execute(mglGraph *gr, const char *str)
{ mgl_parse_text(gr->Self(), pr, str); }
inline void Execute(mglGraph *gr, const wchar_t *str)
{ mgl_parse_textw(gr->Self(), pr, str); }
/// Execute and draw script from the file
inline void Execute(mglGraph *gr, FILE *fp, bool print=false)
{ mgl_parse_file(gr->Self(), pr, fp, print); }
/// Return type of command: 0 - not found, 1 - other data plot, 2 - func plot,
/// 3 - setup, 4 - data handle, 5 - data create, 6 - subplot, 7 - program
/// 8 - 1d plot, 9 - 2d plot, 10 - 3d plot, 11 - dd plot, 12 - vector plot
/// 13 - axis, 14 - primitives, 15 - axis setup, 16 - text/legend, 17 - data transform
inline int CmdType(const char *name)
{ return mgl_parser_cmd_type(pr, name); }
/// Return string of command format (command name and its argument[s])
inline const char *CmdFormat(const char *name)
{ return mgl_parser_cmd_frmt(pr, name); }
/// Return description of MGL command
inline const char *CmdDesc(const char *name)
{ return mgl_parser_cmd_desc(pr, name); }
/// Get name of command with number n
inline const char *GetCmdName(long n)
{ return mgl_parser_cmd_name(pr,n); }
/// Get number of defined commands
inline long GetCmdNum()
{ return mgl_parser_cmd_num(pr); }
/// Load new commands from external dynamic Library (must have "const mglCommand *mgl_cmd_extra" variable)
inline void LoadDLL(const char *fname)
{ mgl_parser_load(pr, fname); }
/// Apply one step for equation d vars[i]/dt = eqs[i] using Runge-Kutta method
inline void RK_Step(const char *eqs, const char *vars, mreal dt=1)
{ mgl_rk_step(pr, eqs, vars, dt); }
inline void RK_Step(const wchar_t *eqs, const wchar_t *vars, mreal dt=1)
{ mgl_rk_step_w(pr, eqs, vars, dt); }
// Open all data arrays from HDF file and assign it as variables of parser p
inline void OpenHDF(const char *fname)
{ mgl_parser_openhdf(pr, fname); }
/// Set value for parameter $N
inline void AddParam(int id, const char *str)
{ mgl_parser_add_param(pr, id, str); }
inline void AddParam(int id, const wchar_t *str)
{ mgl_parser_add_paramw(pr, id, str); }
/// Restore once flag
inline void RestoreOnce() { mgl_parser_restore_once(pr); }
/// Allow changing size of the picture
inline void AllowSetSize(bool allow) { mgl_parser_allow_setsize(pr, allow); }
/// Allow reading/saving files
inline void AllowFileIO(bool allow) { mgl_parser_allow_file_io(pr, allow); }
/// Allow loading commands from external libraries
inline void AllowDllCall(bool allow) { mgl_parser_allow_dll_call(pr, allow); }
/// Set flag to stop script parsing
inline void Stop() { mgl_parser_stop(pr); }
/// Set variant of argument(s) separated by '?' to be used in further commands
inline void SetVariant(int var=0)
{ mgl_parser_variant(pr, var); }
/// Set starting object ID
inline void StartID(int id=0)
{ mgl_parser_start_id(pr, id); }
/// Return result of formula evaluation
inline mglData Calc(const char *formula)
{ return mglData(true,mgl_parser_calc(pr,formula)); }
inline mglData Calc(const wchar_t *formula)
{ return mglData(true,mgl_parser_calcw(pr,formula)); }
/// Return result of formula evaluation as complex data
inline mglDataC CalcComplex(const char *formula)
{ return mglDataC(true,mgl_parser_calc_complex(pr,formula)); }
inline mglDataC CalcComplex(const wchar_t *formula)
{ return mglDataC(true,mgl_parser_calc_complexw(pr,formula)); }
/// Find variable with given name or add a new one
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglDataA *AddVar(const char *name)
{ return mgl_parser_add_var(pr, name); }
inline mglDataA *AddVar(const wchar_t *name)
{ return mgl_parser_add_varw(pr, name); }
/// Find variable with given name or return NULL if no one
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglDataA *FindVar(const char *name)
{ return mgl_parser_find_var(pr, name); }
inline mglDataA *FindVar(const wchar_t *name)
{ return mgl_parser_find_varw(pr, name); }
/// Get variable with given id. Can be NULL for temporary ones.
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglDataA *GetVar(unsigned long id)
{ return mgl_parser_get_var(pr,id); }
/// Get number of variables
inline long GetNumVar()
{ return mgl_parser_num_var(pr); }
/// Delete variable with name
inline void DeleteVar(const char *name) { mgl_parser_del_var(pr, name); }
inline void DeleteVar(const wchar_t *name) { mgl_parser_del_varw(pr, name); }
/// Delete all data variables
void DeleteAll() { mgl_parser_del_all(pr); }
/// Get constant with given id. Can be NULL if not found.
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglNum *GetConst(unsigned long id)
{ return mgl_parser_get_const(pr,id); }
/// Get number of constants
inline long GetNumConst()
{ return mgl_parser_num_const(pr); }
};
//-----------------------------------------------------------------------------
#endif
#endif
|
GB_unaryop__lnot_bool_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_int16
// op(A') function: GB_tran__lnot_bool_int16
// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
// A's entry type
#define GB_ATYPE \
int16_t
// C's entry type
#define GB_CTYPE \
bool
// aij = Ax [pA]: load one entry of A
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// access one entry of C
#define GB_CX(p) Cx [p]
// unary operator (logical not)
#define GB_OP(z, x) \
z = !x ;
// casting from the A type to the C type
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij)): full load-cast-apply-store sequence for one entry
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = ! (bool) Ax [k], for all k: apply the LNOT unary operator with an
// int16 -> bool typecast, in parallel.  Cx and Ax may be aliased because each
// iteration reads and writes only position k.
GrB_Info GB_unop__lnot_bool_int16
(
    bool *Cx,              // output array (may alias Ax)
    int16_t *Ax,           // input array
    int64_t anz,           // number of entries
    int nthreads           // number of OpenMP threads to use
)
{
#if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back to
    // the generic implementation
    return (GrB_NO_VALUE) ;
#else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // expansion of GB_CAST_OP (k, k): load, cast, negate, store
        int16_t aij = Ax [k] ;
        bool z = (bool) aij ;
        Cx [k] = !z ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator in a
// single pass.  The loop body lives in the shared template
// "GB_unaryop_transpose.c", which expands the GB_* macros defined above.
GrB_Info GB_tran__lnot_bool_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// compiled out; caller uses the generic transpose instead
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
MkBase.h | #ifndef RecoTracker_MkFitCore_src_MkBase_h
#define RecoTracker_MkFitCore_src_MkBase_h
#include "Matrix.h"
#include "PropagationMPlex.h"
namespace mkfit {
//==============================================================================
// MkBase
//==============================================================================
class MkBase {
public:
static constexpr int iC = 0; // current
static constexpr int iP = 1; // propagated
float getPar(int itrack, int i, int par) const { return m_Par[i].constAt(itrack, par, 0); }
float radiusSqr(int itrack, int i) const { return hipo_sqr(getPar(itrack, i, 0), getPar(itrack, i, 1)); }
//----------------------------------------------------------------------------
MkBase() {}
//----------------------------------------------------------------------------
void propagateTracksToR(float r, const int N_proc, const PropagationFlags pf) {
MPlexQF msRad;
#pragma omp simd
for (int n = 0; n < NN; ++n) {
msRad.At(n, 0, 0) = r;
}
propagateHelixToRMPlex(m_Err[iC], m_Par[iC], m_Chg, msRad, m_Err[iP], m_Par[iP], N_proc, pf);
}
void propagateTracksToHitR(const MPlexHV& par,
const int N_proc,
const PropagationFlags pf,
const MPlexQI* noMatEffPtr = nullptr) {
MPlexQF msRad;
#pragma omp simd
for (int n = 0; n < NN; ++n) {
msRad.At(n, 0, 0) = std::hypot(par.constAt(n, 0, 0), par.constAt(n, 1, 0));
}
propagateHelixToRMPlex(m_Err[iC], m_Par[iC], m_Chg, msRad, m_Err[iP], m_Par[iP], N_proc, pf, noMatEffPtr);
}
//----------------------------------------------------------------------------
void propagateTracksToZ(float z, const int N_proc, const PropagationFlags pf) {
MPlexQF msZ;
#pragma omp simd
for (int n = 0; n < NN; ++n) {
msZ.At(n, 0, 0) = z;
}
propagateHelixToZMPlex(m_Err[iC], m_Par[iC], m_Chg, msZ, m_Err[iP], m_Par[iP], N_proc, pf);
}
void propagateTracksToHitZ(const MPlexHV& par,
const int N_proc,
const PropagationFlags pf,
const MPlexQI* noMatEffPtr = nullptr) {
MPlexQF msZ;
#pragma omp simd
for (int n = 0; n < NN; ++n) {
msZ.At(n, 0, 0) = par.constAt(n, 2, 0);
}
propagateHelixToZMPlex(m_Err[iC], m_Par[iC], m_Chg, msZ, m_Err[iP], m_Par[iP], N_proc, pf, noMatEffPtr);
}
void propagateTracksToPCAZ(const int N_proc, const PropagationFlags pf) {
MPlexQF msZ; // PCA z-coordinate
#pragma omp simd
for (int n = 0; n < NN; ++n) {
const float slope = std::tan(m_Par[iC].constAt(n, 5, 0));
// msZ.At(n, 0, 0) = ( Config::beamspotz0 + slope * ( Config::beamspotr0 - std::hypot(m_Par[iC].constAt(n, 0, 0), m_Par[iC].constAt(n, 1, 0))) + slope * slope * m_Par[iC].constAt(n, 2, 0) ) / ( 1+slope*slope); // PCA w.r.t. z0, r0
msZ.At(n, 0, 0) = (slope * (slope * m_Par[iC].constAt(n, 2, 0) -
std::hypot(m_Par[iC].constAt(n, 0, 0), m_Par[iC].constAt(n, 1, 0)))) /
(1 + slope * slope); // PCA to origin
}
propagateHelixToZMPlex(m_Err[iC], m_Par[iC], m_Chg, msZ, m_Err[iP], m_Par[iP], N_proc, pf);
}
//----------------------------------------------------------------------------
protected:
MPlexLS m_Err[2];
MPlexLV m_Par[2];
MPlexQI m_Chg;
};
} // end namespace mkfit
#endif
|
convolution_3x3_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 int8 convolution kernels into the Winograd F(4x4,3x3) domain
// (6x6 tiles, U = G g G^T with G = ktm below) and repack them for the
// pack8-to-pack4 NEON path.  Output layout: 4b-8a-inch/8a-36-outch/4b
// (see the "interleave" section).  Assumes inch is a multiple of 8 and
// outch a multiple of 4 -- TODO confirm with callers.
static void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
// G matrix of the Winograd transform, scaled to integers
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
// 3x3 kernel of output channel p, input channel q (row-major 9 values)
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h: tmp = G * g  (6x3 intermediate)
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = tmp * G^T  (6x6 transformed kernel tile)
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 4b-8a-inch/8a-36-outch/4b
kernel_tm_pack8.create(inch / 8, 36, outch / 8 + (outch % 8) / 4, (size_t)2u * 64, 64);
int q = 0;
// blocks of 8 output channels
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
// NOTE: deliberately shadows the outer kernel_tm; this is the packed output channel
Mat kernel_tm = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 36; k++)
{
short* g00 = kernel_tm.row<short>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const short* k00 = k0.row<const short>(p + i);
const short* k10 = k1.row<const short>(p + i);
const short* k20 = k2.row<const short>(p + i);
const short* k30 = k3.row<const short>(p + i);
const short* k40 = k4.row<const short>(p + i);
const short* k50 = k5.row<const short>(p + i);
const short* k60 = k6.row<const short>(p + i);
const short* k70 = k7.row<const short>(p + i);
// gather coefficient k of 8 consecutive output channels
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k40[k];
g00[5] = k50[k];
g00[6] = k60[k];
g00[7] = k70[k];
g00 += 8;
}
}
}
}
// tail: remaining block of 4 output channels
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
// NOTE: shadows the outer kernel_tm (same pattern as the 8-channel loop)
Mat kernel_tm = kernel_tm_pack8.channel(q / 8 + (q % 8) / 4);
for (int k = 0; k < 36; k++)
{
short* g00 = kernel_tm.row<short>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const short* k00 = k0.row<const short>(p + i);
const short* k10 = k1.row<const short>(p + i);
const short* k20 = k2.row<const short>(p + i);
const short* k30 = k3.row<const short>(p + i);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00 += 4;
}
}
}
}
}
static void conv3x3s1_winograd42_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
// size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
short tmp[6][6][8];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;
for (int m = 0; m < 6; m++)
{
int8x8_t _r00 = vld1_s8(r0);
int8x8_t _r01 = vld1_s8(r0 + 8);
int8x8_t _r02 = vld1_s8(r0 + 16);
int8x8_t _r03 = vld1_s8(r0 + 24);
int8x8_t _r04 = vld1_s8(r0 + 32);
int8x8_t _r05 = vld1_s8(r0 + 40);
int8x8_t _v4s8 = vdup_n_s8(4);
int8x8_t _v5s8 = vdup_n_s8(5);
int16x8_t _v2 = vdupq_n_s16(2);
int16x8_t _v4 = vdupq_n_s16(4);
// int16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f);
int16x8_t _tmp0m = vsubq_s16(vaddw_s8(vmull_s8(_r00, _v4s8), _r04), vmull_s8(_r02, _v5s8));
// int16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f);
int16x8_t _tmp1m = vmlsq_s16(vaddl_s8(_r04, _r03), vaddl_s8(_r01, _r02), _v4);
// int16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f);
int16x8_t _tmp2m = vmlaq_s16(vsubl_s8(_r04, _r03), vsubl_s8(_r01, _r02), _v4);
// int16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
int16x8_t _tmp3m = vmlsq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2);
// int16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
int16x8_t _tmp4m = vmlaq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2);
// int16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f);
int16x8_t _tmp5m = vsubq_s16(vaddw_s8(vmull_s8(_r01, _v4s8), _r05), vmull_s8(_r03, _v5s8));
vst1q_s16(tmp[0][m], _tmp0m);
vst1q_s16(tmp[1][m], _tmp1m);
vst1q_s16(tmp[2][m], _tmp2m);
vst1q_s16(tmp[3][m], _tmp3m);
vst1q_s16(tmp[4][m], _tmp4m);
vst1q_s16(tmp[5][m], _tmp5m);
r0 += w * 8;
}
short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
short* r0_tm_1 = r0_tm_0 + tiles * 8;
short* r0_tm_2 = r0_tm_0 + tiles * 16;
short* r0_tm_3 = r0_tm_0 + tiles * 24;
short* r0_tm_4 = r0_tm_0 + tiles * 32;
short* r0_tm_5 = r0_tm_0 + tiles * 40;
for (int m = 0; m < 6; m++)
{
int16x8_t _tmp00 = vld1q_s16(tmp[m][0]);
int16x8_t _tmp01 = vld1q_s16(tmp[m][1]);
int16x8_t _tmp02 = vld1q_s16(tmp[m][2]);
int16x8_t _tmp03 = vld1q_s16(tmp[m][3]);
int16x8_t _tmp04 = vld1q_s16(tmp[m][4]);
int16x8_t _tmp05 = vld1q_s16(tmp[m][5]);
int16x8_t _v2 = vdupq_n_s16(2);
int16x8_t _v4 = vdupq_n_s16(4);
int16x8_t _v5 = vdupq_n_s16(5);
int16x8_t _r0tm0 = vmlsq_s16(vmlaq_s16(_tmp04, _tmp00, _v4), _tmp02, _v5);
int16x8_t _r0tm1 = vmlsq_s16(vaddq_s16(_tmp04, _tmp03), vaddq_s16(_tmp01, _tmp02), _v4);
int16x8_t _r0tm2 = vmlaq_s16(vsubq_s16(_tmp04, _tmp03), vsubq_s16(_tmp01, _tmp02), _v4);
int16x8_t _r0tm3 = vmlsq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2);
int16x8_t _r0tm4 = vmlaq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2);
int16x8_t _r0tm5 = vmlsq_s16(vmlaq_s16(_tmp05, _tmp01, _v4), _tmp03, _v5);
vst1q_s16(r0_tm_0, _r0tm0);
vst1q_s16(r0_tm_1, _r0tm1);
vst1q_s16(r0_tm_2, _r0tm2);
vst1q_s16(r0_tm_3, _r0tm3);
vst1q_s16(r0_tm_4, _r0tm4);
vst1q_s16(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 48;
r0_tm_1 += tiles * 48;
r0_tm_2 += tiles * 48;
r0_tm_3 += tiles * 48;
r0_tm_4 += tiles * 48;
r0_tm_5 += tiles * 48;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
short* tm2p = tm2.row<short>(i / 12);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 12x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
"ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
"sub %0, %0, #128 \n"
"uzp1 v20.8h, v0.8h, v4.8h \n" // 0
"uzp1 v21.8h, v16.8h, v1.8h \n" // 1
"uzp1 v22.8h, v5.8h, v17.8h \n" // 2
"uzp1 v23.8h, v2.8h, v6.8h \n" // 3
"uzp1 v24.8h, v18.8h, v3.8h \n" // 4
"uzp1 v25.8h, v7.8h, v19.8h \n" // 5
"uzp2 v26.8h, v0.8h, v4.8h \n" // 6
"uzp2 v27.8h, v16.8h, v1.8h \n" // 7
"uzp2 v28.8h, v5.8h, v17.8h \n" // 8
"uzp2 v29.8h, v2.8h, v6.8h \n" // 9
"uzp2 v30.8h, v18.8h, v3.8h \n" // 10
"uzp2 v31.8h, v7.8h, v19.8h \n" // 11
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 7 < tiles; i += 8)
{
short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
#endif // __aarch64__
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
short* tmpptr = tm2.row<short>(i / 4);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.s16 {d0-d3}, [%0 :128] \n"
"vst1.s16 {d0-d3}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif
r0 += bottom_blob_tm.cstep * 8;
}
}
// Repack the remaining single tiles (8 shorts = 16 bytes per input channel).
for (; i < tiles; i++)
{
#if __aarch64__
// Skip past all wider tile groups already packed above.
short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8; // 8 shorts per tile
for (int q = 0; q < inch; q++)
{
#if __aarch64__
// Copy one 8-short vector, advancing tmpptr by 16 bytes.
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0");
#else
// armv7 equivalent with 128-bit aligned load/store.
asm volatile(
"pld [%0, #128] \n"
"vld1.s16 {d0-d1}, [%0 :128] \n"
"vst1.s16 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "q0");
#endif
// Step to the same tile in the next input channel.
r0 += bottom_blob_tm.cstep * 8;
}
}
}
// Release the channel-major transform buffer; everything the GEMM needs
// now lives in the tile-major scratch (bottom_blob_tm2).
bottom_blob_tm = Mat();
// permute end
// Output in the transform domain: tiles x 36 coefficients per output
// channel; elemsize 4u*4 with packing 4 (presumably 4 int32 lanes per
// element -- confirm against Mat::create semantics).
top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
// Process output channels in pairs below.
nn_outch = outch >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
const Mat kernel0_tm = kernel_tm.channel(p / 2);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
// Micro-kernel: 12 input tiles x 2 output channels (aarch64 only).
// v8-v19  hold 12 int32x4 accumulators fed by the LOW half of each
//         weight vector (smlal)  -> stored to output0_tm (channel p);
// v20-v31 hold 12 accumulators fed by the HIGH half (smlal2)
//         -> stored to output1_tm (channel p+1).
// Each loop iteration consumes 96 shorts of r0 (12 tiles x 8) and 64
// shorts of k0; the first vectors of the NEXT iteration are preloaded
// inside the loop, so after "bne 0b" the two "sub ..., #32" undo that
// 32-byte over-advance of %3 and %4.
for (; i + 11 < tiles; i += 12)
{
const short* r0 = bb2.row<const short>(i / 12);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
asm volatile(
// Preload first data/weight vectors and zero all 24 accumulators.
"ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r01
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w01
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"prfm pldl1keep, [%3, #256] \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"prfm pldl1keep, [%4, #256] \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
// Main loop over inch: multiply-accumulate weight vector w against
// the 12 tile lanes, interleaving the next loads with the MACs.
"0: \n"
"smlal v8.4s, v4.4h, v0.h[0] \n"
"smlal2 v20.4s, v4.8h, v0.h[0] \n"
"smlal v9.4s, v4.4h, v0.h[1] \n"
"smlal2 v21.4s, v4.8h, v0.h[1] \n"
"smlal v10.4s, v4.4h, v0.h[2] \n"
"smlal2 v22.4s, v4.8h, v0.h[2] \n"
"smlal v11.4s, v4.4h, v0.h[3] \n"
"smlal2 v23.4s, v4.8h, v0.h[3] \n"
"smlal v12.4s, v4.4h, v0.h[4] \n"
"smlal2 v24.4s, v4.8h, v0.h[4] \n"
"smlal v13.4s, v4.4h, v0.h[5] \n"
"smlal2 v25.4s, v4.8h, v0.h[5] \n"
"smlal v14.4s, v4.4h, v0.h[6] \n"
"smlal2 v26.4s, v4.8h, v0.h[6] \n"
"smlal v15.4s, v4.4h, v0.h[7] \n"
"smlal2 v27.4s, v4.8h, v0.h[7] \n"
"ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r23
"smlal v16.4s, v4.4h, v1.h[0] \n"
"smlal2 v28.4s, v4.8h, v1.h[0] \n"
"smlal v17.4s, v4.4h, v1.h[1] \n"
"smlal2 v29.4s, v4.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal v18.4s, v4.4h, v1.h[2] \n"
"smlal2 v30.4s, v4.8h, v1.h[2] \n"
"smlal v19.4s, v4.4h, v1.h[3] \n"
"smlal2 v31.4s, v4.8h, v1.h[3] \n"
"ld1 {v6.8h, v7.8h}, [%4], #32 \n" // w23
"smlal v8.4s, v5.4h, v1.h[4] \n"
"smlal2 v20.4s, v5.8h, v1.h[4] \n"
"smlal v9.4s, v5.4h, v1.h[5] \n"
"smlal2 v21.4s, v5.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #256] \n"
"smlal v10.4s, v5.4h, v1.h[6] \n"
"smlal2 v22.4s, v5.8h, v1.h[6] \n"
"smlal v11.4s, v5.4h, v1.h[7] \n"
"smlal2 v23.4s, v5.8h, v1.h[7] \n"
"smlal v12.4s, v5.4h, v2.h[0] \n"
"smlal2 v24.4s, v5.8h, v2.h[0] \n"
"smlal v13.4s, v5.4h, v2.h[1] \n"
"smlal2 v25.4s, v5.8h, v2.h[1] \n"
"smlal v14.4s, v5.4h, v2.h[2] \n"
"smlal2 v26.4s, v5.8h, v2.h[2] \n"
"smlal v15.4s, v5.4h, v2.h[3] \n"
"smlal2 v27.4s, v5.8h, v2.h[3] \n"
"smlal v16.4s, v5.4h, v2.h[4] \n"
"smlal2 v28.4s, v5.8h, v2.h[4] \n"
"smlal v17.4s, v5.4h, v2.h[5] \n"
"smlal2 v29.4s, v5.8h, v2.h[5] \n"
"smlal v18.4s, v5.4h, v2.h[6] \n"
"smlal2 v30.4s, v5.8h, v2.h[6] \n"
"smlal v19.4s, v5.4h, v2.h[7] \n"
"smlal2 v31.4s, v5.8h, v2.h[7] \n"
"ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r45
"smlal v8.4s, v6.4h, v3.h[0] \n"
"smlal2 v20.4s, v6.8h, v3.h[0] \n"
"smlal v9.4s, v6.4h, v3.h[1] \n"
"smlal2 v21.4s, v6.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal v10.4s, v6.4h, v3.h[2] \n"
"smlal2 v22.4s, v6.8h, v3.h[2] \n"
"smlal v11.4s, v6.4h, v3.h[3] \n"
"smlal2 v23.4s, v6.8h, v3.h[3] \n"
"smlal v12.4s, v6.4h, v3.h[4] \n"
"smlal2 v24.4s, v6.8h, v3.h[4] \n"
"smlal v13.4s, v6.4h, v3.h[5] \n"
"smlal2 v25.4s, v6.8h, v3.h[5] \n"
"smlal v14.4s, v6.4h, v3.h[6] \n"
"smlal2 v26.4s, v6.8h, v3.h[6] \n"
"smlal v15.4s, v6.4h, v3.h[7] \n"
"smlal2 v27.4s, v6.8h, v3.h[7] \n"
"smlal v16.4s, v6.4h, v0.h[0] \n"
"smlal2 v28.4s, v6.8h, v0.h[0] \n"
"smlal v17.4s, v6.4h, v0.h[1] \n"
"smlal2 v29.4s, v6.8h, v0.h[1] \n"
"smlal v18.4s, v6.4h, v0.h[2] \n"
"smlal2 v30.4s, v6.8h, v0.h[2] \n"
"smlal v19.4s, v6.4h, v0.h[3] \n"
"smlal2 v31.4s, v6.8h, v0.h[3] \n"
"ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w45
"smlal v8.4s, v7.4h, v0.h[4] \n"
"smlal2 v20.4s, v7.8h, v0.h[4] \n"
"smlal v9.4s, v7.4h, v0.h[5] \n"
"smlal2 v21.4s, v7.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #256] \n"
"smlal v10.4s, v7.4h, v0.h[6] \n"
"smlal2 v22.4s, v7.8h, v0.h[6] \n"
"smlal v11.4s, v7.4h, v0.h[7] \n"
"smlal2 v23.4s, v7.8h, v0.h[7] \n"
"ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r67
"smlal v12.4s, v7.4h, v1.h[0] \n"
"smlal2 v24.4s, v7.8h, v1.h[0] \n"
"smlal v13.4s, v7.4h, v1.h[1] \n"
"smlal2 v25.4s, v7.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal v14.4s, v7.4h, v1.h[2] \n"
"smlal2 v26.4s, v7.8h, v1.h[2] \n"
"smlal v15.4s, v7.4h, v1.h[3] \n"
"smlal2 v27.4s, v7.8h, v1.h[3] \n"
"smlal v16.4s, v7.4h, v1.h[4] \n"
"smlal2 v28.4s, v7.8h, v1.h[4] \n"
"smlal v17.4s, v7.4h, v1.h[5] \n"
"smlal2 v29.4s, v7.8h, v1.h[5] \n"
"smlal v18.4s, v7.4h, v1.h[6] \n"
"smlal2 v30.4s, v7.8h, v1.h[6] \n"
"smlal v19.4s, v7.4h, v1.h[7] \n"
"smlal2 v31.4s, v7.8h, v1.h[7] \n"
"smlal v8.4s, v4.4h, v2.h[0] \n"
"smlal2 v20.4s, v4.8h, v2.h[0] \n"
"smlal v9.4s, v4.4h, v2.h[1] \n"
"smlal2 v21.4s, v4.8h, v2.h[1] \n"
"smlal v10.4s, v4.4h, v2.h[2] \n"
"smlal2 v22.4s, v4.8h, v2.h[2] \n"
"smlal v11.4s, v4.4h, v2.h[3] \n"
"smlal2 v23.4s, v4.8h, v2.h[3] \n"
"smlal v12.4s, v4.4h, v2.h[4] \n"
"smlal2 v24.4s, v4.8h, v2.h[4] \n"
"smlal v13.4s, v4.4h, v2.h[5] \n"
"smlal2 v25.4s, v4.8h, v2.h[5] \n"
"smlal v14.4s, v4.4h, v2.h[6] \n"
"smlal2 v26.4s, v4.8h, v2.h[6] \n"
"smlal v15.4s, v4.4h, v2.h[7] \n"
"smlal2 v27.4s, v4.8h, v2.h[7] \n"
"ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r89
"smlal v16.4s, v4.4h, v3.h[0] \n"
"smlal2 v28.4s, v4.8h, v3.h[0] \n"
"smlal v17.4s, v4.4h, v3.h[1] \n"
"smlal2 v29.4s, v4.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal v18.4s, v4.4h, v3.h[2] \n"
"smlal2 v30.4s, v4.8h, v3.h[2] \n"
"smlal v19.4s, v4.4h, v3.h[3] \n"
"smlal2 v31.4s, v4.8h, v3.h[3] \n"
"ld1 {v6.8h, v7.8h}, [%4], #32 \n" // w67
"smlal v8.4s, v5.4h, v3.h[4] \n"
"smlal2 v20.4s, v5.8h, v3.h[4] \n"
"smlal v9.4s, v5.4h, v3.h[5] \n"
"smlal2 v21.4s, v5.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #256] \n"
"smlal v10.4s, v5.4h, v3.h[6] \n"
"smlal2 v22.4s, v5.8h, v3.h[6] \n"
"smlal v11.4s, v5.4h, v3.h[7] \n"
"smlal2 v23.4s, v5.8h, v3.h[7] \n"
"smlal v12.4s, v5.4h, v0.h[0] \n"
"smlal2 v24.4s, v5.8h, v0.h[0] \n"
"smlal v13.4s, v5.4h, v0.h[1] \n"
"smlal2 v25.4s, v5.8h, v0.h[1] \n"
"smlal v14.4s, v5.4h, v0.h[2] \n"
"smlal2 v26.4s, v5.8h, v0.h[2] \n"
"smlal v15.4s, v5.4h, v0.h[3] \n"
"smlal2 v27.4s, v5.8h, v0.h[3] \n"
"smlal v16.4s, v5.4h, v0.h[4] \n"
"smlal2 v28.4s, v5.8h, v0.h[4] \n"
"smlal v17.4s, v5.4h, v0.h[5] \n"
"smlal2 v29.4s, v5.8h, v0.h[5] \n"
"smlal v18.4s, v5.4h, v0.h[6] \n"
"smlal2 v30.4s, v5.8h, v0.h[6] \n"
"smlal v19.4s, v5.4h, v0.h[7] \n"
"smlal2 v31.4s, v5.8h, v0.h[7] \n"
"ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r1011
"smlal v8.4s, v6.4h, v1.h[0] \n"
"smlal2 v20.4s, v6.8h, v1.h[0] \n"
"smlal v9.4s, v6.4h, v1.h[1] \n"
"smlal2 v21.4s, v6.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal v10.4s, v6.4h, v1.h[2] \n"
"smlal2 v22.4s, v6.8h, v1.h[2] \n"
"smlal v11.4s, v6.4h, v1.h[3] \n"
"smlal2 v23.4s, v6.8h, v1.h[3] \n"
"smlal v12.4s, v6.4h, v1.h[4] \n"
"smlal2 v24.4s, v6.8h, v1.h[4] \n"
"smlal v13.4s, v6.4h, v1.h[5] \n"
"smlal2 v25.4s, v6.8h, v1.h[5] \n"
"smlal v14.4s, v6.4h, v1.h[6] \n"
"smlal2 v26.4s, v6.8h, v1.h[6] \n"
"smlal v15.4s, v6.4h, v1.h[7] \n"
"smlal2 v27.4s, v6.8h, v1.h[7] \n"
"smlal v16.4s, v6.4h, v2.h[0] \n"
"smlal2 v28.4s, v6.8h, v2.h[0] \n"
"smlal v17.4s, v6.4h, v2.h[1] \n"
"smlal2 v29.4s, v6.8h, v2.h[1] \n"
"smlal v18.4s, v6.4h, v2.h[2] \n"
"smlal2 v30.4s, v6.8h, v2.h[2] \n"
"smlal v19.4s, v6.4h, v2.h[3] \n"
"smlal2 v31.4s, v6.8h, v2.h[3] \n"
// Preload next iteration's first weight/data vectors (over-read is
// compensated by the sub instructions after the loop).
"ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w01
"smlal v8.4s, v7.4h, v2.h[4] \n"
"smlal2 v20.4s, v7.8h, v2.h[4] \n"
"smlal v9.4s, v7.4h, v2.h[5] \n"
"smlal2 v21.4s, v7.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #256] \n"
"smlal v10.4s, v7.4h, v2.h[6] \n"
"smlal2 v22.4s, v7.8h, v2.h[6] \n"
"smlal v11.4s, v7.4h, v2.h[7] \n"
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r01
"smlal v12.4s, v7.4h, v3.h[0] \n"
"smlal2 v24.4s, v7.8h, v3.h[0] \n"
"smlal v13.4s, v7.4h, v3.h[1] \n"
"smlal2 v25.4s, v7.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal v14.4s, v7.4h, v3.h[2] \n"
"smlal2 v26.4s, v7.8h, v3.h[2] \n"
"smlal v15.4s, v7.4h, v3.h[3] \n"
"smlal2 v27.4s, v7.8h, v3.h[3] \n"
"smlal v16.4s, v7.4h, v3.h[4] \n"
"smlal2 v28.4s, v7.8h, v3.h[4] \n"
"smlal v17.4s, v7.4h, v3.h[5] \n"
"smlal2 v29.4s, v7.8h, v3.h[5] \n"
"subs %w0, %w0, #1 \n"
"smlal v18.4s, v7.4h, v3.h[6] \n"
"smlal2 v30.4s, v7.8h, v3.h[6] \n"
"smlal v19.4s, v7.4h, v3.h[7] \n"
"smlal2 v31.4s, v7.8h, v3.h[7] \n"
"bne 0b \n"
// Undo the 32-byte over-advance of the preloads above.
"sub %3, %3, #32 \n"
"sub %4, %4, #32 \n"
// Store 12 tiles x 4 int32 per output channel, interleaved.
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k0) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// Micro-kernel: 8 input tiles x 2 output channels via NEON intrinsics.
// Tile t uses the accumulator pair _sum(2t)/_sum(2t+1): the even sum is
// fed by the LOW half of each weight vector (stored to output0_tm), the
// odd sum by the HIGH half (stored to output1_tm). Each j-iteration
// consumes 64 shorts of data (8 tiles x 8) and 64 shorts of weights.
for (; i + 7 < tiles; i += 8)
{
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
int32x4_t _sum8 = vdupq_n_s32(0);
int32x4_t _sum9 = vdupq_n_s32(0);
int32x4_t _suma = vdupq_n_s32(0);
int32x4_t _sumb = vdupq_n_s32(0);
int32x4_t _sumc = vdupq_n_s32(0);
int32x4_t _sumd = vdupq_n_s32(0);
int32x4_t _sume = vdupq_n_s32(0);
int32x4_t _sumf = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
// 8 data vectors: _valk holds the 8 lanes of weight position k for
// all 8 tiles of this group.
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _val2 = vld1q_s16(r0 + 16);
int16x8_t _val3 = vld1q_s16(r0 + 24);
int16x8_t _val4 = vld1q_s16(r0 + 32);
int16x8_t _val5 = vld1q_s16(r0 + 40);
int16x8_t _val6 = vld1q_s16(r0 + 48);
int16x8_t _val7 = vld1q_s16(r0 + 56);
// Weight vector 0: broadcast each of the 8 tile lanes of _val0.
int16x8_t _w0 = vld1q_s16(k0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val0), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val0), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val0), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val0), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val0), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val0), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w0), vget_high_s16(_val0), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w0), vget_high_s16(_val0), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w0), vget_high_s16(_val0), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w0), vget_high_s16(_val0), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w0), vget_high_s16(_val0), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w0), vget_high_s16(_val0), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w0), vget_high_s16(_val0), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w0), vget_high_s16(_val0), 3);
// Weight vector 1 against _val1, same lane pattern.
int16x8_t _w1 = vld1q_s16(k0 + 8);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val1), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val1), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val1), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val1), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val1), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val1), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w1), vget_high_s16(_val1), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w1), vget_high_s16(_val1), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w1), vget_high_s16(_val1), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w1), vget_high_s16(_val1), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w1), vget_high_s16(_val1), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w1), vget_high_s16(_val1), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w1), vget_high_s16(_val1), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w1), vget_high_s16(_val1), 3);
// Weight vector 2 against _val2.
int16x8_t _w2 = vld1q_s16(k0 + 16);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val2), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val2), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val2), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val2), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_low_s16(_val2), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_low_s16(_val2), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_low_s16(_val2), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_low_s16(_val2), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w2), vget_high_s16(_val2), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w2), vget_high_s16(_val2), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w2), vget_high_s16(_val2), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w2), vget_high_s16(_val2), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w2), vget_high_s16(_val2), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w2), vget_high_s16(_val2), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w2), vget_high_s16(_val2), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w2), vget_high_s16(_val2), 3);
// Weight vector 3 against _val3.
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val3), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val3), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val3), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val3), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_low_s16(_val3), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_low_s16(_val3), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_low_s16(_val3), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_low_s16(_val3), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w3), vget_high_s16(_val3), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w3), vget_high_s16(_val3), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w3), vget_high_s16(_val3), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w3), vget_high_s16(_val3), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w3), vget_high_s16(_val3), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w3), vget_high_s16(_val3), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w3), vget_high_s16(_val3), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w3), vget_high_s16(_val3), 3);
// Weight vector 4 against _val4.
int16x8_t _w4 = vld1q_s16(k0 + 32);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_low_s16(_val4), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_low_s16(_val4), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_low_s16(_val4), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_low_s16(_val4), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w4), vget_low_s16(_val4), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w4), vget_low_s16(_val4), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w4), vget_low_s16(_val4), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w4), vget_low_s16(_val4), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w4), vget_high_s16(_val4), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w4), vget_high_s16(_val4), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w4), vget_high_s16(_val4), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w4), vget_high_s16(_val4), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w4), vget_high_s16(_val4), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w4), vget_high_s16(_val4), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w4), vget_high_s16(_val4), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w4), vget_high_s16(_val4), 3);
// Weight vector 5 against _val5.
int16x8_t _w5 = vld1q_s16(k0 + 40);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_low_s16(_val5), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_low_s16(_val5), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_low_s16(_val5), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_low_s16(_val5), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w5), vget_low_s16(_val5), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w5), vget_low_s16(_val5), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w5), vget_low_s16(_val5), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w5), vget_low_s16(_val5), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w5), vget_high_s16(_val5), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w5), vget_high_s16(_val5), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w5), vget_high_s16(_val5), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w5), vget_high_s16(_val5), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w5), vget_high_s16(_val5), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w5), vget_high_s16(_val5), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w5), vget_high_s16(_val5), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w5), vget_high_s16(_val5), 3);
// Weight vector 6 against _val6.
int16x8_t _w6 = vld1q_s16(k0 + 48);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_low_s16(_val6), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_low_s16(_val6), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_low_s16(_val6), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_low_s16(_val6), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w6), vget_low_s16(_val6), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w6), vget_low_s16(_val6), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w6), vget_low_s16(_val6), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w6), vget_low_s16(_val6), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w6), vget_high_s16(_val6), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w6), vget_high_s16(_val6), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w6), vget_high_s16(_val6), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w6), vget_high_s16(_val6), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w6), vget_high_s16(_val6), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w6), vget_high_s16(_val6), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w6), vget_high_s16(_val6), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w6), vget_high_s16(_val6), 3);
// Weight vector 7 against _val7.
int16x8_t _w7 = vld1q_s16(k0 + 56);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_low_s16(_val7), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_low_s16(_val7), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_low_s16(_val7), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_low_s16(_val7), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w7), vget_low_s16(_val7), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w7), vget_low_s16(_val7), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w7), vget_low_s16(_val7), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w7), vget_low_s16(_val7), 3);
_sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w7), vget_high_s16(_val7), 0);
_sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w7), vget_high_s16(_val7), 0);
_suma = vmlal_lane_s16(_suma, vget_low_s16(_w7), vget_high_s16(_val7), 1);
_sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w7), vget_high_s16(_val7), 1);
_sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w7), vget_high_s16(_val7), 2);
_sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w7), vget_high_s16(_val7), 2);
_sume = vmlal_lane_s16(_sume, vget_low_s16(_w7), vget_high_s16(_val7), 3);
_sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w7), vget_high_s16(_val7), 3);
r0 += 64;
k0 += 64;
}
// Even accumulators -> channel p, odd -> channel p+1; 4 int32 per tile.
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output1_tm, _sum1);
vst1q_s32(output0_tm + 4, _sum2);
vst1q_s32(output1_tm + 4, _sum3);
vst1q_s32(output0_tm + 8, _sum4);
vst1q_s32(output1_tm + 8, _sum5);
vst1q_s32(output0_tm + 12, _sum6);
vst1q_s32(output1_tm + 12, _sum7);
vst1q_s32(output0_tm + 16, _sum8);
vst1q_s32(output1_tm + 16, _sum9);
vst1q_s32(output0_tm + 20, _suma);
vst1q_s32(output1_tm + 20, _sumb);
vst1q_s32(output0_tm + 24, _sumc);
vst1q_s32(output1_tm + 24, _sumd);
vst1q_s32(output0_tm + 28, _sume);
vst1q_s32(output1_tm + 28, _sumf);
output0_tm += 32;
output1_tm += 32;
}
#endif // __aarch64__
// Micro-kernel: 4 input tiles x 2 output channels. aarch64 uses
// intrinsics (8 accumulators, tile t -> _sum(2t)/_sum(2t+1), even sums
// from the LOW half of each weight vector for output0_tm, odd sums from
// the HIGH half for output1_tm); armv7 uses inline asm with q8-q15.
// Each inner iteration consumes 32 shorts of data (4 tiles x 8) and 64
// shorts of weights.
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const short* r0 = bb2.row<const short>(i / 4);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
#if __aarch64__
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
// _valt holds the 8 weight-position lanes for tile t.
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _val2 = vld1q_s16(r0 + 16);
int16x8_t _val3 = vld1q_s16(r0 + 24);
// Weight vector k multiplies lane k of each tile's data vector.
int16x8_t _w0 = vld1q_s16(k0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 0);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val2), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val2), 0);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val3), 0);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val3), 0);
int16x8_t _w1 = vld1q_s16(k0 + 8);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val2), 1);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val2), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val3), 1);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val3), 1);
int16x8_t _w2 = vld1q_s16(k0 + 16);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val1), 2);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_low_s16(_val2), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_low_s16(_val2), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_low_s16(_val3), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_low_s16(_val3), 2);
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val1), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val1), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_low_s16(_val2), 3);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_low_s16(_val2), 3);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_low_s16(_val3), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_low_s16(_val3), 3);
// Weight vectors 4-7 use the HIGH half of each data vector.
int16x8_t _w4 = vld1q_s16(k0 + 32);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_high_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_high_s16(_val1), 0);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w4), vget_high_s16(_val2), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w4), vget_high_s16(_val2), 0);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w4), vget_high_s16(_val3), 0);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w4), vget_high_s16(_val3), 0);
int16x8_t _w5 = vld1q_s16(k0 + 40);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_high_s16(_val1), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_high_s16(_val1), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w5), vget_high_s16(_val2), 1);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w5), vget_high_s16(_val2), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w5), vget_high_s16(_val3), 1);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w5), vget_high_s16(_val3), 1);
int16x8_t _w6 = vld1q_s16(k0 + 48);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_high_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_high_s16(_val1), 2);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w6), vget_high_s16(_val2), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w6), vget_high_s16(_val2), 2);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w6), vget_high_s16(_val3), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w6), vget_high_s16(_val3), 2);
int16x8_t _w7 = vld1q_s16(k0 + 56);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_high_s16(_val1), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_high_s16(_val1), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w7), vget_high_s16(_val2), 3);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w7), vget_high_s16(_val2), 3);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w7), vget_high_s16(_val3), 3);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w7), vget_high_s16(_val3), 3);
r0 += 32;
k0 += 64;
}
// Even accumulators -> channel p, odd -> channel p+1; 4 int32 per tile.
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output1_tm, _sum1);
vst1q_s32(output0_tm + 4, _sum2);
vst1q_s32(output1_tm + 4, _sum3);
vst1q_s32(output0_tm + 8, _sum4);
vst1q_s32(output1_tm + 8, _sum5);
vst1q_s32(output0_tm + 12, _sum6);
vst1q_s32(output1_tm + 12, _sum7);
output0_tm += 16;
output1_tm += 16;
#else
// armv7: q8-q11 accumulate channel p (low weight half d8/d10), q12-q15
// accumulate channel p+1 (high half d9/d11); per iteration: 64 bytes of
// data via vldm, 128 bytes of weights via eight alternating d8-d11 loads.
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%3, #256] \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"
"pld [%4, #256] \n"
"vld1.s16 {d8-d11}, [%4 :128]! \n"
"vmlal.s16 q8, d8, d0[0] \n"
"vmlal.s16 q12, d9, d0[0] \n"
"vmlal.s16 q9, d8, d2[0] \n"
"vmlal.s16 q13, d9, d2[0] \n"
"vmlal.s16 q10, d8, d4[0] \n"
"vmlal.s16 q14, d9, d4[0] \n"
"vmlal.s16 q11, d8, d6[0] \n"
"vmlal.s16 q15, d9, d6[0] \n"
"pld [%4, #128] \n"
"vld1.s16 {d8-d9}, [%4 :128]! \n"
"vmlal.s16 q8, d10, d0[1] \n"
"vmlal.s16 q12, d11, d0[1] \n"
"vmlal.s16 q9, d10, d2[1] \n"
"vmlal.s16 q13, d11, d2[1] \n"
"vmlal.s16 q10, d10, d4[1] \n"
"vmlal.s16 q14, d11, d4[1] \n"
"vmlal.s16 q11, d10, d6[1] \n"
"vmlal.s16 q15, d11, d6[1] \n"
"pld [%4, #128] \n"
"vld1.s16 {d10-d11}, [%4 :128]! \n"
"vmlal.s16 q8, d8, d0[2] \n"
"vmlal.s16 q12, d9, d0[2] \n"
"vmlal.s16 q9, d8, d2[2] \n"
"vmlal.s16 q13, d9, d2[2] \n"
"vmlal.s16 q10, d8, d4[2] \n"
"vmlal.s16 q14, d9, d4[2] \n"
"vmlal.s16 q11, d8, d6[2] \n"
"vmlal.s16 q15, d9, d6[2] \n"
"pld [%4, #128] \n"
"vld1.s16 {d8-d9}, [%4 :128]! \n"
"vmlal.s16 q8, d10, d0[3] \n"
"vmlal.s16 q12, d11, d0[3] \n"
"vmlal.s16 q9, d10, d2[3] \n"
"vmlal.s16 q13, d11, d2[3] \n"
"vmlal.s16 q10, d10, d4[3] \n"
"vmlal.s16 q14, d11, d4[3] \n"
"vmlal.s16 q11, d10, d6[3] \n"
"vmlal.s16 q15, d11, d6[3] \n"
"pld [%4, #128] \n"
"vld1.s16 {d10-d11}, [%4 :128]! \n"
"vmlal.s16 q8, d8, d1[0] \n"
"vmlal.s16 q12, d9, d1[0] \n"
"vmlal.s16 q9, d8, d3[0] \n"
"vmlal.s16 q13, d9, d3[0] \n"
"vmlal.s16 q10, d8, d5[0] \n"
"vmlal.s16 q14, d9, d5[0] \n"
"vmlal.s16 q11, d8, d7[0] \n"
"vmlal.s16 q15, d9, d7[0] \n"
"pld [%4, #128] \n"
"vld1.s16 {d8-d9}, [%4 :128]! \n"
"vmlal.s16 q8, d10, d1[1] \n"
"vmlal.s16 q12, d11, d1[1] \n"
"vmlal.s16 q9, d10, d3[1] \n"
"vmlal.s16 q13, d11, d3[1] \n"
"vmlal.s16 q10, d10, d5[1] \n"
"vmlal.s16 q14, d11, d5[1] \n"
"vmlal.s16 q11, d10, d7[1] \n"
"vmlal.s16 q15, d11, d7[1] \n"
"pld [%4, #128] \n"
"vld1.s16 {d10-d11}, [%4 :128]! \n"
"vmlal.s16 q8, d8, d1[2] \n"
"vmlal.s16 q12, d9, d1[2] \n"
"vmlal.s16 q9, d8, d3[2] \n"
"vmlal.s16 q13, d9, d3[2] \n"
"vmlal.s16 q10, d8, d5[2] \n"
"vmlal.s16 q14, d9, d5[2] \n"
"vmlal.s16 q11, d8, d7[2] \n"
"vmlal.s16 q15, d9, d7[2] \n"
"subs %0, %0, #1 \n"
"vmlal.s16 q8, d10, d1[3] \n"
"vmlal.s16 q12, d11, d1[3] \n"
"vmlal.s16 q9, d10, d3[3] \n"
"vmlal.s16 q13, d11, d3[3] \n"
"vmlal.s16 q10, d10, d5[3] \n"
"vmlal.s16 q14, d11, d5[3] \n"
"vmlal.s16 q11, d10, d7[3] \n"
"vmlal.s16 q15, d11, d7[3] \n"
"bne 0b \n"
// Store 4 tiles x 4 int32 per output channel.
"vstm %1!, {d16-d23} \n"
"vstm %2!, {d24-d31} \n"
: "=r"(nn),
"=r"(output0_tm),
"=r"(output1_tm),
"=r"(r0),
"=r"(k0)
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _w0 = vld1q_s16(k0);
int16x8_t _w1 = vld1q_s16(k0 + 8);
int16x8_t _w2 = vld1q_s16(k0 + 16);
int16x8_t _w3 = vld1q_s16(k0 + 24);
int16x8_t _w4 = vld1q_s16(k0 + 32);
int16x8_t _w5 = vld1q_s16(k0 + 40);
int16x8_t _w6 = vld1q_s16(k0 + 48);
int16x8_t _w7 = vld1q_s16(k0 + 56);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val1), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val1), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val1), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_high_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_high_s16(_val1), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_high_s16(_val1), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_high_s16(_val1), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_high_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_high_s16(_val1), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_high_s16(_val1), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_high_s16(_val1), 3);
r0 += 16;
k0 += 64;
}
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output1_tm, _sum1);
vst1q_s32(output0_tm + 4, _sum2);
vst1q_s32(output1_tm + 4, _sum3);
output0_tm += 8;
output1_tm += 8;
}
for (; i < tiles; i++)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _w0 = vld1q_s16(k0);
int16x8_t _w1 = vld1q_s16(k0 + 8);
int16x8_t _w2 = vld1q_s16(k0 + 16);
int16x8_t _w3 = vld1q_s16(k0 + 24);
int16x8_t _w4 = vld1q_s16(k0 + 32);
int16x8_t _w5 = vld1q_s16(k0 + 40);
int16x8_t _w6 = vld1q_s16(k0 + 48);
int16x8_t _w7 = vld1q_s16(k0 + 56);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3);
r0 += 8;
k0 += 64;
}
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output1_tm, _sum1);
output0_tm += 4;
output1_tm += 4;
}
}
}
remain_outch_start += nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const short* r0 = bb2.row<const short>(i / 12);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"ld1 {v4.8h, v5.8h}, [%3], #32 \n" // w01
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"prfm pldl1keep, [%2, #256] \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"prfm pldl1keep, [%3, #256] \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"smlal v8.4s, v4.4h, v0.h[0] \n"
"smlal v9.4s, v4.4h, v0.h[1] \n"
"smlal v10.4s, v4.4h, v0.h[2] \n"
"smlal v11.4s, v4.4h, v0.h[3] \n"
"smlal v12.4s, v4.4h, v0.h[4] \n"
"smlal v13.4s, v4.4h, v0.h[5] \n"
"smlal v14.4s, v4.4h, v0.h[6] \n"
"smlal v15.4s, v4.4h, v0.h[7] \n"
"ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r23
"smlal v16.4s, v4.4h, v1.h[0] \n"
"smlal v17.4s, v4.4h, v1.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"smlal v18.4s, v4.4h, v1.h[2] \n"
"smlal v19.4s, v4.4h, v1.h[3] \n"
"smlal2 v8.4s, v4.8h, v1.h[4] \n"
"smlal2 v9.4s, v4.8h, v1.h[5] \n"
"smlal2 v10.4s, v4.8h, v1.h[6] \n"
"smlal2 v11.4s, v4.8h, v1.h[7] \n"
"smlal2 v12.4s, v4.8h, v2.h[0] \n"
"smlal2 v13.4s, v4.8h, v2.h[1] \n"
"smlal2 v14.4s, v4.8h, v2.h[2] \n"
"smlal2 v15.4s, v4.8h, v2.h[3] \n"
"smlal2 v16.4s, v4.8h, v2.h[4] \n"
"smlal2 v17.4s, v4.8h, v2.h[5] \n"
"smlal2 v18.4s, v4.8h, v2.h[6] \n"
"smlal2 v19.4s, v4.8h, v2.h[7] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r45
"smlal v8.4s, v5.4h, v3.h[0] \n"
"smlal v9.4s, v5.4h, v3.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"smlal v10.4s, v5.4h, v3.h[2] \n"
"smlal v11.4s, v5.4h, v3.h[3] \n"
"smlal v12.4s, v5.4h, v3.h[4] \n"
"smlal v13.4s, v5.4h, v3.h[5] \n"
"smlal v14.4s, v5.4h, v3.h[6] \n"
"smlal v15.4s, v5.4h, v3.h[7] \n"
"smlal v16.4s, v5.4h, v0.h[0] \n"
"smlal v17.4s, v5.4h, v0.h[1] \n"
"smlal v18.4s, v5.4h, v0.h[2] \n"
"smlal v19.4s, v5.4h, v0.h[3] \n"
"ld1 {v6.8h, v7.8h}, [%3], #32 \n" // w23
"smlal2 v8.4s, v5.8h, v0.h[4] \n"
"smlal2 v9.4s, v5.8h, v0.h[5] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal2 v10.4s, v5.8h, v0.h[6] \n"
"smlal2 v11.4s, v5.8h, v0.h[7] \n"
"ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r67
"smlal2 v12.4s, v5.8h, v1.h[0] \n"
"smlal2 v13.4s, v5.8h, v1.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"smlal2 v14.4s, v5.8h, v1.h[2] \n"
"smlal2 v15.4s, v5.8h, v1.h[3] \n"
"smlal2 v16.4s, v5.8h, v1.h[4] \n"
"smlal2 v17.4s, v5.8h, v1.h[5] \n"
"smlal2 v18.4s, v5.8h, v1.h[6] \n"
"smlal2 v19.4s, v5.8h, v1.h[7] \n"
"smlal v8.4s, v6.4h, v2.h[0] \n"
"smlal v9.4s, v6.4h, v2.h[1] \n"
"smlal v10.4s, v6.4h, v2.h[2] \n"
"smlal v11.4s, v6.4h, v2.h[3] \n"
"smlal v12.4s, v6.4h, v2.h[4] \n"
"smlal v13.4s, v6.4h, v2.h[5] \n"
"smlal v14.4s, v6.4h, v2.h[6] \n"
"smlal v15.4s, v6.4h, v2.h[7] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r89
"smlal v16.4s, v6.4h, v3.h[0] \n"
"smlal v17.4s, v6.4h, v3.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"smlal v18.4s, v6.4h, v3.h[2] \n"
"smlal v19.4s, v6.4h, v3.h[3] \n"
"smlal2 v8.4s, v6.8h, v3.h[4] \n"
"smlal2 v9.4s, v6.8h, v3.h[5] \n"
"smlal2 v10.4s, v6.8h, v3.h[6] \n"
"smlal2 v11.4s, v6.8h, v3.h[7] \n"
"smlal2 v12.4s, v6.8h, v0.h[0] \n"
"smlal2 v13.4s, v6.8h, v0.h[1] \n"
"smlal2 v14.4s, v6.8h, v0.h[2] \n"
"smlal2 v15.4s, v6.8h, v0.h[3] \n"
"smlal2 v16.4s, v6.8h, v0.h[4] \n"
"smlal2 v17.4s, v6.8h, v0.h[5] \n"
"smlal2 v18.4s, v6.8h, v0.h[6] \n"
"smlal2 v19.4s, v6.8h, v0.h[7] \n"
"ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r1011
"smlal v8.4s, v7.4h, v1.h[0] \n"
"smlal v9.4s, v7.4h, v1.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"smlal v10.4s, v7.4h, v1.h[2] \n"
"smlal v11.4s, v7.4h, v1.h[3] \n"
"smlal v12.4s, v7.4h, v1.h[4] \n"
"smlal v13.4s, v7.4h, v1.h[5] \n"
"smlal v14.4s, v7.4h, v1.h[6] \n"
"smlal v15.4s, v7.4h, v1.h[7] \n"
"smlal v16.4s, v7.4h, v2.h[0] \n"
"smlal v17.4s, v7.4h, v2.h[1] \n"
"smlal v18.4s, v7.4h, v2.h[2] \n"
"smlal v19.4s, v7.4h, v2.h[3] \n"
"ld1 {v4.8h, v5.8h}, [%3], #32 \n" // w01
"smlal2 v8.4s, v7.8h, v2.h[4] \n"
"smlal2 v9.4s, v7.8h, v2.h[5] \n"
"prfm pldl1keep, [%3, #256] \n"
"smlal2 v10.4s, v7.8h, v2.h[6] \n"
"smlal2 v11.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
"smlal2 v12.4s, v7.8h, v3.h[0] \n"
"smlal2 v13.4s, v7.8h, v3.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"smlal2 v14.4s, v7.8h, v3.h[2] \n"
"smlal2 v15.4s, v7.8h, v3.h[3] \n"
"smlal2 v16.4s, v7.8h, v3.h[4] \n"
"smlal2 v17.4s, v7.8h, v3.h[5] \n"
"subs %w0, %w0, #1 \n"
"smlal2 v18.4s, v7.8h, v3.h[6] \n"
"smlal2 v19.4s, v7.8h, v3.h[7] \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
"sub %3, %3, #32 \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i + 7 < tiles; i += 8)
{
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _val2 = vld1q_s16(r0 + 16);
int16x8_t _val3 = vld1q_s16(r0 + 24);
int16x8_t _val4 = vld1q_s16(r0 + 32);
int16x8_t _val5 = vld1q_s16(r0 + 40);
int16x8_t _val6 = vld1q_s16(r0 + 48);
int16x8_t _val7 = vld1q_s16(r0 + 56);
int16x8_t _w0 = vld1q_s16(k0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w0), vget_low_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val0), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w0), vget_low_s16(_val0), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_high_s16(_val0), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w0), vget_high_s16(_val0), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_high_s16(_val0), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w0), vget_high_s16(_val0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w0), vget_low_s16(_val1), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val1), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w0), vget_low_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w0), vget_high_s16(_val1), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_high_s16(_val1), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w0), vget_high_s16(_val1), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_high_s16(_val1), 3);
int16x8_t _w1 = vld1q_s16(k0 + 8);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val2), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w1), vget_low_s16(_val2), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val2), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w1), vget_low_s16(_val2), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_high_s16(_val2), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w1), vget_high_s16(_val2), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_high_s16(_val2), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w1), vget_high_s16(_val2), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w1), vget_low_s16(_val3), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val3), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w1), vget_low_s16(_val3), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val3), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w1), vget_high_s16(_val3), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_high_s16(_val3), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w1), vget_high_s16(_val3), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_high_s16(_val3), 3);
int16x8_t _w2 = vld1q_s16(k0 + 16);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val4), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w2), vget_low_s16(_val4), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val4), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w2), vget_low_s16(_val4), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_high_s16(_val4), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w2), vget_high_s16(_val4), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_high_s16(_val4), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w2), vget_high_s16(_val4), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w2), vget_low_s16(_val5), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val5), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w2), vget_low_s16(_val5), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val5), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w2), vget_high_s16(_val5), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_high_s16(_val5), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w2), vget_high_s16(_val5), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_high_s16(_val5), 3);
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val6), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w3), vget_low_s16(_val6), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val6), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w3), vget_low_s16(_val6), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_high_s16(_val6), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w3), vget_high_s16(_val6), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_high_s16(_val6), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w3), vget_high_s16(_val6), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w3), vget_low_s16(_val7), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val7), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w3), vget_low_s16(_val7), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val7), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w3), vget_high_s16(_val7), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_high_s16(_val7), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w3), vget_high_s16(_val7), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_high_s16(_val7), 3);
r0 += 64;
k0 += 32;
}
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output0_tm + 4, _sum1);
vst1q_s32(output0_tm + 8, _sum2);
vst1q_s32(output0_tm + 12, _sum3);
vst1q_s32(output0_tm + 16, _sum4);
vst1q_s32(output0_tm + 20, _sum5);
vst1q_s32(output0_tm + 24, _sum6);
vst1q_s32(output0_tm + 28, _sum7);
output0_tm += 32;
}
#endif // __aarch64__
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const short* r0 = bb2.row<const short>(i / 4);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
#if __aarch64__
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _val2 = vld1q_s16(r0 + 16);
int16x8_t _val3 = vld1q_s16(r0 + 24);
int16x8_t _w0 = vld1q_s16(k0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val2), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val2), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val3), 0);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val3), 1);
int16x8_t _w1 = vld1q_s16(k0 + 8);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val2), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val2), 3);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val3), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val3), 3);
int16x8_t _w2 = vld1q_s16(k0 + 16);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_high_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_high_s16(_val1), 1);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_high_s16(_val2), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_high_s16(_val2), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_high_s16(_val3), 0);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_high_s16(_val3), 1);
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_high_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_high_s16(_val1), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_high_s16(_val2), 2);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_high_s16(_val2), 3);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_high_s16(_val3), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_high_s16(_val3), 3);
r0 += 32;
k0 += 32;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
_sum4 = vaddq_s32(_sum4, _sum5);
_sum6 = vaddq_s32(_sum6, _sum7);
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output0_tm + 4, _sum2);
vst1q_s32(output0_tm + 8, _sum4);
vst1q_s32(output0_tm + 12, _sum6);
output0_tm += 16;
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%2, #256] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #256] \n"
"vld1.s16 {d8-d11}, [%3 :128]! \n"
"vmlal.s16 q8, d8, d0[0] \n"
"vmlal.s16 q12, d9, d0[1] \n"
"vmlal.s16 q9, d8, d2[0] \n"
"vmlal.s16 q13, d9, d2[1] \n"
"vmlal.s16 q10, d8, d4[0] \n"
"vmlal.s16 q14, d9, d4[1] \n"
"vmlal.s16 q11, d8, d6[0] \n"
"vmlal.s16 q15, d9, d6[1] \n"
"pld [%3, #128] \n"
"vld1.s16 {d8-d9}, [%3 :128]! \n"
"vmlal.s16 q8, d10, d0[2] \n"
"vmlal.s16 q12, d11, d0[3] \n"
"vmlal.s16 q9, d10, d2[2] \n"
"vmlal.s16 q13, d11, d2[3] \n"
"vmlal.s16 q10, d10, d4[2] \n"
"vmlal.s16 q14, d11, d4[3] \n"
"vmlal.s16 q11, d10, d6[2] \n"
"vmlal.s16 q15, d11, d6[3] \n"
"pld [%3, #128] \n"
"vld1.s16 {d10-d11}, [%3 :128]! \n"
"vmlal.s16 q8, d8, d1[0] \n"
"vmlal.s16 q12, d9, d1[1] \n"
"vmlal.s16 q9, d8, d3[0] \n"
"vmlal.s16 q13, d9, d3[1] \n"
"vmlal.s16 q10, d8, d5[0] \n"
"vmlal.s16 q14, d9, d5[1] \n"
"vmlal.s16 q11, d8, d7[0] \n"
"vmlal.s16 q15, d9, d7[1] \n"
"subs %0, %0, #1 \n"
"vmlal.s16 q8, d10, d1[2] \n"
"vmlal.s16 q12, d11, d1[3] \n"
"vmlal.s16 q9, d10, d3[2] \n"
"vmlal.s16 q13, d11, d3[3] \n"
"vmlal.s16 q10, d10, d5[2] \n"
"vmlal.s16 q14, d11, d5[3] \n"
"vmlal.s16 q11, d10, d7[2] \n"
"vmlal.s16 q15, d11, d7[3] \n"
"bne 0b \n"
"vadd.s32 q8, q8, q12 \n"
"vadd.s32 q9, q9, q13 \n"
"vadd.s32 q10, q10, q14 \n"
"vadd.s32 q11, q11, q15 \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn),
"=r"(output0_tm),
"=r"(r0),
"=r"(k0)
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _w0 = vld1q_s16(k0);
int16x8_t _w1 = vld1q_s16(k0 + 8);
int16x8_t _w2 = vld1q_s16(k0 + 16);
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_high_s16(_val1), 0);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_high_s16(_val1), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_high_s16(_val1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_high_s16(_val1), 3);
r0 += 16;
k0 += 32;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output0_tm + 4, _sum2);
output0_tm += 8;
}
for (; i < tiles; i++)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _w0 = vld1q_s16(k0);
int16x8_t _w1 = vld1q_s16(k0 + 8);
int16x8_t _w2 = vld1q_s16(k0 + 16);
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3);
r0 += 8;
k0 += 32;
}
_sum0 = vaddq_s32(_sum0, _sum1);
vst1q_s32(output0_tm, _sum0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
int tmp[4][6][4];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4;
const int* output0_tm_1 = output0_tm_0 + tiles * 4;
const int* output0_tm_2 = output0_tm_0 + tiles * 8;
const int* output0_tm_3 = output0_tm_0 + tiles * 12;
const int* output0_tm_4 = output0_tm_0 + tiles * 16;
const int* output0_tm_5 = output0_tm_0 + tiles * 20;
int* output0 = out0.row<int>(i * 4) + (j * 4) * 4;
// TODO neon optimize
for (int m = 0; m < 5; m++)
{
int32x4_t _out0tm0 = vld1q_s32(output0_tm_0);
int32x4_t _out0tm1 = vld1q_s32(output0_tm_1);
int32x4_t _out0tm2 = vld1q_s32(output0_tm_2);
int32x4_t _out0tm3 = vld1q_s32(output0_tm_3);
int32x4_t _out0tm4 = vld1q_s32(output0_tm_4);
int32x4_t _out0tm5 = vld1q_s32(output0_tm_5);
int32x4_t _tmp02a = vaddq_s32(_out0tm1, _out0tm2);
int32x4_t _tmp13a = vsubq_s32(_out0tm1, _out0tm2);
int32x4_t _tmp02b = vaddq_s32(_out0tm3, _out0tm4);
int32x4_t _tmp13b = vsubq_s32(_out0tm3, _out0tm4);
int32x4_t _v2 = vdupq_n_s32(2);
int32x4_t _v4 = vdupq_n_s32(4);
int32x4_t _v8 = vdupq_n_s32(8);
int32x4_t _tmp0m = vaddq_s32(vaddq_s32(_out0tm0, _tmp02a), _tmp02b);
int32x4_t _tmp1m = vmlaq_s32(_tmp13a, _tmp13b, _v2);
int32x4_t _tmp2m = vmlaq_s32(_tmp02a, _tmp02b, _v4);
int32x4_t _tmp3m = vmlaq_s32(vmlaq_s32(_tmp13a, _out0tm5, _v4), _tmp13b, _v8);
vst1q_s32(tmp[0][m], _tmp0m);
vst1q_s32(tmp[1][m], _tmp1m);
vst1q_s32(tmp[2][m], _tmp2m);
vst1q_s32(tmp[3][m], _tmp3m);
output0_tm_0 += tiles * 24;
output0_tm_1 += tiles * 24;
output0_tm_2 += tiles * 24;
output0_tm_3 += tiles * 24;
output0_tm_4 += tiles * 24;
output0_tm_5 += tiles * 24;
}
for (int m = 5; m < 6; m++)
{
int32x4_t _out0tm0 = vld1q_s32(output0_tm_0);
int32x4_t _out0tm1 = vld1q_s32(output0_tm_1);
int32x4_t _out0tm2 = vld1q_s32(output0_tm_2);
int32x4_t _out0tm3 = vld1q_s32(output0_tm_3);
int32x4_t _out0tm4 = vld1q_s32(output0_tm_4);
int32x4_t _out0tm5 = vld1q_s32(output0_tm_5);
int32x4_t _tmp02a = vaddq_s32(_out0tm1, _out0tm2);
int32x4_t _tmp13a = vsubq_s32(_out0tm1, _out0tm2);
int32x4_t _tmp02b = vaddq_s32(_out0tm3, _out0tm4);
int32x4_t _tmp13b = vsubq_s32(_out0tm3, _out0tm4);
int32x4_t _v2 = vdupq_n_s32(2);
int32x4_t _v4 = vdupq_n_s32(4);
int32x4_t _v8 = vdupq_n_s32(8);
int32x4_t _tmp0m = vaddq_s32(vaddq_s32(_out0tm0, _tmp02a), _tmp02b);
int32x4_t _tmp1m = vmlaq_s32(_tmp13a, _tmp13b, _v2);
int32x4_t _tmp2m = vmlaq_s32(_tmp02a, _tmp02b, _v4);
int32x4_t _tmp3m = vmlaq_s32(vmlaq_s32(_tmp13a, _out0tm5, _v4), _tmp13b, _v8);
_tmp0m = vmulq_s32(_tmp0m, _v4);
_tmp1m = vmulq_s32(_tmp1m, _v4);
_tmp2m = vmulq_s32(_tmp2m, _v4);
_tmp3m = vmulq_s32(_tmp3m, _v4);
vst1q_s32(tmp[0][m], _tmp0m);
vst1q_s32(tmp[1][m], _tmp1m);
vst1q_s32(tmp[2][m], _tmp2m);
vst1q_s32(tmp[3][m], _tmp3m);
output0_tm_0 += tiles * 24;
output0_tm_1 += tiles * 24;
output0_tm_2 += tiles * 24;
output0_tm_3 += tiles * 24;
output0_tm_4 += tiles * 24;
output0_tm_5 += tiles * 24;
}
for (int m = 0; m < 4; m++)
{
int32x4_t _tmp00 = vld1q_s32(tmp[m][0]);
int32x4_t _tmp01 = vld1q_s32(tmp[m][1]);
int32x4_t _tmp02 = vld1q_s32(tmp[m][2]);
int32x4_t _tmp03 = vld1q_s32(tmp[m][3]);
int32x4_t _tmp04 = vld1q_s32(tmp[m][4]);
int32x4_t _tmp05 = vld1q_s32(tmp[m][5]);
int32x4_t _tmp02a = vaddq_s32(_tmp01, _tmp02);
int32x4_t _tmp13a = vsubq_s32(_tmp01, _tmp02);
int32x4_t _tmp02b = vaddq_s32(_tmp03, _tmp04);
int32x4_t _tmp13b = vsubq_s32(_tmp03, _tmp04);
int32x4_t _v2 = vdupq_n_s32(2);
int32x4_t _v4 = vdupq_n_s32(4);
int32x4_t _v8 = vdupq_n_s32(8);
int32x4_t _out00 = vaddq_s32(vaddq_s32(_tmp00, _tmp02a), _tmp02b);
int32x4_t _out01 = vmlaq_s32(_tmp13a, _tmp13b, _v2);
int32x4_t _out02 = vmlaq_s32(_tmp02a, _tmp02b, _v4);
int32x4_t _out03 = vmlaq_s32(vaddq_s32(_tmp05, _tmp13a), _tmp13b, _v8);
// TODO use integer trick for division by 576
float32x4_t _v576 = vdupq_n_f32(1.0 / 576);
_out00 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out00), _v576));
_out01 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out01), _v576));
_out02 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out02), _v576));
_out03 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out03), _v576));
vst1q_s32(output0, _out00);
vst1q_s32(output0 + 4, _out01);
vst1q_s32(output0 + 8, _out02);
vst1q_s32(output0 + 12, _out03);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
GB_AxB_dot2.c | //------------------------------------------------------------------------------
// GB_AxB_dot2: compute C=A'*B or C<!M>=A'*B in parallel, in place
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// GB_AxB_dot2 does its computation in two phases. The first phase counts the
// number of entries in each column of C. The second phase can then construct
// the result C in place, and thus this method can be done in parallel for the
// single matrix computation C=A'*B.
// Two variants are handled: C=A'*B and C<!M>=A'*B.
// The C<M>=A'*B case (with a non-complemented mask) is handled by GB_AxB_dot3.
#include "GB_mxm.h"
#include "GB_iterator.h"
#ifndef GBCOMPACT
#include "GB_AxB__include.h"
#endif
// GB_FREE_ALL: free the per-task column-count workspace (C_counts [0..naslice-1]),
// each entry an array of cnvec int64_t counters.  Note this is a textual macro:
// it captures naslice, C_counts, and cnvec from the scope where it is expanded
// (the body of GB_AxB_dot2 below), so it must only be used after those are
// defined.  Safe to call on partially-allocated state, assuming GB_FREE_MEMORY
// tolerates NULL pointers — TODO confirm against GB_FREE_MEMORY's definition.
#define GB_FREE_ALL \
{ \
for (int taskid = 0 ; taskid < naslice ; taskid++) \
{ \
GB_FREE_MEMORY (C_counts [taskid], cnvec, sizeof (int64_t)) ; \
} \
}
GrB_Info GB_AxB_dot2                // C=A'*B or C<!M>=A'*B, dot product method
(
    GrB_Matrix *Chandle,            // output matrix
    const GrB_Matrix M,             // mask matrix for C<!M>=A'*B
                                    // if present, the mask is complemented
    const GrB_Matrix *Aslice,       // input matrices (already sliced)
    const GrB_Matrix B,             // input matrix
    const GrB_Semiring semiring,    // semiring that defines C=A*B
    const bool flipxy,              // if true, do z=fmult(b,a) vs fmult(a,b)
    bool *mask_applied,             // if true, mask was applied
    int nthreads,                   // # of threads for the parallel regions
    int naslice,                    // # of slices of A (one counting task each)
    int nbslice,                    // # of slices of B (passed to the workers)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (Aslice != NULL) ;
    GrB_Matrix A = Aslice [0] ;     // just for type and dimensions
    ASSERT (Chandle != NULL) ;
    ASSERT (*Chandle == NULL) ;
    ASSERT_OK_OR_NULL (GB_check (M, "M for dot A'*B", GB0)) ;
    ASSERT_OK (GB_check (A, "A for dot A'*B", GB0)) ;
    // every slice of A must agree with A itself (and with B) in dimensions
    // and type, and must be fully materialized (no pending work, no zombies)
    for (int taskid = 0 ; taskid < naslice ; taskid++)
    {
        ASSERT_OK (GB_check (Aslice [taskid], "A slice for dot2 A'*B", GB0)) ;
        ASSERT (!GB_PENDING (Aslice [taskid])) ;
        ASSERT (!GB_ZOMBIES (Aslice [taskid])) ;
        ASSERT ((Aslice [taskid])->vlen == B->vlen) ;
        ASSERT (A->vlen == (Aslice [taskid])->vlen) ;
        ASSERT (A->vdim == (Aslice [taskid])->vdim) ;
        ASSERT (A->type == (Aslice [taskid])->type) ;
    }
    ASSERT_OK (GB_check (B, "B for dot A'*B", GB0)) ;
    ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT_OK (GB_check (semiring, "semiring for numeric A'*B", GB0)) ;
    ASSERT (A->vlen == B->vlen) ;
    ASSERT (mask_applied != NULL) ;

    //--------------------------------------------------------------------------
    // get the semiring operators
    //--------------------------------------------------------------------------

    GrB_BinaryOp mult = semiring->multiply ;
    GrB_Monoid add = semiring->add ;
    ASSERT (mult->ztype == add->op->ztype) ;

    // if the multiplier is FIRST (or SECOND), one input matrix is never
    // read for its values, so it can be treated as pattern-only
    bool op_is_first = mult->opcode == GB_FIRST_opcode ;
    bool op_is_second = mult->opcode == GB_SECOND_opcode ;
    bool A_is_pattern = false ;
    bool B_is_pattern = false ;

    if (flipxy)
    {
        // z = fmult (b,a) will be computed
        A_is_pattern = op_is_first ;
        B_is_pattern = op_is_second ;
        ASSERT (GB_IMPLIES (!A_is_pattern,
            GB_Type_compatible (A->type, mult->ytype))) ;
        ASSERT (GB_IMPLIES (!B_is_pattern,
            GB_Type_compatible (B->type, mult->xtype))) ;
    }
    else
    {
        // z = fmult (a,b) will be computed
        A_is_pattern = op_is_second ;
        B_is_pattern = op_is_first ;
        ASSERT (GB_IMPLIES (!A_is_pattern,
            GB_Type_compatible (A->type, mult->xtype))) ;
        ASSERT (GB_IMPLIES (!B_is_pattern,
            GB_Type_compatible (B->type, mult->ytype))) ;
    }

    (*Chandle) = NULL ;

    //--------------------------------------------------------------------------
    // compute # of entries in each vector of C
    //--------------------------------------------------------------------------

    GrB_Type ctype = add->op->ztype ;
    int64_t cvlen = A->vdim ;       // C = A'*B, so C inherits A->vdim rows
    int64_t cvdim = B->vdim ;

    if (B->nvec_nonempty < 0)
    {
        B->nvec_nonempty = GB_nvec_nonempty (B, NULL) ;
    }

    int64_t cnvec = B->nvec ;

    // C_counts [taskid][k] will hold the number of entries that task taskid
    // contributes to vector k of C (phase 1), and is then transformed below
    // into that task's starting offset within vector k (for phase 2)
    int64_t *C_counts [naslice] ;
    for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
    {
        C_counts [a_taskid] = NULL ;
    }
    for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
    {
        int64_t *restrict C_count = NULL ;
        GB_CALLOC_MEMORY (C_count, B->nvec, sizeof (int64_t)) ;
        if (C_count == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        C_counts [a_taskid] = C_count ;
    }

    // ensure each slice of A has its nvec_nonempty count computed, since the
    // template below presumably reads it -- confirm in GB_AxB_dot2_meta.c
    for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
    {
        if ((Aslice [a_taskid])->nvec_nonempty < 0)
        {
            (Aslice [a_taskid])->nvec_nonempty =
                GB_nvec_nonempty (Aslice [a_taskid], NULL) ;
        }
    }

    // phase 1: count the entries each task contributes to each vector of C;
    // the counting code itself lives in the included template
    #define GB_PHASE_1_OF_2
    #include "GB_AxB_dot2_meta.c"
    #undef GB_PHASE_1_OF_2

    // allocate C; GB_NEW presumably assigns its status to 'info', which is
    // checked immediately below (matches its use elsewhere in this function)
    GB_NEW (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true,
        GB_SAME_HYPER_AS (B->is_hyper), B->hyper_ratio, cnvec, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_FREE_ALL ;
        return (info) ;
    }

    GrB_Matrix C = (*Chandle) ;
    int64_t *restrict Cp = C->p ;

    // cumulative sum of counts in each column
    // After this loop, C_counts [taskid][k] is the offset within vector k at
    // which task taskid writes its entries in phase 2, and Cp [k] holds the
    // total entry count of vector k.  Task 0's offsets are all zero.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < cnvec ; k++)
    {
        int64_t s = 0 ;
        for (int taskid = 0 ; taskid < naslice ; taskid++)
        {
            int64_t *restrict C_count = C_counts [taskid] ;
            int64_t c = C_count [k] ;
            C_count [k] = s ;
            s += c ;
        }
        Cp [k] = s ;
    }
    Cp [cnvec] = 0 ;
    C->nvec = cnvec ;

    // Cp = cumulative sum of Cp
    GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ;
    int64_t cnz = Cp [cnvec] ;      // total # of entries in C

    // C->h = B->h
    if (B->is_hyper)
    {
        GB_memcpy (C->h, B->h, cnvec * sizeof (int64_t), nthreads) ;
    }

    // free C_count for the first thread; it is no longer needed since all of
    // its offsets are zero after the transform above.  NOTE(review): assumes
    // GB_FREE_MEMORY NULLs the pointer (so the final GB_FREE_ALL does not
    // double-free) and that phase 2 treats a NULL C_counts [0] as all-zero
    // offsets -- confirm against GB_AxB_dot2_meta.c
    GB_FREE_MEMORY (C_counts [0], cnvec, sizeof (int64_t)) ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // allocate C->x and C->i
    //--------------------------------------------------------------------------

    info = GB_ix_alloc (C, cnz, true, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_MATRIX_FREE (Chandle) ;
        GB_FREE_ALL ;
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // C = A'*B, computing each entry with a dot product, via builtin semiring
    //--------------------------------------------------------------------------

    bool done = false ;

#ifndef GBCOMPACT

    //--------------------------------------------------------------------------
    // define the worker for the switch factory
    //--------------------------------------------------------------------------

    #define GB_Adot2B(add,mult,xyname) GB_Adot2B_ ## add ## mult ## xyname

    // the worker returns GrB_NO_VALUE if the semiring is disabled at
    // compile time; 'done' stays false and the generic path runs instead
    #define GB_AxB_WORKER(add,mult,xyname)                              \
    {                                                                   \
        info = GB_Adot2B (add,mult,xyname) (C, M,                       \
            Aslice, A_is_pattern, B, B_is_pattern,                      \
            C_counts, nthreads, naslice, nbslice) ;                     \
        done = (info != GrB_NO_VALUE) ;                                 \
    }                                                                   \
    break ;

    //--------------------------------------------------------------------------
    // launch the switch factory
    //--------------------------------------------------------------------------

    GB_Opcode mult_opcode, add_opcode ;
    GB_Type_code xycode, zcode ;

    if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern, semiring,
        flipxy, &mult_opcode, &add_opcode, &xycode, &zcode))
    {
        // dispatch on (add, mult, type) to a hard-coded worker, if any
        #include "GB_AxB_factory.c"
    }
    ASSERT (info == GrB_SUCCESS || info == GrB_NO_VALUE) ;

#endif

    //--------------------------------------------------------------------------
    // user semirings created at compile time
    //--------------------------------------------------------------------------

    if (semiring->object_kind == GB_USER_COMPILED)
    {
        // determine the required type of A and B for the user semiring
        GrB_Type atype_required, btype_required ;
        if (flipxy)
        {
            // A is passed as y, and B as x, in z = mult(x,y)
            atype_required = mult->ytype ;
            btype_required = mult->xtype ;
        }
        else
        {
            // A is passed as x, and B as y, in z = mult(x,y)
            atype_required = mult->xtype ;
            btype_required = mult->ytype ;
        }
        if (A->type == atype_required && B->type == btype_required)
        {
            info = GB_AxB_user (GxB_AxB_DOT, semiring, Chandle, M, NULL, B,
                flipxy,
                /* heap: */ NULL, NULL, NULL, 0,
                /* Gustavson: */ NULL,
                /* dot: */ Aslice, nthreads, naslice, nbslice, C_counts,
                /* dot3: */ NULL, 0) ;
            done = true ;
        }
    }

    //--------------------------------------------------------------------------
    // C = A'*B, computing each entry with a dot product, with typecasting
    //--------------------------------------------------------------------------

    // generic fallback: function pointers + memcpy-based scalars; slower
    // than the hard-coded workers but handles any semiring and any types
    if (!done)
    {

        //----------------------------------------------------------------------
        // get operators, functions, workspace, contents of A, B, C, and M
        //----------------------------------------------------------------------

        GxB_binary_function fmult = mult->function ;
        GxB_binary_function fadd = add->op->function ;

        size_t csize = C->type->size ;
        size_t asize = A_is_pattern ? 0 : A->type->size ;
        size_t bsize = B_is_pattern ? 0 : B->type->size ;

        size_t xsize = mult->xtype->size ;
        size_t ysize = mult->ytype->size ;

        // scalar workspace: because of typecasting, the x/y types need not
        // be the same as the size of the A and B types.
        // flipxy false: aki = (xtype) A(k,i) and bkj = (ytype) B(k,j)
        // flipxy true:  aki = (ytype) A(k,i) and bkj = (xtype) B(k,j)
        size_t aki_size = flipxy ? ysize : xsize ;
        size_t bkj_size = flipxy ? xsize : ysize ;

        // GB_void *restrict identity = add->identity ;
        GB_void *restrict terminal = add->terminal ;

        GB_cast_function cast_A, cast_B ;
        if (flipxy)
        {
            // A is typecasted to y, and B is typecasted to x
            cast_A = A_is_pattern ? NULL :
                GB_cast_factory (mult->ytype->code, A->type->code) ;
            cast_B = B_is_pattern ? NULL :
                GB_cast_factory (mult->xtype->code, B->type->code) ;
        }
        else
        {
            // A is typecasted to x, and B is typecasted to y
            cast_A = A_is_pattern ? NULL :
                GB_cast_factory (mult->xtype->code, A->type->code) ;
            cast_B = B_is_pattern ? NULL :
                GB_cast_factory (mult->ytype->code, B->type->code) ;
        }

        //----------------------------------------------------------------------
        // C = A'*B via dot products, function pointers, and typecasting
        //----------------------------------------------------------------------

        // aki = A(k,i), located in Ax [pA]
        #define GB_GETA(aki,Ax,pA)                                      \
            GB_void aki [aki_size] ;                                    \
            if (!A_is_pattern) cast_A (aki, Ax +((pA)*asize), asize) ;

        // bkj = B(k,j), located in Bx [pB]
        #define GB_GETB(bkj,Bx,pB)                                      \
            GB_void bkj [bkj_size] ;                                    \
            if (!B_is_pattern) cast_B (bkj, Bx +((pB)*bsize), bsize) ;

        // break if cij reaches the terminal value of the monoid
        #define GB_DOT_TERMINAL(cij)                                    \
            if (terminal != NULL && memcmp (cij, terminal, csize) == 0) \
            {                                                           \
                break ;                                                 \
            }

        // C(i,j) = A(i,k) * B(k,j)
        #define GB_MULT(cij, aki, bkj)                                  \
            GB_MULTIPLY (cij, aki, bkj) ;                               \

        // C(i,j) += A(i,k) * B(k,j)
        #define GB_MULTADD(cij, aki, bkj)                               \
            GB_void zwork [csize] ;                                     \
            GB_MULTIPLY (zwork, aki, bkj) ;                             \
            fadd (cij, cij, zwork) ;

        // define cij for each task
        #define GB_CIJ_DECLARE(cij)                                     \
            GB_void cij [csize] ;

        // address of Cx [p]
        #define GB_CX(p) Cx +((p)*csize)

        // save the value of C(i,j)
        #define GB_CIJ_SAVE(cij,p)                                      \
            memcpy (GB_CX (p), cij, csize) ;

        #define GB_ATYPE GB_void
        #define GB_BTYPE GB_void
        #define GB_CTYPE GB_void

        #define GB_PHASE_2_OF_2

        // loops with function pointers cannot be vectorized
        #define GB_DOT_SIMD ;

        if (flipxy)
        {
            #define GB_MULTIPLY(z,x,y) fmult (z,y,x)
            #include "GB_AxB_dot2_meta.c"
            #undef GB_MULTIPLY
        }
        else
        {
            #define GB_MULTIPLY(z,x,y) fmult (z,x,y)
            #include "GB_AxB_dot2_meta.c"
            #undef GB_MULTIPLY
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_ALL ;
    ASSERT_OK (GB_check (C, "dot: C = A'*B output", GB0)) ;
    ASSERT (*Chandle == C) ;
    (*mask_applied) = (M != NULL) ;     // the mask was used, if present
    return (GrB_SUCCESS) ;
}
|
reduction-clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Sums the integers 0..n-1 into 'suma' (which starts at 10) with an OpenMP
 * parallel-for reduction, then prints the result.
 *
 * argv[1] holds the iteration count n, clamped to at most 20 (the capacity
 * of a[]).  Exits with status -1 if the argument is missing.
 *
 * Fixes vs. the original: explicit 'int' return type for main (implicit int
 * is invalid since C99), a[] declared with a fixed capacity instead of a VLA
 * sized by a variable that is later reassigned (same size, clearer and
 * safer), and an explicit return value. */
int main(int argc, char **argv) {
  enum { MAX_N = 20 };          /* capacity of a[]; n is clamped to this */
  int i, n = MAX_N, a[MAX_N], suma = 10;
  if (argc < 2) {
    fprintf(stderr, "Falta iteraciones\n");
    exit(-1);
  }
  n = atoi(argv[1]);
  if (n > MAX_N) { n = MAX_N; printf("n=%d", n); }
  for (i = 0; i < n; i++) a[i] = i;
  /* Each thread accumulates a private copy of 'suma'; the copies (plus the
   * original value 10) are combined with '+' when the loop ends. */
  #pragma omp parallel for reduction(+:suma)
  for (i = 0; i < n; i++) suma += a[i];
  printf("Tras 'parallel' suma=%d\n", suma);
  return 0;
}
|
cpu_stream.h | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#define ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#include "oneflow/core/ep/include/stream.h"
#include "oneflow/core/ep/cpu/cpu_device.h"
#define OF_RUNTIME_SEQ 0u
#define OF_RUNTIME_OMP 1u
#define OF_RUNTIME_TBB 2u
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
#include <omp.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/global_control.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
#ifdef WITH_ONEDNN
#include <oneapi/dnnl/dnnl.hpp>
#endif
namespace oneflow {
namespace ep {
// Scoped (RAII-style) limit on the number of CPU worker threads.  The
// mechanism depends on the threading runtime selected at compile time via
// OF_CPU_THREADING_RUNTIME:
//   - TBB: a tbb::global_control member caps max_allowed_parallelism for the
//     guard's lifetime; the previous limit is restored when it is destroyed.
//   - OMP: the constructor saves omp_get_max_threads() and installs the new
//     limit with omp_set_num_threads(); the destructor restores the saved
//     value.
//   - SEQ: sequential build; the guard is a no-op.
class CpuNumThreadsGuard {
 public:
  OF_DISALLOW_COPY_AND_MOVE(CpuNumThreadsGuard);
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
  explicit CpuNumThreadsGuard(size_t num_threads)
      : global_thread_limit(tbb::global_control::max_allowed_parallelism, num_threads) {}
  ~CpuNumThreadsGuard() {}
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
  explicit CpuNumThreadsGuard(size_t num_threads) : set_num_threads_(num_threads) {
    saved_num_threads_ = omp_get_max_threads();
    omp_set_num_threads(set_num_threads_);
  }
  ~CpuNumThreadsGuard() { omp_set_num_threads(saved_num_threads_); }
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
  explicit CpuNumThreadsGuard(size_t num_threads) {}
  ~CpuNumThreadsGuard() {}
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif

 private:
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
  tbb::global_control global_thread_limit;  // active while the guard lives
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
  size_t set_num_threads_;    // limit installed by this guard
  size_t saved_num_threads_;  // previous limit, restored on destruction
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
};
// CPU execution stream.  ParallelFor provides data-parallel loops over
// [begin, end) using the threading runtime selected at compile time
// (OpenMP, TBB, or sequential).  When built WITH_ONEDNN the stream also
// owns a oneDNN engine/stream pair for oneDNN-based primitives.
class CpuStream : public Stream {
 public:
  OF_DISALLOW_COPY_AND_MOVE(CpuStream);

  explicit CpuStream(CpuDevice* device) : device_(device) {
#ifdef WITH_ONEDNN
    // one CPU engine (index 0) and a stream bound to it
    onednn_engine_.reset(new dnnl::engine(dnnl::engine::kind::cpu, 0));
    onednn_stream_.reset(new dnnl::stream(*onednn_engine_));
#endif
  }
  ~CpuStream() override = default;

  DeviceType device_type() const override;
  CpuDevice* device() const override;
  Maybe<void> Sync() override;
  void RecordEvent(Event* event) override;

  // Invokes func(sub_begin, sub_end) over disjoint sub-ranges that cover
  // [begin, end), using the default grain size.
  template<typename F>
  void ParallelFor(int64_t begin, int64_t end, const F& func) {
    ParallelFor(begin, end, func, kParallelForDefaultGrain);
  }

  // Parallel loop over [begin, end).  grain_size bounds the parallelism:
  // at most ceil((end - begin) / grain_size) threads are used, so each
  // thread gets at least grain_size iterations.  Empty ranges return
  // immediately without touching the threading runtime.
  template<typename F>
  void ParallelFor(int64_t begin, int64_t end, const F& func, size_t grain_size) {
#if OF_CPU_THREADING_RUNTIME != OF_RUNTIME_SEQ
    auto DivUp = [](int64_t x, int64_t y) { return (x + y - 1) / y; };
    size_t num_threads = device()->GetNumThreads();
#endif
    if (begin >= end) { return; }
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
    if (grain_size > 0) {
      // cap the thread count so each thread gets at least grain_size items
      num_threads = std::min(num_threads, (size_t)(DivUp((end - begin), grain_size)));
    } else {
      // a zero grain size forces serial execution on a single thread
      num_threads = 1;
    }
#pragma omp parallel num_threads(num_threads)
    {
      // split the range into one contiguous chunk per OpenMP thread; the
      // guard below skips threads whose chunk starts past the end
      int64_t omp_num_thread = omp_get_num_threads();
      int64_t chunk_size = DivUp((end - begin), omp_num_thread);
      int64_t omp_tid = omp_get_thread_num();
      int64_t thread_begin_index = begin + omp_tid * chunk_size;
      int64_t thread_end_index = std::min(end, chunk_size + thread_begin_index);
      if (thread_begin_index < end) { func(thread_begin_index, thread_end_index); }
    }
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
    CpuNumThreadsGuard guard(num_threads);
    // chunks hold at least grain_size items; static partitioning keeps the
    // work assignment deterministic
    size_t tmp_chunk_size = DivUp((end - begin), num_threads);
    int64_t chunk_size = std::max(tmp_chunk_size, grain_size);
    tbb::parallel_for(
        tbb::blocked_range<int64_t>(begin, end, chunk_size),
        [func](const tbb::blocked_range<int64_t>& r) { func(r.begin(), r.end()); },
        tbb::static_partitioner{});
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
    func(begin, end);
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
  }

#ifdef WITH_ONEDNN
  dnnl::engine* onednn_engine() const { return onednn_engine_.get(); }
  dnnl::stream* onednn_stream() const { return onednn_stream_.get(); }
#endif

 private:
#ifdef WITH_ONEDNN
  std::unique_ptr<dnnl::engine> onednn_engine_;
  std::unique_ptr<dnnl::stream> onednn_stream_;
#endif
  // raw pointer set at construction; presumably the device outlives the
  // stream -- ownership is managed elsewhere
  CpuDevice* device_;

  // default minimum number of loop iterations per worker thread
  static constexpr size_t kParallelForDefaultGrain = 32768;
};
} // namespace ep
} // namespace oneflow
#endif // ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
|
integrator.h | /// @file integrator.h
/// Numerical integration.
/// @author Ralf Quast
/// @date 2021
/// @copyright MIT License
#ifndef INTEGRATOR_H
#define INTEGRATOR_H
#include <algorithm>
#include <cmath>
#include <valarray>
#include <vector>
#include "base.h"
namespace especia {
/// Numerical integration by means of recursive monotone stable quadrature
/// formulas.
///
/// Further reading:
///
/// Favati, P.; Lotti, G.; and Romani, F. (1991).
/// *Interpolary Integration Formulas for Optimal Composition.*
/// ACM Trans. Math. Software 17, 207-217.
/// https://doi.org/10.1145/108556.108571
/// Favati, P.; Lotti, G.; and Romani, F. (1991).
/// *Algorithm 691: Improving QUADPACK Automatic Integration Routines.*
/// ACM Trans. Math. Software 17, 218-232.
/// https://doi.org/10.1145/108556.108580
///
/// @tparam T The number type used for quadrature calculations. The quadrature weight
/// and abscissa values are defined with a precision of 48 decimal digits.
template<class T = real>
class Integrator {
public:
  /// The recursive monotone stable quadrature formulas.
  enum Formula {
    /// The formula for integration with 13 quadrature points.
    Q13,
    /// The formula for integration with 19 quadrature points.
    Q19,
    /// The formula for integration with 27 quadrature points.
    Q27,
    /// The formula for integration with 41 quadrature points.
    Q41
  };

  /// Constructs a new integrator, based on the formulas supplied as argument.
  ///
  /// @param[in] p The formula with less quadrature points.
  /// @param[in] q The formula with more quadrature points.
  explicit Integrator(Formula p = Q19, Formula q = Q27) : p(p), q(q) {
  }

  /// The destructor.
  ~Integrator() = default;

  /// Computes the integral of a function, with the limits supplied as argument, i.e.
  /// @f[ \int_{a}^{b} f(x) dx @f].
  ///
  /// The interval is adaptively bisected: each refinement step splits the
  /// part with the largest absolute error estimate, until the partition's
  /// total error meets the accuracy goal or the iteration budget is spent.
  ///
  /// @tparam F The integrand type.
  ///
  /// @param[in] f The integrand.
  /// @param[in] a The lower limit of integration.
  /// @param[in] b The upper limit of integration.
  /// @param[in] accuracy_goal The (absolute) accuracy goal.
  /// @param[in] max_iteration The maximum number of iterations.
  /// @return the value of the integral.
  template<class F>
  T integrate(const F &f, T a, T b, T accuracy_goal = T(1.0E-6), natural max_iteration = 100) const {
    Integrator::Partition<F, Integrator::Part<F>> partition(f, a, b, p, q);
    for (natural i = 0; i < max_iteration; ++i) {
      if (partition.absolute_error() < accuracy_goal) {
        break;
      }
      partition.refine();
    }
    return partition.result();
  }

  /// Computes the value of the positive-infinite integral of a function, i.e.
  /// @f[ \int_{0}^{\infty} f(x) dx @f].
  ///
  /// Makes the variable transformation @f$ u = \exp(-x) @f$ and computes
  /// @f[ \int_{0}^{1} \frac{f(-\log(u))}{u} du @f].
  ///
  /// @tparam F The integrand type.
  ///
  /// @param[in] f The integrand.
  /// @param[in] accuracy_goal The (absolute) accuracy goal.
  /// @param[in] max_iteration The maximum number of iterations.
  /// @return the value of the positive-infinite integral.
  ///
  /// @attention The integrand must converge rapidly (faster than @f$ 1/x @f$) to zero at infinity, i.e.
  /// @f[ \lim_{x\to\infty} \frac{f(x)}{x} = 0 @f].
  template<class F>
  T integrate_positive_infinite(const F &f, T accuracy_goal = T(1.0E-6), natural max_iteration = 100) const {
    using std::log;

    return integrate([&f](T u) -> T { return u > T(0.0) ? f(-log(u)) / u : T(0.0); }, // infinity maps to zero
        T(0.0), T(1.0), accuracy_goal, max_iteration);
  }

  /// Computes the value of the negative-infinite integral of a function, i.e.
  /// @f[ \int_{-\infty}^{0} f(x) dx @f].
  ///
  /// @tparam F The integrand type.
  ///
  /// @param[in] f The integrand.
  /// @param[in] accuracy_goal The (absolute) accuracy goal.
  /// @param[in] max_iteration The maximum number of iterations.
  /// @return the value of the negative-infinite integral.
  ///
  /// @attention The integrand must converge rapidly (faster than @f$ 1/x @f$) to zero at infinity, i.e.
  /// @f[ \lim_{x\to -\infty} \frac{f(x)}{x} = 0 @f].
  template<class F>
  T integrate_negative_infinite(const F &f, T accuracy_goal = T(1.0E-6), natural max_iteration = 100) const {
    // mirror the integrand and reuse the positive-infinite transformation
    return integrate_positive_infinite([&f](T x) -> T { return f(-x); }, accuracy_goal, max_iteration);
  }

  /// Computes the value of the infinite integral of a function, i.e.
  /// @f[ \int_{-\infty}^{\infty} f(x) dx @f].
  ///
  /// @tparam F The integrand type.
  ///
  /// @param[in] f The integrand.
  /// @param[in] accuracy_goal The (absolute) accuracy goal.
  /// @param[in] max_iteration The maximum number of iterations.
  /// @return the value of the infinite integral.
  ///
  /// @attention The integrand must converge rapidly (faster than @f$ 1/x @f$) to zero at infinity, i.e.
  /// @f[ \lim_{x\to\pm\infty} \frac{f(x)}{x} = 0 @f].
  template<class F>
  T integrate_infinite(const F &f, T accuracy_goal = T(1.0E-6), natural max_iteration = 100) const {
    return integrate_positive_infinite(f, accuracy_goal, max_iteration)
        + integrate_negative_infinite(f, accuracy_goal, max_iteration);
  }

private:
  /// A part of a numerical integral.
  ///
  /// @tparam F The integrand type.
  template<class F>
  class Part {
  public:
    /// The constructor.
    ///
    /// @param f The integrand.
    /// @param a The lower limit of integration.
    /// @param b The upper limit of integration.
    /// @param p The formula with less quadrature points.
    /// @param q The formula with more quadrature points.
    Part(const F &f, T a, T b, Formula p, Formula q)
        : f(f), a(a), b(b), p(p), q(q), c(T(0.5) * (a + b)), h(T(0.5) * (b - a)), yl(21), yu(21) {
      evaluate();
    }

    /// The destructor.
    ~Part() = default;

    /// Returns the absolute error of the integration result of this part.
    ///
    /// @return the absolute error of the integration result.
    T absolute_error() const {
      return err;
    }

    /// Returns the integration result of this part.
    ///
    /// @return the integration result.
    T result() const {
      return res;
    }

    /// Creates a new part from the lower half of this part.
    ///
    /// @return the lower half part.
    Part *new_lower_part() const {
      // The child reuses integrand values already computed by this part;
      // the index mapping below follows the fixed layout of the recursive
      // monotone stable quadrature points (Favati et al. 1991) -- do not
      // reorder these assignments.
      auto *part = new Part(this, a, c);

      part->yu[0] = yl[2];
      part->yu[1] = yl[7];
      part->yu[2] = yl[1];
      part->yu[4] = f(part->c + Integrator::xi[4] * part->h);
      part->yu[5] = f(part->c + Integrator::xi[5] * part->h);
      part->yu[6] = yl[0];
      part->yl[0] = yl[2];
      part->yl[1] = yl[8];
      part->yl[2] = yl[3];
      part->yl[3] = yl[4];
      part->yl[4] = yl[5];
      part->yl[5] = yl[9];
      part->yl[6] = yl[6];

      // how many values can be reused depends on how many quadrature
      // points this part had already evaluated (nl)
      if (nl > 10) {
        part->yu[3] = yl[10];
        part->yl[7] = yl[11];
        part->yl[8] = yl[12];
        part->yl[9] = yl[13];
        if (nl > 14) {
          part->yu[7] = yl[15];
          part->yu[8] = yl[14];
          part->yu[9] = f(part->c + Integrator::xi[9] * part->h);
          part->yu[10] = yl[16];
          part->yl[10] = yl[17];
          part->yl[11] = yl[18];
          part->yl[12] = yl[19];
          part->yl[13] = yl[20];
          part->nu = 11;
          part->nl = 14;
        } else {
          part->nu = 7;
          part->nl = 10;
        }
      } else {
        part->yu[3] = f(part->c + Integrator::xi[3] * part->h);
        part->nu = 7;
        part->nl = 7;
      }

      part->evaluate();
      return part;
    }

    /// Creates a new part from the upper half of this part.
    ///
    /// @return the upper half part.
    Part *new_upper_part() const {
      // mirror image of new_lower_part(): reuse the parent's upper-half
      // integrand values; same fixed index mapping -- do not reorder
      auto *part = new Part(this, c, b);

      part->yl[0] = yu[2];
      part->yl[1] = yu[7];
      part->yl[2] = yu[1];
      part->yl[4] = f(part->c - Integrator::xi[4] * part->h);
      part->yl[5] = f(part->c - Integrator::xi[5] * part->h);
      part->yl[6] = yu[0];
      part->yu[0] = yu[2];
      part->yu[1] = yu[8];
      part->yu[2] = yu[3];
      part->yu[3] = yu[4];
      part->yu[4] = yu[5];
      part->yu[5] = yu[9];
      part->yu[6] = yu[6];

      if (nu > 10) {
        part->yl[3] = yu[10];
        part->yu[7] = yu[11];
        part->yu[8] = yu[12];
        part->yu[9] = yu[13];
        if (nu > 14) {
          part->yl[7] = yu[15];
          part->yl[8] = yu[14];
          part->yl[9] = f(part->c - Integrator::xi[9] * part->h);
          part->yl[10] = yu[16];
          part->yu[10] = yu[17];
          part->yu[11] = yu[18];
          part->yu[12] = yu[19];
          part->yu[13] = yu[20];
          part->nl = 11;
          part->nu = 14;
        } else {
          part->nl = 7;
          part->nu = 10;
        }
      } else {
        part->yl[3] = f(part->c - Integrator::xi[3] * part->h);
        part->nl = 7;
        part->nu = 7;
      }

      part->evaluate();
      return part;
    }

  private:
    /// Constructs a new part from a parent part.
    ///
    /// @param parent The parent part.
    /// @param a The lower limit of integration.
    /// @param b The upper limit of integration.
    Part(const Part *parent, T a, T b)
        : f(parent->f), a(a), b(b), p(parent->p), q(parent->q),
          c(T(0.5) * (a + b)), h(T(0.5) * (b - a)), yl(21), yu(21) {
      // do not evaluate
    }

    /// Evaluates the integration result of this part and its absolute error.
    /// The error is the difference between the higher-order (q) and
    /// lower-order (p) quadrature results.
    void evaluate() {
      using std::abs;

      res = evaluate(q);
      err = abs(res - evaluate(p));
    }

    /// Evaluates the integration result of this part using the quadrature formula supplied as argument.
    ///
    /// Integrand values not computed previously (indices >= nl or >= nu)
    /// are evaluated lazily and cached in yl/yu for later reuse.  The
    /// parameter deliberately shadows the member q.
    ///
    /// @param q The quadrature formula.
    /// @return the result.
    T evaluate(Formula q) {
      const natural m = Integrator::mw[q];  // start index into the weights
      const natural n = Integrator::nw[q];  // number of weights

      T result = T(0.0);

#ifdef _OPENMP
#pragma omp parallel for reduction(+:result)
#endif
      for (natural i = 0; i < n; ++i) {
        // each iteration touches distinct elements of yl/yu, so the
        // parallel loop is race-free
        if (i >= nl) {
          yl[i] = f(c - h * Integrator::xi[i]);
        }
        if (i >= nu) {
          yu[i] = f(c + h * Integrator::xi[i]);
        }
        result += (yl[i] + yu[i]) * Integrator::wi[m + i];
      }
      if (nl < n) {
        nl = n;
      }
      if (nu < n) {
        nu = n;
      }

      return result * h;
    }

    /// The integrand.
    const F &f;
    /// The lower limit of integration.
    const T a;
    /// The upper limit of integration.
    const T b;
    /// The selected formula with less quadrature points.
    const Formula p;
    /// The selected formula with more quadrature points.
    const Formula q;
    /// The center of the interval of integration.
    const T c;
    /// The width of the interval of integration.
    const T h;
    /// The integrand values for the lower half interval of integration.
    std::valarray<T> yl;
    /// The integrand values for the upper half interval of integration.
    std::valarray<T> yu;
    /// The number of evaluated integrand values for the lower half interval.
    natural nl = 0;
    /// The number of evaluated integrand values for the upper half interval.
    natural nu = 0;
    /// The absolute error of the integration result.
    T err = T(0.0);
    /// The integration result.
    T res = T(0.0);
  };

  /// Compares the absolute error of two parts of a numerical integration.
  ///
  /// @tparam P The part type.
  template<class P>
  class Part_Compare {
  public:
    /// Compares the absolute error of two parts of a numerical integration.
    ///
    /// @param p The first part.
    /// @param q The other part.
    /// @return @c true, if the absolute error of the first part is less than that of the other part.
    bool operator()(const P *p, const P *q) const {
      return p->absolute_error() < q->absolute_error();
    }
  };

  /// A partition of a numerical integral into a complete set of disjoint parts.
  ///
  /// The parts are kept in a max-heap ordered by absolute error, so the
  /// part with the largest error can be found in constant time.
  ///
  /// @tparam F The integrand type.
  /// @tparam P The part type.
  template<class F, class P>
  class Partition {
  public:
    /// The constructor.
    ///
    /// @param f The integrand.
    /// @param a The lower limit of integration.
    /// @param b The upper limit of integration.
    /// @param p The formula with less quadrature points.
    /// @param q The formula with more quadrature points.
    Partition(const F &f, T a, T b, Formula p, Formula q) : part_compare(Part_Compare<P>()) {
      using std::make_heap;
      using std::push_heap;

      auto *part = new P(f, a, b, p, q);
      // the heap is still empty here, so make_heap is a no-op kept for
      // clarity; add_part pushes the initial part onto the heap
      make_heap(parts.begin(), parts.end(), part_compare);
      add_part(part);
    }

    /// The destructor.
    ~Partition() { // NOLINT
      for (auto part : parts) {
        delete part;
      }
    }

    /// Returns the absolute error of the integration result for this partition.
    ///
    /// @return the absolute error of the integration result.
    T absolute_error() const {
      T err = T(0.0);

      for (auto part : parts) {
        err += part->absolute_error();
      }

      return err;
    }

    /// Returns the integration result for this partition.
    ///
    /// @return the integration result.
    T result() const {
      T res = T(0.0);

      for (auto part : parts) {
        res += part->result();
      }

      return res;
    }

    /// Refines this partition by bisecting the part with the largest
    /// absolute error into a lower and an upper half.
    void refine() {
      P *popped = pop_part();

      add_part(popped->new_lower_part());
      add_part(popped->new_upper_part());

      delete popped;
    }

  private:
    /// Removes the part with the largest absolute error of integration from the partition.
    ///
    /// @return the part with the largest absolute error.
    P *pop_part() {
      using std::pop_heap;

      // standard heap pop: the maximum is at the front, pop_heap moves it
      // to the back, pop_back removes it
      P *popped = parts.front();
      pop_heap(parts.begin(), parts.end(), part_compare);
      parts.pop_back();

      return popped;
    }

    /// Adds a new part to the partition.
    ///
    /// @param part The part.
    void add_part(P *part) {
      using std::push_heap;

      parts.push_back(part);
      push_heap(parts.begin(), parts.end(), part_compare);
    }

    /// Compares the absolute error of integration of two parts.
    const Part_Compare<P> part_compare;

    /// The parts of this partition.
    std::vector<P *> parts{};
  };

  /// The selected quadrature formula with less points.
  const Formula p;

  /// The selected quadrature formula with more points.
  const Formula q;

  /// The quadrature abscissa values.
  static const T xi[];

  /// The quadrature weights.
  static const T wi[];

  /// The start indices into the quadrature weights.
  static const natural mw[];

  /// The number of quadrature weights.
  static const natural nw[];
};
template<class T>
const T Integrator<T>::xi[] = {
// abscissas for Q13
T(0.0000000L),
T(0.2500000L),
T(0.5000000L),
T(0.7500000L),
T(0.8750000L),
T(0.9375000L),
T(1.0000000L),
// additional abscissas for Q19, Q27 and Q41
T(0.3750000L),
T(0.6250000L),
T(0.9687500L),
// additional abscissas for Q27 and Q41
T(0.1250000L),
T(0.6875000L),
T(0.8125000L),
T(0.9843750L),
// additional abscissas for Q41
T(0.1875000L),
T(0.3125000L),
T(0.4375000L),
T(0.5625000L),
T(0.8437500L),
T(0.9062500L),
T(0.9921875L)
};
template<class T>
const T Integrator<T>::wi[] = {
// weights for Q13
T(1.303262173284849021810473057638590518409112513421E-01L),
T(2.390632866847646220320329836544615917290026806242E-01L),
T(2.630626354774670227333506083741355715758124943143E-01L),
T(2.186819313830574175167853094864355208948886875898E-01L),
T(2.757897646642836865859601197607471574336674206700E-02L),
T(1.055750100538458443365034879086669791305550493830E-01L),
T(1.571194260595182254168429283636656908546309467968E-02L),
// weights for Q19
T(1.298751627936015783241173611320651866834051160074E-01L),
T(2.249996826462523640447834514709508786970828213187E-01L),
T(1.680415725925575286319046726692683040162290325505E-01L),
T(1.415567675701225879892811622832845252125600939627E-01L),
T(1.006482260551160175038684459742336605269707889822E-01L),
T(2.510604860724282479058338820428989444699235030871E-02L),
T(9.402964360009747110031098328922608224934320397592E-03L),
T(5.542699233295875168406783695143646338274805359780E-02L),
T(9.986735247403367525720377847755415293097913496236E-02L),
T(4.507523056810492466415880450799432587809828791196E-02L),
// weights for Q27
T(6.300942249647773931746170540321811473310938661469E-02L),
T(1.261383225537664703012999637242003647020326905948E-01L),
T(1.273864433581028272878709981850307363453523117880E-01L),
T(8.576500414311820514214087864326799153427368592787E-02L),
T(7.102884842310253397447305465997026228407227220665E-02L),
T(5.026383572857942403759829860675892897279675661654E-02L),
T(4.683670010609093810432609684738393586390722052124E-03L),
T(1.235837891364555000245004813294817451524633100256E-01L),
T(1.148933497158144016800199601785309838604146040215E-01L),
T(1.252575774226122633391477702593585307254527198070E-02L),
T(1.239572396231834242194189674243818619042280816640E-01L),
T(2.501306413750310579525950767549691151739047969345E-02L),
T(4.915957918146130094258849161350510503556792927578E-02L),
T(2.259167374956474713302030584548274729936249753832E-02L),
// weights for Q41
T(6.362762978782724559269342300509058175967124446839E-02L),
T(9.950065827346794643193261975720606296171462239514E-02L),
T(7.048220002718565366098742295389607994441704889441E-02L),
T(6.512297339398335645872697307762912795346716454337E-02L),
T(3.998229150313659724790527138690215186863915308702E-02L),
T(3.456512257080287509832054272964315588028252136044E-02L),
T(2.212167975884114432760321569298651047876071264944E-03L),
T(8.140326425945938045967829319725797511040878579808E-02L),
T(6.583213447600552906273539578430361199084485578379E-02L),
T(2.592913726450792546064232192976262988065252032902E-02L),
T(1.187141856692283347609436153545356484256869129472E-01L),
T(5.999947605385971985589674757013565610751028128731E-02L),
T(5.500937980198041736910257988346101839062581489820E-02L),
T(5.264422421764655969760271538981443718440340270116E-03L),
T(1.533126874056586959338368742803997744815413565014E-02L),
T(3.527159369750123100455704702965541866345781113903E-02L),
T(5.000556431653955124212795201196389006184693561679E-02L),
T(5.744164831179720106340717579281831675999717767532E-02L),
T(1.598823797283813438301248206397233634639162043386E-02L),
T(2.635660410220884993472478832884065450876913559421E-02L),
T(1.196003937945541091670106760660561117114584656319E-02L)
};
// Bookkeeping tables for the quadrature-weight array defined above:
// mw[i] is the starting offset of rule i's weights within the flat weight
// array, and nw[i] is the number of distinct weights stored for rule i.
// The offsets are the prefix sums of nw (0, 0+7, 7+10, 17+14); the last
// group corresponds to the "weights for Q41" block above.  NOTE(review):
// rule identities for the first three groups are defined by the
// Integrator<T> class, which is outside this view — confirm there.
template<class T>
const natural Integrator<T>::mw[] = {
    0, 7, 17, 31
};
template<class T>
const natural Integrator<T>::nw[] = {
    7, 10, 14, 21
};
}
#endif // INTEGRATOR_H
|
GB_unop__lgamma_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lgamma_fp32_fp32
// op(A') function: GB_unop_tran__lgamma_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = lgammaf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = lgammaf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = lgammaf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LGAMMA || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: Cx [p] = lgammaf (Ax [p]) for all
// anz entries.  Cx and Ax may alias.  If Ab is non-NULL, A is stored in
// bitmap form and only entries with Ab [p] != 0 are present.  Returns
// GrB_NO_VALUE when this kernel has been compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__lgamma_fp32_fp32
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse, hypersparse, or full case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity operator with no typecast: a bulk memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = lgammaf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // entry not present in the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = lgammaf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lgammaf (A'): transpose A, typecast, and apply the unary operator.
// The actual transpose loop lives in the shared template
// "GB_unop_transpose.c", which expands using the GB_* macros defined above.
// Returns GrB_NO_VALUE when this kernel has been compiled out.
GrB_Info GB_unop_tran__lgamma_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
binarytrees.gcc-7.c | /* The Computer Language Benchmarks Game
* http://shootout.alioth.debian.org/
*
* contributed by Francesco Abbate
*/
#include <stdlib.h>
#include <stdio.h>
typedef off_t off64_t;
#include <apr_pools.h>
const size_t LINE_SIZE = 64;
struct node
{
int i;
struct node *left;
struct node *right;
};
int
node_check(const struct node *n)
{
if (n->left)
{
int lc = node_check (n->left);
int rc = node_check (n->right);
return lc + n->i - rc;
}
return n->i;
}
/* Allocate one uninitialized node from the given APR memory pool.
 * No NULL check is done here; apr_palloc's behavior on allocation failure
 * depends on the pool's abort function — NOTE(review): confirm against the
 * APR configuration in use. */
struct node *
node_get_avail (apr_pool_t *pool)
{
    return apr_palloc (pool, sizeof(struct node));
}
/* Build a complete binary tree of the given depth, rooted at a node that
 * carries value i.  Children carry 2*i-1 and 2*i; nodes at depth 0 are
 * leaves.  Every node is drawn from the supplied APR pool, so the whole
 * tree is freed by destroying or clearing that pool. */
struct node *
make (int i, int depth, apr_pool_t *pool)
{
    struct node *root = node_get_avail (pool);

    root->i = i;
    if (depth <= 0)
    {
        root->left = NULL;
        root->right = NULL;
    }
    else
    {
        root->left = make (2 * i - 1, depth - 1, pool);
        root->right = make (2 * i, depth - 1, pool);
    }
    return root;
}
/*
 * Benchmark driver: builds a "stretch" tree one level deeper than the
 * requested maximum, keeps one long-lived tree alive for the whole run,
 * then for each even depth builds and checksums many short-lived tree
 * pairs (in parallel across depths) and prints the results in order.
 */
int
main(int argc, char *argv[])
{
    apr_pool_t *long_lived_pool;
    int min_depth = 4;
    int req_depth = (argc == 2 ? atoi(argv[1]) : 10);
    int max_depth = (req_depth > min_depth + 2 ? req_depth : min_depth + 2);
    int stretch_depth = max_depth+1;

    apr_initialize();

    /* Alloc then dealloc stretchdepth tree */
    {
        apr_pool_t *store;
        struct node *curr;

        apr_pool_create (&store, NULL);
        curr = make (0, stretch_depth, store);
        printf ("stretch tree of depth %i\t check: %i\n", stretch_depth,
                node_check (curr));
        apr_pool_destroy (store);
    }

    apr_pool_create (&long_lived_pool, NULL);
    {
        struct node *long_lived_tree = make(0, max_depth, long_lived_pool);

        /* buffer to store output of each thread: depth d writes at the
           fixed offset LINE_SIZE*d, so the parallel loop needs no locks */
        char *outputstr = (char*) malloc(LINE_SIZE * (max_depth + 1) * sizeof(char));
        int d;

        /* BUGFIX: the original never checked the allocation */
        if (outputstr == NULL)
        {
            fprintf(stderr, "ERROR: could not allocate output buffer\n");
            exit(1);
        }

#pragma omp parallel for
        for (d = min_depth; d <= max_depth; d += 2)
        {
            int iterations = 1 << (max_depth - d + min_depth);
            apr_pool_t *store;
            int c = 0, i;

            apr_pool_create (&store, NULL);
            for (i = 1; i <= iterations; ++i)
            {
                struct node *a, *b;
                a = make ( i, d, store);
                b = make (-i, d, store);
                c += node_check (a) + node_check (b);
                apr_pool_clear (store);
            }
            apr_pool_destroy (store);

            /* each thread writes to a separate location */
            sprintf(outputstr + LINE_SIZE * d, "%d\t trees of depth %d\t check: %d\n", (2 * iterations), d, c);
        }

        /* print all results in depth order */
        for (d = min_depth; d <= max_depth; d += 2)
            printf("%s", outputstr + (d * LINE_SIZE) );
        free(outputstr);

        printf ("long lived tree of depth %i\t check: %i\n", max_depth,
                node_check (long_lived_tree));
    }

    /* BUGFIX: the original returned from inside the inner block, leaking
       long_lived_pool and never shutting the APR runtime down. */
    apr_pool_destroy (long_lived_pool);
    apr_terminate ();
    return 0;
}
|
encode_in_pgm.c | #include "PGM.h"
#include <string.h>
/*Refer Research paper:[https://sci-hub.ee/10.1007/s11220-019-0262-y]
for formulation of layers.*/
/*
 * Layer-one embedding: for each pixel, derive the pair (p1, q1) from the
 * cover pixel so that the LSB of p1 encodes bit 1 of the secret and the
 * LSB of (p1/2 rounded down + p1-ish term) encodes bit 2, following the
 * referenced paper's formulation.  p1 and q1 must be pre-allocated
 * height*width int buffers; exits on a NULL buffer.
 */
void generate_layer_one(int *image, int *secret, int height, int width, int *p1, int *q1)
{
    /* BUGFIX: use logical || (the original used bitwise |) and put the
       period before the newline in the message */
    if (!p1 || !q1)
    {
        printf("ERROR: Memory Allocation for first layer failed.\n");
        exit(1);
    }

#pragma omp parallel for shared(image, secret, p1, q1) schedule(static, 4)
    for (int i = 0; i < height * width; i++)
    {
        // Generation of p1 in layer 1: keep the pixel if its LSB already
        // matches secret bit 1, otherwise bump it by one.
        if (get_lsb(image[i]) == get_bit(secret[i], 1))
        {
            p1[i] = image[i];
            // Generation of q1 in layer 1 from secret bit 2.
            if (get_lsb(((int)(image[i] / 2)) + image[i]) == get_bit(secret[i], 2))
            {
                q1[i] = p1[i];
            }
            else
            {
                q1[i] = p1[i] + 1;
            }
        }
        else
        {
            p1[i] = image[i] + 1;
            if (get_lsb(((int)((image[i] - 1) / 2)) + image[i]) == get_bit(secret[i], 2))
            {
                q1[i] = p1[i] - 1;
            }
            else
            {
                q1[i] = p1[i];
            }
        }
    }
}
/*
 * Intermediate (second-layer) pixel generation: from the layer-one pair
 * (p1, q1) derive the candidate pairs (p1', p2') and (q1', q2') that embed
 * secret bits 3-4 and 5-6 respectively, per the referenced paper.  All four
 * output buffers must be pre-allocated height*width int arrays; exits on a
 * NULL buffer.
 */
void generate_intermediate_two(int *secret, int height, int width, int *p1, int *q1, int *p1_l2, int *p2_l2, int *q1_l2, int *q2_l2)
{
    /* BUGFIX: use logical || (the original used bitwise |) */
    if (!q1_l2 || !q2_l2 || !p1_l2 || !p2_l2)
    {
        printf("ERROR: Memory Allocation for intermediate layer failed\n");
        exit(1);
    }

#pragma omp parallel for shared(secret, p1, q1, p1_l2, p2_l2, q1_l2, q2_l2) schedule(static, 4)
    for (int i = 0; i < height * width; i++)
    {
        // Generating p1' and p2' from secret bits 3 and 4.
        if (get_lsb(p1[i]) == get_bit(secret[i], 3))
        {
            p1_l2[i] = p1[i];
            if (get_lsb(p1[i] / 2 + q1[i]) == get_bit(secret[i], 4))
            {
                p2_l2[i] = q1[i];
            }
            else
            {
                p2_l2[i] = q1[i] + 1;
            }
        }
        else
        {
            p1_l2[i] = p1[i] + 1;
            if (get_lsb((p1[i] - 1) / 2 + q1[i]) == get_bit(secret[i], 4))
            {
                p2_l2[i] = q1[i] - 1;
            }
            else
            {
                p2_l2[i] = q1[i];
            }
        }

        // Generating q1' and q2' from secret bits 5 and 6.
        if (get_lsb(p1[i]) == get_bit(secret[i], 5))
        {
            q1_l2[i] = p1[i];
            if (get_lsb(p1[i] / 2 + q1[i]) == get_bit(secret[i], 6))
            {
                q2_l2[i] = q1[i];
            }
            else
            {
                q2_l2[i] = q1[i] + 1;
            }
        }
        else
        {
            q1_l2[i] = p1[i] + 1;
            if (get_lsb((p1[i] - 1) / 2 + q1[i]) == get_bit(secret[i], 6))
            {
                q2_l2[i] = q1[i] - 1;
            }
            else
            {
                q2_l2[i] = q1[i];
            }
        }
    }
}
/*
 * Final-state adjustment for the first pair: rebalance (p1', q1') into
 * (p1*, q1*) so that their sum stays consistent with 2*p1, per the paper's
 * overflow-avoidance rule.  Outputs must be pre-allocated; exits on NULL.
 */
void generate_second_layer_pair1(int height, int width, int *p1, int *p1_l2, int *q1_l2, int *p1_star, int *q1_star)
{
    /* BUGFIX: use logical || (the original used bitwise |) */
    if (!p1_star || !q1_star)
    {
        printf("ERROR: Memory Allocation for Final State Failed\n");
        exit(1);
    }

#pragma omp parallel for schedule(static, 4) shared(p1, p1_l2, q1_l2, p1_star, q1_star)
    for (int i = 0; i < height * width; i++)
    {
        // When both embeddings incremented (sum overshoots by 2), pull
        // p1* back down; otherwise pass the pair through unchanged.
        if ((2 * p1[i] + 2) == p1_l2[i] + q1_l2[i])
        {
            p1_star[i] = p1_l2[i] - 2;
            q1_star[i] = q1_l2[i];
        }
        else
        {
            p1_star[i] = p1_l2[i];
            q1_star[i] = q1_l2[i];
        }
    }
}
/*
 * Final-state adjustment for the second pair: rebalance (p2', q2') into
 * (p2*, q2*) against 2*q1, mirroring generate_second_layer_pair1 but with
 * an extra undershoot case.  Outputs must be pre-allocated; exits on NULL.
 */
void generate_second_layer_pair2(int height, int width, int *q1, int *p2_l2, int *q2_l2, int *p2_star, int *q2_star)
{
    /* BUGFIX: use logical || (the original used bitwise |) */
    if (!p2_star || !q2_star)
    {
        printf("ERROR: Memory Allocation for Final State Failed\n");
        exit(1);
    }

#pragma omp parallel for schedule(static, 4) shared(q1, p2_l2, q2_l2, p2_star, q2_star)
    for (int i = 0; i < height * width; i++)
    {
        if ((2 * q1[i]) > (p2_l2[i] + q2_l2[i]) && p2_l2[i] >= q2_l2[i])
        {
            // sum fell short of 2*q1: push both members up
            q2_star[i] = q2_l2[i] + 2;
            p2_star[i] = p2_l2[i] + 2;
        }
        else if ((2 * q1[i] + 2) == p2_l2[i] + q2_l2[i])
        {
            // sum overshot by 2: pull p2* back down
            p2_star[i] = p2_l2[i] - 2;
            q2_star[i] = q2_l2[i];
        }
        else
        {
            p2_star[i] = p2_l2[i];
            q2_star[i] = q2_l2[i];
        }
    }
}
/*
 * Top-level encoder: runs the three embedding stages on the cover image,
 * writes the four resulting shares to PGM files named "P1*_<name>",
 * "Q1*_<name>", "P2*_<name>", "Q2*_<name>", and frees all intermediates.
 */
void encode(int *image, char *image_name, int *secret, int height, int width)
{
    int *p1, *q1;
    p1 = allocate_space(height, width);
    q1 = allocate_space(height, width);

    // Layer One Generation.
    generate_layer_one(image, secret, height, width, p1, q1);
    printf("\nGeneration of layer one sucessfull\n");

    int *p1_l2, *p2_l2, *q1_l2, *q2_l2;
    p1_l2 = allocate_space(height, width);
    q1_l2 = allocate_space(height, width);
    p2_l2 = allocate_space(height, width);
    q2_l2 = allocate_space(height, width);

    // Intermediate pixel generation
    generate_intermediate_two(secret, height, width, p1, q1, p1_l2, p2_l2, q1_l2, q2_l2);
    printf("\nGeneration of intermediate pixels sucessfull\n");

    int *p1_star, *p2_star, *q1_star, *q2_star;
    // Layer two generation.
    // First pair(p1*,p2*);
    p1_star = allocate_space(height, width);
    p2_star = allocate_space(height, width);
    q1_star = allocate_space(height, width);
    q2_star = allocate_space(height, width);
    generate_second_layer_pair1(height, width, p1, p1_l2, q1_l2, p1_star, q1_star);
    generate_second_layer_pair2(height, width, q1, p2_l2, q2_l2, p2_star, q2_star);
    printf("\nGeneration of layer two sucessfull\n");

    // File creation.
    // BUGFIX: build the output names with bounded snprintf; the original
    // strcat'd image_name into fixed 200-byte buffers, overflowing for
    // long file names.
    char pStarOne[256];
    char pStarTwo[256];
    char qStarOne[256];
    char qStarTwo[256];
    snprintf(pStarOne, sizeof pStarOne, "P1*_%s", image_name);
    snprintf(pStarTwo, sizeof pStarTwo, "P2*_%s", image_name);
    snprintf(qStarOne, sizeof qStarOne, "Q1*_%s", image_name);
    snprintf(qStarTwo, sizeof qStarTwo, "Q2*_%s", image_name);
    write_to_pgm(p1_star, pStarOne, height, width);
    write_to_pgm(q1_star, qStarOne, height, width);
    write_to_pgm(p2_star, pStarTwo, height, width);
    write_to_pgm(q2_star, qStarTwo, height, width);
    printf("\nInformation encoding succesfull\n");

    // Clean memory allocated to pixels.
    free(p1);
    free(q1);
    free(p1_l2);
    free(p2_l2);
    free(q1_l2);
    free(q2_l2);
    free(p1_star);
    free(p2_star);
    free(q1_star);
    free(q2_star);
}
ten_tusscher_2004_epi_S2_11.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_11.h"
// Fill in the generic cell-model descriptor.  GET_CELL_MODEL_DATA is a
// macro from the model header that supplies the signature — presumably
// (cell_model, get_initial_v, get_neq); confirm against the header.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    // report the model's resting potential only when asked
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    // report the number of ODE state variables only when asked
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initialize the NEQ-entry state vector sv with precomputed steady-state
// values (obtained by pacing the model to rest) instead of the textbook
// defaults, which are kept below for reference.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;  millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */
    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.5169285143903,0.00130428563365721,0.778447364000152,0.778294777556242,0.000176052463458467,0.484556008731646,0.00295106634206365,0.999998331220203,1.95009865571766e-08,1.90405217604297e-05,0.999773931735328,1.00732337673749,0.999997839287066,3.97912244489960e-05,0.947578224058516,9.53631582857868,139.823425609239};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advance every requested cell by num_steps explicit time steps of size dt.
// Cells are independent, so the outer loop is parallelized; sv_id must be
// private since each thread resolves its own cell index.  SOLVE_MODEL_ODES_CPU
// is a macro from the header supplying the parameter list — confirm there.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;

#pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        // cells_to_solve == NULL means "solve all cells in order"
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        // each cell's state occupies NEQ consecutive entries of sv
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// One integration step for a single cell: snapshot the state, evaluate the
// model (RHS_cpu returns the already-advanced state, not derivatives),
// and write the result back into sv in place.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    real state[NEQ];
    real next[NEQ];
    int k;

    assert(sv);

    for (k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu(state, next, stim_current, dt);

    for (k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// Right-hand side of the ten Tusscher et al. 2004 human ventricular cell
// model (epicardial variant).  Reads the 17-entry state sv, the stimulus
// current, and the step size dt, and writes the state advanced by one step
// into rDY_.  NOTE(review): despite the name, rDY_ holds updated VALUES,
// not derivatives — gates use the Rush-Larsen exponential update and the
// concentrations/voltage an explicit update; the caller copies rDY_ into sv.
// Statement order matters throughout: currents are computed from the old
// state before the concentration variables are mutated in place.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];   // membrane potential (mV)
    real sm = sv[1];      // INa activation gate
    real sh = sv[2];      // INa fast inactivation gate
    real sj = sv[3];      // INa slow inactivation gate
    real sxr1 = sv[4];    // IKr activation gate
    real sxr2 = sv[5];    // IKr inactivation gate
    real sxs = sv[6];     // IKs gate
    real ss = sv[7];      // Ito inactivation gate
    real sr = sv[8];      // Ito activation gate
    real sd = sv[9];      // ICaL activation gate
    real sf = sv[10];     // ICaL voltage inactivation gate
    real sfca = sv[11];   // ICaL calcium inactivation gate
    real sg = sv[12];     // SR release gate
    real Cai = sv[13];    // cytosolic Ca2+
    real CaSR = sv[14];   // SR Ca2+
    real Nai = sv[15];    // intracellular Na+
    real Ki = sv[16];     // intracellular K+

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // The defaults above are immediately overridden with a fitted
    // parameter set (presumably from a calibration run — confirm origin).
    real parameters []={13.9822763642886,0.000336521649878696,0.000144542916642332,0.000516942526086760,0.253138096656416,0.171109018622005,0.130336142672705,3.88071468613803,0.0154855862471817,2.16547576686118,1091.40643117116,0.000575140596221629,0.180541766553447,0.0183755879605413,0.00807832472755813,1.82509834179719e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Scratch variables for currents, fluxes, and gate kinetics.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents: Nernst/reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents (all from the old state)
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations (Cai/CaSR use an analytic buffered update)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // Ito kinetics are cell-type specific; EPI is expected to be defined
    // for this file (epicardial variant).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g may only move toward their steady state while the cell is
    // depolarized above -37 mV (model's gating rule)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (explicit Euler on the total membrane current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
GB_unop__ainv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint8_uint8)
// op(A') function: GB (_unop_tran__ainv_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the additive-inverse operator elementwise: Cx [p] = -Ax [p]
// (two's-complement negation, well-defined modulo 256 for uint8_t) for all
// anz entries.  Cx and Ax may alias.  If Ab is non-NULL, A is a bitmap and
// only entries with Ab [p] != 0 are present.  Returns GrB_NO_VALUE when
// this kernel has been compiled out (GB_DISABLE).
GrB_Info GB (_unop_apply__ainv_uint8_uint8)
(
    uint8_t *Cx,             // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse, hypersparse, or full case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = -z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // entry not present in the bitmap
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(A'): transpose A, typecast, and apply the additive inverse.  The
// transpose loop is the shared template "GB_unop_transpose.c", expanded
// with the GB_* macros defined above.  Returns GrB_NO_VALUE when this
// kernel has been compiled out.
GrB_Info GB (_unop_tran__ainv_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
shortcut_layer.c | #include "shortcut_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include "utils.h"
#include "gemm.h"
#include <stdio.h>
#include <assert.h>
// Construct a shortcut (residual) layer that sums/combines the outputs of
// n earlier layers with this layer's input.  Output shape equals the input
// shape (w x h x c).  When weights_type is set, per-feature or per-channel
// mixing weights are allocated (initialized to 1) and made trainable.
// Returns the fully-initialized layer by value; input_layers/input_sizes
// and the layers_* pointer arrays are borrowed, not copied.
layer make_shortcut_layer(int batch, int n, int *input_layers, int* input_sizes, int w, int h, int c,
    float **layers_output, float **layers_delta, float **layers_output_gpu, float **layers_delta_gpu, WEIGHTS_TYPE_T weights_type, WEIGHTS_NORMALIZATION_T weights_normalization,
    ACTIVATION activation, int train)
{
    fprintf(stderr, "Shortcut Layer: ");
    int i;
    for(i = 0; i < n; ++i) fprintf(stderr, "%d, ", input_layers[i]);

    layer l = { (LAYER_TYPE)0 };
    l.train = train;
    l.type = SHORTCUT;
    l.batch = batch;
    l.activation = activation;
    l.n = n;
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    l.layers_output = layers_output;
    l.layers_delta = layers_delta;
    l.weights_type = weights_type;
    l.weights_normalization = weights_normalization;
    l.learning_rate_scale = 1; // not necessary

    //l.w = w2;
    //l.h = h2;
    //l.c = c2;
    l.w = l.out_w = w;
    l.h = l.out_h = h;
    l.c = l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;

    //if(w != w2 || h != h2 || c != c2) fprintf(stderr, " w = %d, w2 = %d, h = %d, h2 = %d, c = %d, c2 = %d \n", w, w2, h, h2, c, c2);

    l.index = l.input_layers[0];

    if (train) l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float));
    l.output = (float*)xcalloc(l.outputs * batch, sizeof(float));

    l.nweights = 0;
    if (l.weights_type == PER_FEATURE) l.nweights = (l.n + 1);
    else if (l.weights_type == PER_CHANNEL) l.nweights = (l.n + 1) * l.c;

    if (l.nweights > 0) {
        // BUGFIX/consistency: use the failure-checked xcalloc like the rest
        // of this file (plain calloc results were used unchecked); also
        // dropped the unused `scale` local.
        l.weights = (float*)xcalloc(l.nweights, sizeof(float));
        for (i = 0; i < l.nweights; ++i) l.weights[i] = 1;// +0.01*rand_uniform(-1, 1);// scale*rand_uniform(-1, 1); // rand_normal();

        if (train) l.weight_updates = (float*)xcalloc(l.nweights, sizeof(float));
        l.update = update_shortcut_layer;
    }

    l.forward = forward_shortcut_layer;
    l.backward = backward_shortcut_layer;
#ifndef GPU
    if (l.activation == SWISH || l.activation == MISH) l.activation_input = (float*)xcalloc(l.batch*l.outputs, sizeof(float));
#endif // GPU

#ifdef GPU
    if (l.activation == SWISH || l.activation == MISH) l.activation_input_gpu = cuda_make_array(l.activation_input, l.batch*l.outputs);

    l.forward_gpu = forward_shortcut_layer_gpu;
    l.backward_gpu = backward_shortcut_layer_gpu;

    if (l.nweights > 0) {
        l.update_gpu = update_shortcut_layer_gpu;
        l.weights_gpu = cuda_make_array(l.weights, l.nweights);
        if (train) l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);
    }

    if (train) l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);

    l.input_sizes_gpu = cuda_make_int_array_new_api(input_sizes, l.n);
    l.layers_output_gpu = (float**)cuda_make_array_pointers((void**)layers_output_gpu, l.n);
    l.layers_delta_gpu = (float**)cuda_make_array_pointers((void**)layers_delta_gpu, l.n);
#endif // GPU

    l.bflops = l.out_w * l.out_h * l.out_c * l.n / 1000000000.;
    if (l.weights_type) l.bflops *= 2;
    fprintf(stderr, " wt = %d, wn = %d, outputs:%4d x%4d x%4d %5.3f BF %d\n", l.weights_type, l.weights_normalization, l.out_w, l.out_h, l.out_c, l.bflops, l.batch);
    return l;
}
// Resize the layer to a new spatial size (channel count is unchanged):
// regrow the output/delta buffers and refresh the cached sizes and
// output/delta pointers of every source layer (which were resized first).
void resize_shortcut_layer(layer *l, int w, int h, network *net)
{
    //assert(l->w == l->out_w);
    //assert(l->h == l->out_h);
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;
    if (l->train) l->delta = (float*)xrealloc(l->delta, l->outputs * l->batch * sizeof(float));
    l->output = (float*)xrealloc(l->output, l->outputs * l->batch * sizeof(float));

    int i;
    for (i = 0; i < l->n; ++i) {
        int index = l->input_layers[i];
        l->input_sizes[i] = net->layers[index].outputs;
        l->layers_output[i] = net->layers[index].output;
        l->layers_delta[i] = net->layers[index].delta;
        // every source layer must already have this layer's spatial size
        assert(l->w == net->layers[index].out_w && l->h == net->layers[index].out_h);
    }

    // BUGFIX/consistency: use the failure-checked xrealloc/xcalloc wrappers
    // like the buffers above (the originals used unchecked realloc/calloc).
    if (l->activation == SWISH || l->activation == MISH) l->activation_input = (float*)xrealloc(l->activation_input, l->batch*l->outputs * sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);

    if (l->train) {
        cuda_free(l->delta_gpu);
        l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
    }

    // rebuild the device-side pointer tables from the resized source layers
    float **layers_output_gpu = (float **)xcalloc(l->n, sizeof(float *));
    float **layers_delta_gpu = (float **)xcalloc(l->n, sizeof(float *));

    for (i = 0; i < l->n; ++i) {
        const int index = l->input_layers[i];
        layers_output_gpu[i] = net->layers[index].output_gpu;
        layers_delta_gpu[i] = net->layers[index].delta_gpu;
    }

    memcpy_ongpu(l->input_sizes_gpu, l->input_sizes, l->n * sizeof(int));
    memcpy_ongpu(l->layers_output_gpu, layers_output_gpu, l->n * sizeof(float*));
    memcpy_ongpu(l->layers_delta_gpu, layers_delta_gpu, l->n * sizeof(float*));

    free(layers_output_gpu);
    free(layers_delta_gpu);

    if (l->activation == SWISH || l->activation == MISH) {
        cuda_free(l->activation_input_gpu);
        l->activation_input_gpu = cuda_make_array(l->activation_input, l->batch*l->outputs);
    }
#endif
}
// Forward pass: combine the outputs of the linked source layers with this
// layer's input into l.output, then apply the activation in place.
void forward_shortcut_layer(const layer l, network_state state)
{
    const int from_w = state.net.layers[l.index].w;
    const int from_h = state.net.layers[l.index].h;
    const int from_c = state.net.layers[l.index].c;
    const int same_shape = (from_w == l.w && from_h == l.h && from_c == l.c);

    if (l.nweights == 0 && l.n == 1 && same_shape) {
        // Fast path: a single unweighted skip connection whose source
        // matches this layer's shape exactly -> plain elementwise sum.
        const int total = l.batch * l.w * l.h * l.c;
        int idx;
        #pragma omp parallel for
        for (idx = 0; idx < total; ++idx)
            l.output[idx] = state.input[idx] + state.net.layers[l.index].output[idx];
    }
    else {
        // General path: any number of sources, optional mixing weights.
        shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes, l.layers_output, l.output, state.input, l.weights, l.nweights, l.weights_normalization);
    }

    // SWISH/MISH need the pre-activation values saved for the backward pass.
    if (l.activation == SWISH) activate_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else if (l.activation == MISH) activate_array_mish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation);
}
// Backward pass: turn the activation gradient into l.delta, then scatter
// it (plus any weight gradients) back to this layer's input and to every
// linked source layer.
void backward_shortcut_layer(const layer l, network_state state)
{
    const int count = l.outputs * l.batch;

    switch (l.activation) {
        case SWISH:
            gradient_array_swish(l.output, count, l.activation_input, l.delta);
            break;
        case MISH:
            gradient_array_mish(count, l.activation_input, l.delta);
            break;
        default:
            gradient_array(l.output, count, l.activation, l.delta);
            break;
    }

    backward_shortcut_multilayer_cpu(count, l.outputs, l.batch, l.n, l.input_sizes,
        l.layers_delta, state.delta, l.delta, l.weights, l.weight_updates, l.nweights, state.input, l.layers_output, l.weights_normalization);
}
// SGD-with-momentum update for the optional per-layer mixing weights.
// A no-op for unweighted shortcuts (nweights == 0).
void update_shortcut_layer(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
    if (l.nweights <= 0) return;

    const float learning_rate = learning_rate_init * l.learning_rate_scale;

    // weight_updates -= decay * batch * weights    (L2 weight decay)
    axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
    // weights += (learning_rate / batch) * weight_updates
    axpy_cpu(l.nweights, learning_rate / batch, l.weight_updates, 1, l.weights, 1);
    // keep a momentum fraction of the update for the next step
    scal_cpu(l.nweights, momentum, l.weight_updates, 1);
}
#ifdef GPU
// GPU forward pass: combine the source layers' device outputs with the
// input on the device, then apply the activation in place.  The dedicated
// single-source fast path is kept below, commented out, for reference.
void forward_shortcut_layer_gpu(const layer l, network_state state)
{
    //copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
    //simple_copy_ongpu(l.outputs*l.batch, state.input, l.output_gpu);
    //shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //-----------
    //if (l.outputs == l.input_sizes[0])
    //if(l.n == 1 && l.nweights == 0)
    //{
    //    input_shortcut_gpu(state.input, l.batch, state.net.layers[l.index].w, state.net.layers[l.index].h, state.net.layers[l.index].c,
    //        state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //}
    //else
    {
        // general kernel: any number of sources, optional mixing weights
        shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_output_gpu, l.output_gpu, state.input, l.weights_gpu, l.nweights, l.weights_normalization);
    }

    // SWISH/MISH store pre-activation values for the backward pass
    if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// Backward pass for the shortcut layer on the GPU: activation gradient into
// l.delta_gpu, then propagation of deltas to the previous and linked layers.
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
    // Chain the activation derivative into l.delta_gpu first.
    if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    // Scatter l.delta_gpu into state.delta and the l.n source layers' deltas,
    // accumulating l.weight_updates_gpu when the shortcut is weighted.
    backward_shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_delta_gpu, state.delta, l.delta_gpu,
        l.weights_gpu, l.weight_updates_gpu, l.nweights, state.input, l.layers_output_gpu, l.weights_normalization);
    //axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1);
    //shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}
// SGD-with-momentum update for a weighted shortcut layer on the GPU.
// Includes loss-scale compensation and NaN/Inf sanitation used by
// mixed-precision (Tensor Core) training.
void update_shortcut_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay, float loss_scale)
{
    if (l.nweights > 0) {
        float learning_rate = learning_rate_init*l.learning_rate_scale;
        //float momentum = a.momentum;
        //float decay = a.decay;
        //int batch = a.batch;
        // Undo the loss scaling applied during the backward pass so the
        // update magnitude matches an unscaled FP32 run.
        // Loss scale for Mixed-Precision on Tensor-Cores
        if (loss_scale != 1.0) {
            if(l.weight_updates_gpu && l.nweights > 0) scal_ongpu(l.nweights, 1.0 / loss_scale, l.weight_updates_gpu, 1);
        }
        // Drop NaN/Inf updates entirely; repair NaN/Inf weights in place.
        reset_nan_and_inf(l.weight_updates_gpu, l.nweights);
        fix_nan_and_inf(l.weights_gpu, l.nweights);
        //constrain_weight_updates_ongpu(l.nweights, 1, l.weights_gpu, l.weight_updates_gpu);
        // Clamp updates to [-1, 1] to keep mixing weights stable.
        constrain_ongpu(l.nweights, 1, l.weight_updates_gpu, 1);
        /*
        cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
        cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights);
        CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream()));
        for (int i = 0; i < l.nweights; ++i) printf(" %f, ", l.weight_updates[i]);
        printf(" l.nweights = %d - updates \n", l.nweights);
        for (int i = 0; i < l.nweights; ++i) printf(" %f, ", l.weights[i]);
        printf(" l.nweights = %d \n\n", l.nweights);
        */
        // NOTE: the weight-decay term is disabled (commented out) here,
        // unlike the CPU update path.
        //axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
        axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
        scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);
        //fill_ongpu(l.nweights, 0, l.weight_updates_gpu, 1);
        //if (l.clip) {
        //    constrain_ongpu(l.nweights, l.clip, l.weights_gpu, 1);
        //}
    }
}
// Copy the layer's weights and weight updates from device to host.
// Updates are clamped on the device *before* the transfer, so the order of
// these calls matters.
void pull_shortcut_layer(layer l)
{
    constrain_ongpu(l.nweights, 1, l.weight_updates_gpu, 1);
    cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights);
    cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
    // Async pulls must complete before the host buffers are read.
    CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream()));
}
// Copy the layer's weights from host to device (synchronous push).
void push_shortcut_layer(layer l)
{
    cuda_push_array(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
}
#endif
|
kernel_cpu.c | #ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <omp.h> // (in path known to compiler) needed by openmp
#include <stdlib.h> // (in path known to compiler) needed by malloc
#include <stdio.h> // (in path known to compiler) needed by printf
#include <math.h> // (in path known to compiler) needed by exp
//======================================================================================================================================================150
// MAIN FUNCTION HEADER
//======================================================================================================================================================150
#include "./../lavaMD.h" // (in the main program folder) needed to recognized input variables
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL_CPU FUNCTION HEADER
//======================================================================================================================================================150
#include "kernel_cpu.h" // (in the current directory)
//========================================================================================================================================================================================================200
// PLASMAKERNEL_GPU
//========================================================================================================================================================================================================200
// LavaMD molecular-dynamics kernel, OpenMP CPU version.
// For every box, accumulates pairwise particle interactions between the home
// box and each of its neighbor boxes into the force/potential array fv.
//
// Parameters:
//   par - interaction parameters (only par.alpha is read)
//   dim - problem dimensions (dim.number_boxes is read)
//   box - box descriptors: particle offset, neighbor count/list
//   rv  - particle positions (+ .v component used by the potential)
//   qv  - particle charges
//   fv  - output: accumulated force and potential per particle
//
// Fix vs. original: the timing printf()s printed a percent sign with a bare
// "%" ("%15.12f % : ..."), which is undefined behavior in a printf format
// string; it must be escaped as "%%".
void kernel_cpu(par_str par, dim_str dim, box_str *box, FOUR_VECTOR *rv, fp *qv,
                FOUR_VECTOR *fv) {

  // stage timestamps (microseconds)
  long long time0;
  time0 = get_time();
  long long time1;
  long long time2;
  long long time3;
  long long time4;

  // interaction parameters
  fp alpha;
  fp a2;

  // loop counters
  int i, j, k, l;

  // home box
  long first_i;
  FOUR_VECTOR *rA;
  FOUR_VECTOR *fA;

  // neighbor box
  int pointer;
  long first_j;
  FOUR_VECTOR *rB;
  fp *qB;

  // per-pair scratch values
  fp r2;
  fp u2;
  fp fs;
  fp vij;
  fp fxij, fyij, fzij;
  THREE_VECTOR d;

  time1 = get_time();

  // MCPU SETUP (nothing to do on the plain CPU path)
  time2 = get_time();

  // INPUTS
  alpha = par.alpha;
  a2 = 2.0 * alpha * alpha;

  time3 = get_time();

  // PROCESS INTERACTIONS
  // One iteration per home box; each iteration writes only to that box's
  // particles (fA), so boxes can be processed in parallel.
#pragma omp parallel for private(i, j, k) private(first_i, rA, fA) private( \
    pointer, first_j, rB, qB) private(r2, u2, fs, vij, fxij, fyij, fzij, d)
  for (l = 0; l < dim.number_boxes; l = l + 1) {
    // home box: offset into the common particle arrays
    first_i = box[l].offset;
    rA = &rv[first_i];
    fA = &fv[first_i];

    // the home box itself (k == 0) plus all of its neighbor boxes
    for (k = 0; k < (1 + box[l].nn); k++) {
      if (k == 0) {
        pointer = l; // set first box to be processed to home box
      } else {
        pointer = box[l].nei[k - 1].number; // remaining boxes are neighbors
      }

      // neighbor box particle data
      first_j = box[pointer].offset;
      rB = &rv[first_j];
      qB = &qv[first_j];

      // all pairs: (i in home box) x (j in current box)
      for (i = 0; i < NUMBER_PAR_PER_BOX; i = i + 1) {
        for (j = 0; j < NUMBER_PAR_PER_BOX; j = j + 1) {
          // interaction coefficients
          r2 = rA[i].v + rB[j].v - DOT(rA[i], rB[j]);
          u2 = a2 * r2;
          vij = exp(-u2);
          fs = 2. * vij;
          d.x = rA[i].x - rB[j].x;
          d.y = rA[i].y - rB[j].y;
          d.z = rA[i].z - rB[j].z;
          fxij = fs * d.x;
          fyij = fs * d.y;
          fzij = fs * d.z;
          // accumulate potential and force, weighted by the charge of j
          fA[i].v += qB[j] * vij;
          fA[i].x += qB[j] * fxij;
          fA[i].y += qB[j] * fyij;
          fA[i].z += qB[j] * fzij;
        } // for j
      }   // for i
    }     // for k
  }       // for l

  time4 = get_time();

  // DISPLAY TIMING ("%%" emits a literal percent sign)
  printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
  printf("%15.12f s, %15.12f %% : CPU/MCPU: VARIABLES\n",
         (float)(time1 - time0) / 1000000,
         (float)(time1 - time0) / (float)(time4 - time0) * 100);
  printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n",
         (float)(time2 - time1) / 1000000,
         (float)(time2 - time1) / (float)(time4 - time0) * 100);
  printf("%15.12f s, %15.12f %% : CPU/MCPU: INPUTS\n",
         (float)(time3 - time2) / 1000000,
         (float)(time3 - time2) / (float)(time4 - time0) * 100);
  printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n",
         (float)(time4 - time3) / 1000000,
         (float)(time4 - time3) / (float)(time4 - time0) * 100);
  printf("Total time:\n");
  printf("%.12f s\n", (float)(time4 - time0) / 1000000);
} // main
#ifdef __cplusplus
}
#endif
|
DRB040-truedepsingleelement-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Data race pair: a[i]@63:5 vs. a[0]@63:15
*/
#include <stdio.h>
#include <stdlib.h>
/* Driver for DataRaceBench case DRB040 (truedepsingleelement-var-yes).
 * Builds a VLA of `len` ints (default 1000, overridable via argv[1]),
 * initializes it in parallel, then adds a[0] to every element serially and
 * prints the result.
 *
 * NOTE(review): the file header documents a race pair a[i] vs a[0]; in
 * upstream DRB040 that race comes from parallelizing the `a[i]=a[i]+a[0]`
 * loop, but here the `omp parallel for` sits on the initialization loop
 * instead — confirm against the upstream benchmark before relying on this
 * file as a "race: yes" case. Do NOT "fix" the loop structure: the race is
 * the benchmark's payload.
 *
 * NOTE(review): printf() is used but <stdio.h> is not included — the call
 * relies on an implicit declaration, which C99 and later forbid.
 */
int main (int argc, char* argv[])
{
  int len=1000;
  int i;
  if (argc>1)
    len = atoi(argv[1]);
  int a[len];   /* variable-length array sized from user input */
  a[0] = 2;
#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i] = i;   /* iteration i==0 overwrites the initial a[0] = 2 */
  for (i=0;i<len;i++)
    a[i]=a[i]+a[0];
  for (i=0;i<len;i++)
    printf("%d\n",a[i]);
  return 0;
}
|
utils.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
/* Print an n x n matrix, one row per line, each prefixed with the row's
 * base address.
 * Fix: a pointer must be printed with %p (cast through void*), not %X —
 * passing a pointer for %X is undefined behavior and truncates the address
 * on 64-bit platforms. */
void print_mat(int n, int** mat) {
    int i, j;
    for (i = 0; i < n; i++) {
        printf("%p: ", (void *) mat[i]);
        for (j = 0; j < n; j++) {
            printf("%d ", mat[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}
/* Fill the n x n matrix with pseudo-random values in [0, max_val).
 *
 * Fixes vs. the original:
 *  - removed the invalid `return mat;` from a void function (constraint
 *    violation; hard error in C++);
 *  - rand()/srand() share global state and are not thread-safe, so each
 *    parallel row now uses its own rand_r() seed instead of racing on the
 *    global RNG;
 *  - dropped the nested inner `#pragma omp parallel for` — nested
 *    parallelism is disabled by default and only added overhead. */
void make_rand_mat(int n, int** mat, int max_val) {
    double begin, end;
    int i, j;
    begin = omp_get_wtime();
    unsigned int base_seed = (unsigned int) time(NULL);
    #pragma omp parallel for private(i, j) firstprivate(n)
    for (i = 0; i < n; i++) {
        unsigned int seed = base_seed ^ (unsigned int) i; /* per-row seed */
        for (j = 0; j < n; j++) {
            mat[i][j] = rand_r(&seed) % max_val;
        }
    }
    end = omp_get_wtime();
    printf("matrix initialization with random numbers took %lf seconds\n", end - begin);
}
/* Zero every element of the n x n matrix.
 *
 * Fixes vs. the original:
 *  - removed the invalid `return mat;` from a void function;
 *  - removed the leftover srand() call — no random numbers are generated
 *    here, and reseeding the global RNG was an unintended side effect;
 *  - dropped the redundant nested `#pragma omp parallel for`. */
void make_zero_mat(int n, int** mat) {
    double begin, end;
    int i, j;
    begin = omp_get_wtime();
    #pragma omp parallel for private(i, j) firstprivate(n)
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            mat[i][j] = 0;
        }
    }
    end = omp_get_wtime();
    printf("matrix initialization with zeros took %lf seconds\n", end - begin);
}
/* Compare two n x n matrices. Returns 0 when equal; otherwise returns 1 and
 * stores the coordinates of the first mismatch (row-major order) in
 * *bad_i / *bad_j.
 * Fix: compare with != instead of subtraction — `a - b` is undefined
 * behavior (signed overflow) for operands far apart near INT_MIN/INT_MAX. */
int compare_pat(int n, int* bad_i, int* bad_j, int** mat1, int** mat2) {
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            if (mat1[i][j] != mat2[i][j]) {
                *bad_i = i;
                *bad_j = j;
                return 1;
            }
        }
    }
    return 0;
}
|
kwallet_fmt_plug.c | /* KDE KWallet cracker patch for JtR. Written by Narendra Kangralkar
* <narendrakangralkar at gmail.com> and Dhiru Kholia <dhiru at openwall.com>.
*
* Also see https://github.com/gaganpreet/kwallet-dump ;)
*
* This software is Copyright (c) 2013 by above authors and it is hereby
* released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_kwallet;
#elif FMT_REGISTERS_H
john_register_one(&fmt_kwallet);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/blowfish.h>
#include "sha.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "kwallet"
#define FORMAT_NAME "KDE KWallet"
#define FORMAT_TAG "$kwallet$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// #define BENCH_LARGE_PASSWORDS 1
/* Self-test vectors: "$kwallet$<ctlen>$<hex blob>" paired with the known
 * password (empty string is a valid wallet password). */
static struct fmt_tests kwallet_tests[] = {
	{"$kwallet$112$25be8c9cdaa53f5404d7809ff48a37752b325c8ccd296fbd537440dfcef9d66f72940e97141d21702b325c8ccd296fbd537440dfcef9d66fcd953cf1e41904b0c494ad1e718760e74c4487cc1449233d85525e7974da221774010bb9582b1d68b55ea9288f53a2be6bd15b93a5e1b33d", "openwall"},
	{"$kwallet$240$e5383800cf0ccabf76461a647bf7ed94b7260f0ac33374ea1fec0bb0144b7e3f8fa3d0f368a61075827ac60beb62be830ece6fb2f9cfb13561ed4372af19d0a720a37b0d21132a59513b3ab9030395671c9725d7d6592ad98a4754795c858c59df6049522384af98c77d5351ddc577da07ea10e7d44b3fbc9af737744f53ed0a0a67252599b66a4d1fc65926d7097dc50f45b57f41f11934e0cfc4d5491f82b43f38acde1fd337d51cf47eb5da1bcd8bff1432d7b02f0d316633b33ced337d202a44342fc79db6aea568fb322831d886d4cb6dcc50a3e17c1027550b9ee94f56bc33f9861d2b24cbb7797d79f967bea4", ""},
#ifdef BENCH_LARGE_PASSWORDS
	{"$kwallet$240$f17296588b2dd9f22f7c9ec43fddb5ee28db5edcb69575dcb887f5d2d0bfcc9317773c0f4e32517ace087d33ace8155a099e16c259c1a2f4f8992fc17481b122ef9f0c38c9eafd46794ff34e32c3ad83345f2d4e19ce727379856af9b774c00dca25a8528f5a2318af1fcbffdc6e73e7e081b106b4fbfe1887ea5bde782f9b3c3a2cfe3b215a65c66c03d053bfdee4d5d940e3e28f0c2d9897460fc1153af198b9037aac4dcd76e999c6d6a1f67f559e87349c6416cd7fc37b85ee230ef8caa2417b65732b61dbdb68fd2d12eb3df87474a05f337305c79427a970700a1b63f2018ba06f32e522bba4d30a0ec8ae223d", "pythonpythonpythonpythonpython"},
#endif
	{NULL}
};
#if defined (_OPENMP)
/* thread-count multiplier used by init() to size the key buffers */
static int omp_t = 1;
#endif
/* candidate passwords, one slot per key index set by kwallet_set_key() */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* per-candidate result flag: 1 when verify_passphrase() succeeded */
static int *cracked;
/* currently selected salt: raw wallet ciphertext plus its length */
static struct custom_salt {
	unsigned char ct[0x10000];
	unsigned int ctlen;
} *cur_salt;
/* One-time format initialization: scale keys-per-crypt by the OpenMP thread
 * count and allocate the per-candidate password/result buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	/* OMP_SCALE over-subscribes work per thread to amortize fork/join cost */
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*cracked));
}
/* Release the per-candidate buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/* Validate a "$kwallet$<ctlen>$<hex ct>" input line.
 * Returns 1 when the hash is well-formed, 0 otherwise.
 *
 * Fix: the declared length was never bounded, yet the ciphertext is later
 * copied into fixed 0x10000-byte buffers (custom_salt.ct in get_salt() and
 * the stack buffer in verify_passphrase()); a crafted line with a huge
 * length caused a buffer overflow. Reject lengths above the buffer size. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int res, extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* ctlen */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (!res || res > 0x10000)	/* must fit the fixed ct buffers */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* ct */
		goto err;
	if (hexlenl(p, &extra) != res*2 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse the ciphertext line into a (static) custom_salt: the declared blob
 * length followed by the hex-decoded wallet blob. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	static struct custom_salt *salt;
	char *keeptr = ctcopy;
	int i;
	char *p;
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$kwallet$" */
	if (!salt) salt = mem_calloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	memset(salt, 0, sizeof(*salt));
	p = strtokm(ctcopy, "$");
	/* NOTE(review): ctlen is trusted here but ct[] is only 0x10000 bytes —
	 * relies on valid() having rejected oversized lengths; confirm that
	 * bound is enforced. */
	salt->ctlen = atoi(p);
	p = strtokm(NULL, "$");
	/* hex-decode the ciphertext body, two nibbles per byte */
	for (i = 0; i < salt->ctlen; i++)
		salt->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)salt;
}
/* KWallet password-to-key derivation (must match KWallet byte-for-byte).
 * The password is processed in 16-byte chunks; each chunk is SHA-1 hashed
 * and the digest re-hashed 2000 times, yielding 20 bytes of key material
 * per chunk. The final key layout depends on the password length. */
static void password2hash(const char *password, unsigned char *hash, int *key_size)
{
	SHA_CTX ctx;
	unsigned char output[20 * ((PLAINTEXT_LENGTH + 15) / 16)];
	unsigned char buf[20];
	int i, j, oindex = 0;
	int plength = strlen(password);
	// divide the password into blocks of size 16 and hash the resulting
	// individually!
	// (<= so that an empty password still produces one block)
	for (i = 0; i <= plength; i += 16) {
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, password + i, MIN(plength - i, 16));
		// To make brute force take longer: 2000 extra SHA-1 rounds
		for (j = 0; j < 2000; j++) {
			SHA1_Final(buf, &ctx);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, buf, 20);
		}
		memcpy(output + oindex, buf, 20);
		oindex += 20;
	}
	if (plength < 16) {
		// key size is 20
		memcpy(hash, output, 20);
		*key_size = 20;
	}
	else if (plength < 32) {
		// key size is 40 (20/20)
		memcpy(hash, output, 40);
		*key_size = 40;
	}
	else if (plength < 48) {
		// key size is 56 (20/20/16 split)
		memcpy(hash, output, 56);
		*key_size = 56;
	}
	else {
		// key size is 56 (14/14/14 split)
		memcpy(hash + 14 * 0, output + 0, 14);
		memcpy(hash + 14 * 1, output + 20, 14);
		memcpy(hash + 14 * 2, output + 40, 14);
		*key_size = 56;
	}
}
/* Select the salt (wallet blob) that subsequent crypt_all() calls use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Attempt to decrypt the current wallet blob with the given passphrase.
 * Returns 0 on success, a negative value on structural/hash mismatch.
 * Steps: derive the Blowfish key, decrypt (with KWallet's byte-swapped
 * Blowfish), parse the embedded size field, then compare the SHA-1 of the
 * payload against the trailing stored digest. */
static int verify_passphrase(char *passphrase)
{
	unsigned char key[56]; /* 56 seems to be the max. key size */
	SHA_CTX ctx;
	BF_KEY bf_key;
	int sz;
	int i;
	int key_size = 0;
	unsigned char testhash[20];
	unsigned char buffer[0x10000]; // XXX respect the stack limits!
	const char *t;
	size_t fsize;
	password2hash(passphrase, key, &key_size);
	memcpy(buffer, cur_salt->ct, cur_salt->ctlen);
	/* Blowfish implementation in KWallet is wrong w.r.t endianness
	 * Well, that is why we had bad_blowfish_plug.c originally ;) */
	alter_endianity(buffer, cur_salt->ctlen);
	/* decryption stuff: ECB-decrypt each 8-byte block in place */
	BF_set_key(&bf_key, key_size, key);
	for(i = 0; i < cur_salt->ctlen; i += 8) {
		BF_ecb_encrypt(buffer + i, buffer + i, &bf_key, 0);
	}
	/* swap back to the original byte order */
	alter_endianity(buffer, cur_salt->ctlen);
	/* verification stuff */
	t = (char *) buffer;
	// strip the leading data
	t += 8; // one block of random data
	// strip the file size off (big-endian 32-bit value)
	fsize = 0;
	fsize |= ((size_t) (*t) << 24) & 0xff000000;
	t++;
	fsize |= ((size_t) (*t) << 16) & 0x00ff0000;
	t++;
	fsize |= ((size_t) (*t) << 8) & 0x0000ff00;
	t++;
	fsize |= (size_t) (*t) & 0x000000ff;
	t++;
	if (fsize > (size_t) (cur_salt->ctlen) - 8 - 4) {
		// file structure error
		return -1;
	}
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, t, fsize);
	SHA1_Final(testhash, &ctx);
	// compare hashes: digest is stored in the last 20 bytes of the blob
	sz = cur_salt->ctlen;
	for (i = 0; i < 20; i++) {
		if (testhash[i] != buffer[sz - 20 + i]) {
			return -2;
		}
	}
	return 0;
}
/* Try every queued candidate password against the current salt; record the
 * outcome per index in cracked[]. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		/* verify_passphrase() returns 0 on a successful decrypt+verify */
		cracked[index] = (verify_passphrase(saved_key[index]) == 0);
	}
	return count;
}
/* Did any candidate in this batch crack the salt? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate check: crypt_all() already did the full verification. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Always accept: the format is registered with FMT_NOT_EXACT, so no
 * stronger verification than cmp_one() is available. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void kwallet_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the candidate password stored in slot `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* John the Ripper format descriptor wiring the kwallet callbacks into the
 * core; field order is fixed by struct fmt_main. */
struct fmt_main fmt_kwallet = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_NOT_EXACT: cmp_exact() cannot fully re-verify a match */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		kwallet_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		kwallet_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickCore Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#if defined(MAGICKCORE_WINGDI32_DELEGATE)
# if defined(__CYGWIN__)
# include <windows.h>
# else
/* All MinGW needs ... */
# include "magick/nt-base-private.h"
# include <wingdi.h>
# endif
#endif
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/geometry.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/mutex.h"
#include "magick/nt-feature.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/semaphore-private.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/threshold.h"
#include "magick/thread_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
Global declarations.
*/
/* Legacy progress-monitor callback for the deprecated SetMonitorHandler()
   API; NULL until a handler is registered. */
static MonitorHandler
  monitor_handler = (MonitorHandler) NULL;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualIndexQueue(cache_view);
%
% The format of the AcquireCacheViewIndexes method is:
%
% const IndexPacket *AcquireCacheViewIndexes(const CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport const IndexPacket *AcquireCacheViewIndexes(
  const CacheView *cache_view)
{
  /* Deprecated thin wrapper: forward to the modern cache-view API. */
  const IndexPacket
    *indexes;

  indexes=GetCacheViewVirtualIndexQueue(cache_view);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewPixels() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
%
% The format of the AcquireCacheViewPixels method is:
%
% const PixelPacket *AcquireCacheViewPixels(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireCacheViewPixels(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /* Deprecated thin wrapper: forward to GetCacheViewVirtualPixels(). */
  const PixelPacket
    *pixels;

  pixels=GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImagePixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in RAM, or in a memory-mapped file. The
% returned pointer should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the AcquireImagePixels() and GetAuthenticPixels() methods are not
% thread-safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% Deprecated, replace with:
%
% GetVirtualPixels(image,x,y,columns,rows,exception);
%
% The format of the AcquireImagePixels() method is:
%
% const PixelPacket *AcquireImagePixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireImagePixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /* Deprecated thin wrapper: forward to GetVirtualPixels(). */
  const PixelPacket
    *pixels;

  pixels=GetVirtualPixels(image,x,y,columns,rows,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireIndexes() returns the black channel or the colormap indexes
% associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% Deprecated, replace with:
%
% GetVirtualIndexQueue(image);
%
% The format of the AcquireIndexes() method is:
%
% const IndexPacket *AcquireIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: AcquireIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% o image: the image.
%
*/
MagickExport const IndexPacket *AcquireIndexes(const Image *image)
{
  const IndexPacket
    *indexes;

  /*
    Deprecated shim: equivalent to GetVirtualIndexQueue().
  */
  indexes=GetVirtualIndexQueue(image);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMemory() returns a pointer to a block of memory at least size bytes
% suitably aligned for any use.
%
% The format of the AcquireMemory method is:
%
% void *AcquireMemory(const size_t size)
%
% A description of each parameter follows:
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *AcquireMemory(const size_t size)
{
  /*
    Deprecated since v5.5.7: a thin wrapper around malloc().  A zero-byte
    request is treated as a caller bug (debug builds assert).
  */
  assert(size != 0);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  return(malloc(size));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewPixel method is:
%
% MagickBooleanType AcquireOneCacheViewPixel(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewPixel(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Deprecated shim: forward to GetOneCacheViewVirtualPixel().
  */
  status=GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewVirtualPixel() returns a single pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,
% x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewPixel method is:
%
% MagickBooleanType AcquireOneCacheViewVirtualPixel(
% const CacheView *cache_view,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewVirtualPixel(
  const CacheView *cache_view,const VirtualPixelMethod virtual_pixel_method,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Deprecated shim: forward to GetOneCacheViewVirtualMethodPixel().
  */
  return(GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,x,y,
    pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% MagickPixelPacket pixel;
% GetOneVirtualMagickPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOneMagickPixel() method is:
%
% MagickPixelPacket AcquireOneMagickPixel(const Image image,const ssize_t x,
% const ssize_t y,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickPixelPacket AcquireOneMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  MagickPixelPacket
    packet;

  /*
    Deprecated shim: fetch one virtual pixel via GetOneVirtualMagickPixel()
    and hand it back by value; errors are reported through `exception`.
  */
  (void) GetOneVirtualMagickPixel(image,x,y,&packet,exception);
  return(packet);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs. If you plan to
% modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOnePixel() method is:
%
% PixelPacket AcquireOnePixel(const Image image,const ssize_t x,
% const ssize_t y,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOnePixel(const Image *image,const ssize_t x,
  const ssize_t y,ExceptionInfo *exception)
{
  PixelPacket
    packet;

  /*
    Deprecated shim: fetch one virtual pixel via GetOneVirtualPixel() and
    return it by value; errors are reported through `exception`.
  */
  (void) GetOneVirtualPixel(image,x,y,&packet,exception);
  return(packet);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneVirtualPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,exception);
%
% The format of the AcquireOneVirtualPixel() method is:
%
% PixelPacket AcquireOneVirtualPixel(const Image image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o virtual_pixel_method: the virtual pixel method.
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOneVirtualPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  ExceptionInfo *exception)
{
  PixelPacket
    packet;

  /*
    Deprecated shim: fetch one pixel under the requested virtual pixel
    method via GetOneVirtualMethodPixel() and return it by value.
  */
  (void) GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&packet,
    exception);
  return(packet);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetVirtualPixels().
%
% Deprecated, replace with:
%
% GetVirtualPixelQueue(image);
%
% The format of the AcquirePixels() method is:
%
% const PixelPacket *AcquirePixels(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *AcquirePixels(const Image *image)
{
  const PixelPacket
    *pixels;

  /*
    Deprecated shim: equivalent to GetVirtualPixelQueue().
  */
  pixels=GetVirtualPixelQueue(image);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireSemaphoreInfo() acquires a semaphore.
%
% The format of the AcquireSemaphoreInfo method is:
%
% void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
  /*
    Lazily allocate *semaphore_info on first use.  The check is performed
    twice around the global Magick mutex: the cheap unlocked test skips
    locking entirely once the semaphore exists, and the locked re-test
    guarantees that only one thread performs the allocation.
  */
  assert(semaphore_info != (SemaphoreInfo **) NULL);
  if (*semaphore_info == (SemaphoreInfo *) NULL)
    {
      InitializeMagickMutex();
      LockMagickMutex();
      /* re-check under the lock: another thread may have won the race */
      if (*semaphore_info == (SemaphoreInfo *) NULL)
        *semaphore_info=AllocateSemaphoreInfo();
      UnlockMagickMutex();
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImage() replaces the colors of an image with the closest color from
% a reference image.
%
% Deprecated, replace with:
%
% RemapImage(quantize_info,image,affinity_image);
%
% The format of the AffinityImage method is:
%
% MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *affinity_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /*
    Deprecated shim: equivalent to RemapImage().
  */
  status=RemapImage(quantize_info,image,affinity_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% Deprecated, replace with:
%
% RemapImages(quantize_info,images,affinity_image);
%
% The format of the AffinityImage method is:
%
% MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
% Image *images,Image *affinity_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /*
    Deprecated shim: equivalent to RemapImages().
  */
  status=RemapImages(quantize_info,images,affinity_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImage() returns a pointer to an image structure initialized to
% default values.
%
% Deprecated, replace with:
%
% AcquireImage(image_info);
%
% The format of the AllocateImage method is:
%
% Image *AllocateImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AllocateImage(const ImageInfo *image_info)
{
  Image
    *allocate_image;

  /*
    Deprecated shim: equivalent to AcquireImage().
  */
  allocate_image=AcquireImage(image_info);
  return(allocate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AllocateImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% Deprecated, replace with:
%
% AcquireImageColormap(image,colors);
%
% The format of the AllocateImageColormap method is:
%
% MagickBooleanType AllocateImageColormap(Image *image,
% const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
MagickExport MagickBooleanType AllocateImageColormap(Image *image,
  const size_t colors)
{
  MagickBooleanType
    status;

  /*
    Deprecated shim: equivalent to AcquireImageColormap().
  */
  status=AcquireImageColormap(image,colors);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% Deprecated, replace with:
%
% AcquireNextImage(image_info,image);
%
% The format of the AllocateNextImage method is:
%
% void AllocateNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AllocateNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Deprecated shim: forwards to AcquireNextImage(), which initializes the
    next image in the sequence (per the header comment, image->next points
    to the new image, or NULL on memory shortage).
  */
  AcquireNextImage(image_info,image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateString() allocates memory for a string and copies the source string
% to that memory location (and returns it).
%
% The format of the AllocateString method is:
%
% char *AllocateString(const char *source)
%
% A description of each parameter follows:
%
% o source: A character string.
%
*/
MagickExport char *AllocateString(const char *source)
{
  char
    *clone;

  size_t
    extent;

  /*
    Deprecated since v5.5.7.  Duplicate `source` into freshly allocated
    memory.  The buffer is deliberately padded by MaxTextExtent+1 bytes
    beyond the string length, preserving the historical contract that
    callers may append into the returned buffer.
  */
  assert(source != (const char *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  extent=strlen(source)+MaxTextExtent+1;
  clone=(char *) AcquireQuantumMemory(extent,sizeof(*clone));
  if (clone == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *clone='\0';
  (void) CopyMagickString(clone,source,extent);
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AverageImages() takes a set of images and averages them together. Each
% image in the set must have the same width and height. AverageImages()
% returns a single image with each corresponding pixel component of each
% image averaged. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MeanEvaluateOperator,exception);
%
% The format of the AverageImages method is:
%
% Image *AverageImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AverageImages(const Image *images,ExceptionInfo *exception)
{
  Image
    *average_image;

  /*
    Deprecated shim: averaging is EvaluateImages() with the mean operator.
  */
  average_image=EvaluateImages(images,MeanEvaluateOperator,exception);
  return(average_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Extract a channel from the image. A channel is a particular color component
% of each pixel in the image.
%
% Deprecated, replace with:
%
% SeparateImageChannel(image,channel);
%
% The format of the ChannelImage method is:
%
% unsigned int ChannelImage(Image *image,const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, YellowChannel,
% or BlackChannel.
%
*/
MagickExport unsigned int ChannelImage(Image *image,const ChannelType channel)
{
  unsigned int
    status;

  /*
    Deprecated shim: equivalent to SeparateImageChannel().
  */
  status=SeparateImageChannel(image,channel);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ChannelThresholdImage method is:
%
% unsigned int ChannelThresholdImage(Image *image,const char *level)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o level: define the threshold values.
%
*/
MagickExport unsigned int ChannelThresholdImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    threshold;

  unsigned int
    flags,
    status;

  /*
    Threshold each of the red, green and blue channels independently.
    `level` is parsed as a geometry string: rho supplies the red threshold;
    sigma and xi override the green and blue thresholds when present,
    otherwise both default to the red value.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=((flags & SigmaValue) != 0) ? geometry_info.sigma :
    threshold.red;
  threshold.blue=((flags & XiValue) != 0) ? geometry_info.xi : threshold.red;
  /*
    Apply the per-channel bilevel thresholds; the result is the bitwise AND
    of the three statuses.
  */
  status=BilevelImageChannel(image,RedChannel,threshold.red);
  status&=BilevelImageChannel(image,GreenChannel,threshold.green);
  status&=BilevelImageChannel(image,BlueChannel,threshold.blue);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPathImage() sets the image clip mask based any clipping path information
% if it exists.
%
% Deprecated, replace with:
%
% ClipImagePath(image,pathname,inside);
%
% The format of the ClipImage method is:
%
% MagickBooleanType ClipPathImage(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipPathImage(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
  MagickBooleanType
    status;

  /*
    Deprecated shim: equivalent to ClipImagePath().
  */
  status=ClipImagePath(image,pathname,inside);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageAttributes() clones one or more image attributes.
%
% Deprecated, replace with:
%
% CloneImageProperties(image,clone_image);
%
% The format of the CloneImageAttributes method is:
%
% MagickBooleanType CloneImageAttributes(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageAttributes(Image *image,
  const Image *clone_image)
{
  MagickBooleanType
    status;

  /*
    Deprecated shim: equivalent to CloneImageProperties().
  */
  status=CloneImageProperties(image,clone_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneMemory() copies size bytes from memory area source to the destination.
% Copying between objects that overlap will take place correctly. It returns
% destination.
%
% The format of the CloneMemory method is:
%
% void *CloneMemory(void *destination,const void *source,
% const size_t size)
%
% A description of each parameter follows:
%
% o destination: the destination.
%
% o source: the source.
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *CloneMemory(void *destination,const void *source,
  const size_t size)
{
  /*
    Deprecated since v5.5.7.  Copy `size` bytes from `source` to
    `destination` and return `destination`; per the contract above,
    overlapping regions must be copied correctly.

    The previous hand-rolled overlap test was broken: with
    `(p <= q) || ((p+size) >= q)` any overlap where source precedes
    destination was still dispatched to memcpy() (undefined behavior on
    overlap), and the backward-copy branch was unreachable because p > q
    implies p+size >= q.  memmove() provides exactly the documented
    overlap-safe semantics.
  */
  assert(destination != (void *) NULL);
  assert(source != (const void *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  return(memmove(destination,source,size));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o s e C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloseCacheView() closes the specified view returned by a previous call to
% OpenCacheView().
%
% Deprecated, replace with:
%
% DestroyCacheView(view_info);
%
% The format of the CloseCacheView method is:
%
% CacheView *CloseCacheView(CacheView *view_info)
%
% A description of each parameter follows:
%
% o view_info: the address of a structure of type CacheView.
%
*/
MagickExport CacheView *CloseCacheView(CacheView *view_info)
{
  CacheView
    *view;

  /*
    Deprecated shim: equivalent to DestroyCacheView().
  */
  view=DestroyCacheView(view_info);
  return(view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorFloodfill() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% The format of the ColorFloodfillImage method is:
%
% MagickBooleanType ColorFloodfillImage(Image *image,
% const DrawInfo *draw_info,const PixelPacket target,
% const ssize_t x_offset,const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
/*
  Pending scanline segments are kept on a fixed-size stack; MaxStacksize
  bounds it so a pathological fill cannot grow without limit.
*/
#define MaxStacksize (1UL << 15)
/*
  Push the horizontal segment [left,right] on row `up` with row direction
  `delta` (+1/-1) onto the segment stack `s`.  Segments whose next row
  (up+delta) falls outside the image are silently dropped; exhausting the
  stack raises a DrawError and returns MagickFalse from the enclosing
  function (via ThrowBinaryImageException).
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryImageException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}
MagickExport MagickBooleanType ColorFloodfillImage(Image *image,
  const DrawInfo *draw_info,const PixelPacket target,const ssize_t x_offset,
  const ssize_t y_offset,const PaintMethod method)
{
  /*
    Two-pass scanline flood fill.  Pass 1 marks every reachable pixel by
    setting its opacity to TransparentOpacity in a cloned scratch image
    (floodplane_image), driven by a stack of horizontal segments.  Pass 2
    composites the fill color onto the original image wherever the scratch
    image was marked.
  */
  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  PixelPacket
    fill_color;

  SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Clone the image as the scratch "floodplane"; its opacity channel is the
    reachability mask for pass 1.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  /*
    Set floodfill color.
  */
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  while (s > segment_stack)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.  First scan left from x1 toward column 0,
      marking matching pixels by making the floodplane opacity transparent.
      A pixel matches per `method`: FloodfillMethod fills while colors are
      similar to `target`; otherwise (fill-to-border) fills until the
      border color `target` is reached.
    */
    p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
      &image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /* already-marked pixel: this run was visited before */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      if (method == FloodfillMethod)
        {
          if (IsColorSimilar(image,p,&target) == MagickFalse)
            break;
        }
      else
        if (IsColorSimilar(image,p,&target) != MagickFalse)
          break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
      break;
    /* skip is true when not even the seed pixel x1 matched */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Scan right from x, marking matching pixels, then queue the
            discovered run for the next row (and the reverse direction
            where it extends past x2).
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,
                &image->exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                &image->exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                if (method == FloodfillMethod)
                  {
                    if (IsColorSimilar(image,p,&target) == MagickFalse)
                      break;
                  }
                else
                  if (IsColorSimilar(image,p,&target) != MagickFalse)
                    break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      /*
        Advance past the non-matching gap to find the next run inside
        [x,x2] (note the match sense is inverted here: we are skipping
        pixels that do NOT belong to the fill).
      */
      if (x <= x2)
        {
          p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            if (method == FloodfillMethod)
              {
                if (IsColorSimilar(image,p,&target) != MagickFalse)
                  break;
              }
            else
              if (IsColorSimilar(image,p,&target) == MagickFalse)
                break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  /*
    Pass 2: composite the fill color over every pixel the floodplane marked
    transparent.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane.
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
      &image->exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          /* fill color may vary per pixel (e.g. pattern/gradient fills) */
          (void) GetFillColor(draw_info,x,y,&fill_color);
          MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
            (MagickRealType) q->opacity,q);
        }
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
  /* release scratch resources; success only if pass 2 finished every row */
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentGenesis() instantiates the constitute component.
%
% The format of the ConstituteComponentGenesis method is:
%
% MagickBooleanType ConstituteComponentGenesis(void)
%
*/
MagickExport MagickBooleanType ConstituteComponentGenesis(void)
{
  /*
    The constitute component needs no initialization; always succeeds.
  */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentTerminus() destroys the constitute component.
%
% The format of the ConstituteComponentTerminus method is:
%
% ConstituteComponentTerminus(void)
%
*/
MagickExport void ConstituteComponentTerminus(void)
{
  /*
    The constitute component holds no state; teardown is a no-op.
  */
}
#if defined(MAGICKCORE_WINGDI32_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o H B i t m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToHBITMAP() extracts a specified region of the image and returns
% it as a Windows HBITMAP. While the same functionality can be accomplished by
% invoking CropImage() followed by ImageToHBITMAP(), this method is more
% efficient since it copies pixels directly to the HBITMAP.
%
% The format of the CropImageToHBITMAP method is:
%
% HBITMAP CropImageToHBITMAP(Image* image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *CropImageToHBITMAP(Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  BITMAP
    bitmap;

  HBITMAP
    bitmapH;

  HANDLE
    bitmap_bitsH;

  MagickBooleanType
    proceed;

  RectangleInfo
    page;

  const PixelPacket
    *p;

  RGBQUAD
    *bitmap_bits,
    *q;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (((geometry->x+(ssize_t) geometry->width) < 0) ||
      ((geometry->y+(ssize_t) geometry->height) < 0) ||
      (geometry->x >= (ssize_t) image->columns) ||
      (geometry->y >= (ssize_t) image->rows))
    ThrowImageException(OptionError,"GeometryDoesNotContainImage");
  /*
    Clip the requested region to the image bounds.
  */
  page=(*geometry);
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if (page.x < 0)
    {
      page.width+=page.x;
      page.x=0;
    }
  if (page.y < 0)
    {
      page.height+=page.y;
      page.y=0;
    }
  if ((page.width == 0) || (page.height == 0))
    ThrowImageException(OptionError,"GeometryDimensionsAreZero");
  /*
    Describe a top-down 32-bits-per-pixel DIB for the cropped region.
  */
  bitmap.bmType = 0;
  bitmap.bmWidth = (LONG) page.width;
  bitmap.bmHeight = (LONG) page.height;
  bitmap.bmWidthBytes = bitmap.bmWidth * 4;
  bitmap.bmPlanes = 1;
  bitmap.bmBitsPixel = 32;
  bitmap.bmBits = NULL;
  /*
    Allocate bmWidthBytes*bmHeight bytes (4 bytes per pixel).  The previous
    code multiplied width*height by bmBitsPixel -- a bit count, not a byte
    count -- and so over-allocated the buffer by a factor of eight.
  */
  bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,
    (size_t) bitmap.bmWidthBytes*bitmap.bmHeight);
  if (bitmap_bitsH == NULL)
    return(NULL);
  bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH);
  if (bitmap_bits == (RGBQUAD *) NULL)
    {
      /* Lock failed: release the allocation rather than write through NULL. */
      GlobalFree((HGLOBAL) bitmap_bitsH);
      return(NULL);
    }
  bitmap.bmBits=bitmap_bits;
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Extract crop image.
  */
  q=bitmap_bits;
  for (y=0; y < (ssize_t) page.height; y++)
  {
    ssize_t
      x;

    p=GetVirtualPixels(image,page.x,page.y+y,page.width,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* Transfer pixels, scaling each Quantum down to an 8-bit channel. */
    for( x=(ssize_t) page.width ; x> 0 ; x-- )
    {
      q->rgbRed = ScaleQuantumToChar(GetPixelRed(p));
      q->rgbGreen = ScaleQuantumToChar(GetPixelGreen(p));
      q->rgbBlue = ScaleQuantumToChar(GetPixelBlue(p));
      q->rgbReserved = 0;
      p++;
      q++;
    }
    proceed=SetImageProgress(image,CropImageTag,y,page.height);
    if (proceed == MagickFalse)
      break;
  }
  if (y < (ssize_t) page.height)
    {
      /* Aborted early (pixel fault or progress cancel): clean up and fail. */
      GlobalUnlock((HGLOBAL) bitmap_bitsH);
      GlobalFree((HGLOBAL) bitmap_bitsH);
      return((void *) NULL);
    }
  bitmapH=CreateBitmapIndirect(&bitmap);
  GlobalUnlock((HGLOBAL) bitmap_bitsH);
  GlobalFree((HGLOBAL) bitmap_bitsH);
  return((void *) bitmapH);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageAttribute() deletes an attribute from the image.
%
% Deprecated, replace with:
%
% DeleteImageProperty(image,key);
%
% The format of the DeleteImageAttribute method is:
%
% MagickBooleanType DeleteImageAttribute(Image *image,const char *key)
%
% A description of each parameter follows:
%
% o image: the image info.
%
% o key: the image key.
%
*/
MagickExport MagickBooleanType DeleteImageAttribute(Image *image,
  const char *key)
{
  MagickBooleanType
    status;

  /* Deprecated: attributes are stored as properties, so removal is a
     straight delegation to DeleteImageProperty(). */
  status=DeleteImageProperty(image,key);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageList() deletes an image at the specified position in the list.
%
% The format of the DeleteImageList method is:
%
% unsigned int DeleteImageList(Image *images,const ssize_t offset)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
*/
MagickExport unsigned int DeleteImageList(Image *images,const ssize_t offset)
{
  ssize_t
    position;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  /* Rewind to the head of the list before counting forward to the offset. */
  while (GetPreviousImageInList(images) != (Image *) NULL)
    images=GetPreviousImageInList(images);
  position=0;
  while (position < offset)
  {
    /* Fail if the list is shorter than the requested position. */
    if (GetNextImageInList(images) == (Image *) NULL)
      return(MagickFalse);
    images=GetNextImageInList(images);
    position++;
  }
  DeleteImageFromList(&images);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteMagickRegistry() deletes an entry in the registry as defined by the id.
% It returns MagickTrue if the entry is deleted otherwise MagickFalse if no
% entry is found in the registry that matches the id.
%
% Deprecated, replace with:
%
% char key[MaxTextExtent];
% FormatLocaleString(key,MaxTextExtent,"%ld\n",id);
% DeleteImageRegistry(key);
%
% The format of the DeleteMagickRegistry method is:
%
% MagickBooleanType DeleteMagickRegistry(const ssize_t id)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
*/
MagickExport MagickBooleanType DeleteMagickRegistry(const ssize_t id)
{
  char
    key[MaxTextExtent];

  MagickBooleanType
    status;

  /* Registry entries are keyed by the decimal rendering of the id. */
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  status=DeleteImageRegistry(key);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C o n s t i t u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyConstitute() destroys the constitute component.
%
% The format of the DestroyConstitute method is:
%
% DestroyConstitute(void)
%
*/
MagickExport void DestroyConstitute(void)
{
  /* Deprecated stub kept for ABI compatibility; the constitute component
     holds no state, so there is nothing to destroy. */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagickRegistry() deallocates memory associated the magick registry.
%
% Deprecated, replace with:
%
% RegistryComponentTerminus();
%
% The format of the DestroyMagickRegistry method is:
%
% void DestroyMagickRegistry(void)
%
*/
MagickExport void DestroyMagickRegistry(void)
{
  /* Deprecated: forwards directly to RegistryComponentTerminus(). */
  RegistryComponentTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DescribeImage() describes an image by printing its attributes to the file.
% Attributes include the image width, height, size, and others.
%
% Deprecated, replace with:
%
% IdentifyImage(image,file,verbose);
%
% The format of the DescribeImage method is:
%
% MagickBooleanType DescribeImage(Image *image,FILE *file,
% const MagickBooleanType verbose)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o file: the file, typically stdout.
%
% o verbose: A value other than zero prints more detailed information
% about the image.
%
*/
MagickExport MagickBooleanType DescribeImage(Image *image,FILE *file,
  const MagickBooleanType verbose)
{
  MagickBooleanType
    status;

  /* Deprecated: identical in behavior to IdentifyImage(). */
  status=IdentifyImage(image,file,verbose);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageAttributes() deallocates memory associated with the image
% attribute list.
%
% The format of the DestroyImageAttributes method is:
%
% DestroyImageAttributes(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageAttributes(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to do when no attribute splay-tree was ever created. */
  if (image->attributes == (void *) NULL)
    return;
  image->attributes=(void *) DestroySplayTree((SplayTreeInfo *)
    image->attributes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImages() destroys an image list.
%
% Deprecated, replace with:
%
% DestroyImageList(image);
%
% The format of the DestroyImages method is:
%
% void DestroyImages(Image *image)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
*/
MagickExport void DestroyImages(Image *image)
{
  /* Deprecated: tolerate a NULL list, then delegate to DestroyImageList(). */
  if (image == (Image *) NULL)
    return;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  (void) DestroyImageList(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagick() destroys the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreTerminus();
%
% The format of the DestroyMagick function is:
%
% DestroyMagick(void)
%
*/
MagickExport void DestroyMagick(void)
{
  /* Deprecated: forwards directly to MagickCoreTerminus(). */
  MagickCoreTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s p a t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DispatchImage() extracts pixel data from an image and returns it to you.
% The method returns MagickTrue on success otherwise MagickFalse if an error
% is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% DispatchImage(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
% Deprecated, replace with:
%
% ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
% exception);
%
% The format of the DispatchImage method is:
%
% unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,const size_t columns,
% const size_t rows,const char *map,const StorageType type,
% void *pixels,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset, y_offset, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha, C = cyan, Y = yellow, M = magenta, K = black, or
% I = intensity (for grayscale).
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, or
% DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  /* Deprecated: delegate directly to ExportImagePixels(). */
  return(ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,
    pixels,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t r a c t S u b i m a g e F r o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtractSubimageFromImage() extracts a region of the image that most
% closely resembles the reference.
%
% The format of the ExtractSubimageFromImage method is:
%
% Image *ExtractSubimageFromImage(const Image *image,
% const Image *reference,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() computes a normalized RMS pixel error between the
  reference image and the like-sized window of `image' anchored at
  (x_offset,y_offset).  The scan aborts early once the running metric
  exceeds similarity_threshold, so the returned value is only exact for
  windows that beat the caller's current best candidate.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const ssize_t x_offset,const ssize_t y_offset,
  const double similarity_threshold,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reference_view;

  double
    channels,            /* number of channels folded into the metric */
    normalized_similarity,
    similarity;          /* running sum of squared channel differences */

  ssize_t
    y;

  /*
    Compute the similarity in pixels between two images.
  */
  normalized_similarity=1.0;
  similarity=0.0;
  /* Base RGB; opacity and black (CMYK index) channels are counted only
     when BOTH images carry them. */
  channels=3;
  if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
    channels++;
  if ((image->colorspace == CMYKColorspace) &&
      (reference->colorspace == CMYKColorspace))
    channels++;
  image_view=AcquireVirtualCacheView(image,exception);
  reference_view=AcquireVirtualCacheView(reference,exception);
  for (y=0; y < (ssize_t) reference->rows; y++)
  {
    const IndexPacket
      *indexes,
      *reference_indexes;

    const PixelPacket
      *p,
      *q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset+y,
      reference->columns,1,exception);
    q=GetCacheViewVirtualPixels(reference_view,0,y,reference->columns,1,
      exception);
    /* A pixel fault skips this row rather than aborting the whole metric. */
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reference_indexes=GetCacheViewVirtualIndexQueue(reference_view);
    for (x=0; x < (ssize_t) reference->columns; x++)
    {
      MagickRealType
        pixel;

      /* Accumulate the QuantumScale-normalized squared difference per
         channel. */
      pixel=QuantumScale*(GetPixelRed(p)-(double)
        GetPixelRed(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelGreen(p)-(double)
        GetPixelGreen(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelBlue(p)-(double)
        GetPixelBlue(q));
      similarity+=pixel*pixel;
      if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
        {
          pixel=QuantumScale*(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          similarity+=pixel*pixel;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reference->colorspace == CMYKColorspace))
        {
          pixel=QuantumScale*(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reference_indexes+x));
          similarity+=pixel*pixel;
        }
      p++;
      q++;
    }
    /* Normalize by area and channel count; bail out as soon as this window
       can no longer beat the caller's best candidate. */
    normalized_similarity=sqrt(similarity)/reference->columns/reference->rows/
      channels;
    if (normalized_similarity > similarity_threshold)
      break;
  }
  reference_view=DestroyCacheView(reference_view);
  image_view=DestroyCacheView(image_view);
  return(normalized_similarity);
}
/*
  NOTE(review): inside the OpenMP loop, GetSimilarityMetric() reads
  similarity_threshold without synchronization while other threads may be
  updating it inside the critical section.  The threshold is only an
  early-exit hint, so a stale read costs extra work rather than a wrong
  minimum -- but confirm this is intentional.
*/
MagickExport Image *ExtractSubimageFromImage(Image *image,
  const Image *reference,ExceptionInfo *exception)
{
  double
    similarity_threshold;  /* lowest normalized error found so far */

  RectangleInfo
    offset;

  ssize_t
    y;

  /*
    Extract reference from image.
  */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    return((Image *) NULL);
  /* Seed with an impossibly large threshold so any window improves on it. */
  similarity_threshold=(double) image->columns*image->rows;
  /* SetGeometry() fills offset.width/height from the reference dimensions;
     only offset.x/y are updated by the search below. */
  SetGeometry(reference,&offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows); y++)
  {
    double
      similarity;

    ssize_t
      x;

    for (x=0; x < (ssize_t) (image->columns-reference->columns); x++)
    {
      similarity=GetSimilarityMetric(image,reference,x,y,similarity_threshold,
        exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ExtractSubimageFromImage)
#endif
      /* Track the best-matching window; this update is serialized. */
      if (similarity < similarity_threshold)
        {
          similarity_threshold=similarity;
          offset.x=x;
          offset.y=y;
        }
    }
  }
  /* Accept the best match only if it is within the reference fuzz factor. */
  if (similarity_threshold > (QuantumScale*reference->fuzz/100.0))
    return((Image *) NULL);
  return(CropImage(image,&offset,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l a t t e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlattenImages() Obsolete Function: Use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,FlattenLayer,exception);
%
% The format of the FlattenImages method is:
%
% Image *FlattenImages(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlattenImages(Image *image,ExceptionInfo *exception)
{
  Image
    *flatten_image;

  /* Obsolete: equivalent to MergeImageLayers() with FlattenLayer. */
  flatten_image=MergeImageLayers(image,FlattenLayer,exception);
  return(flatten_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatImageAttribute() permits formatted key/value pairs to be saved as an
% image attribute.
%
% The format of the FormatImageAttribute method is:
%
% MagickBooleanType FormatImageAttribute(Image *image,const char *key,
% const char *format,...)
%
% A description of each parameter follows.
%
% o image: The image.
%
% o key: The attribute key.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport MagickBooleanType FormatImageAttributeList(Image *image,
  const char *key,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    count;

  /* Render the formatted value into a bounded buffer, then store it as an
     image property under the given key. */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(value,MaxTextExtent,format,operands);
#else
  count=vsprintf(value,format,operands);
#endif
  /* On a formatting error the buffer contents are unspecified; force a
     terminator so the property value is at least a valid C string. */
  if (count < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,key,value));
}
MagickExport MagickBooleanType FormatImagePropertyList(Image *image,
  const char *property,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    count;

  /* Render the formatted value, then store it as an image property. */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(value,MaxTextExtent,format,operands);
#else
  count=vsprintf(value,format,operands);
#endif
  /* Guarantee NUL-termination if the formatter reported an error. */
  if (count < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,property,value));
}
MagickExport MagickBooleanType FormatImageAttribute(Image *image,
  const char *key,const char *format,...)
{
  char
    value[MaxTextExtent];

  va_list
    operands;

  /* Format the variadic arguments into a value string, then store it as
     an image property; the formatter's count is not needed. */
  va_start(operands,format);
  (void) FormatLocaleStringList(value,MaxTextExtent,format,operands);
  va_end(operands);
  return(SetImageProperty(image,key,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t M a g i c k S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatMagickString() prints formatted output of a variable argument list.
%
% The format of the FormatMagickString method is:
%
% ssize_t FormatMagickString(char *string,const size_t length,
% const char *format,...)
%
% A description of each parameter follows.
%
% o string: FormatMagickString() returns the formatted string in this
% character buffer.
%
% o length: the maximum length of the string.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport ssize_t FormatMagickStringList(char *string,const size_t length,
  const char *format,va_list operands)
{
  int
    count;

  /* Deprecated vsnprintf()-style formatter; writes at most `length' bytes. */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(string,length,format,operands);
#else
  count=vsprintf(string,format,operands);
#endif
  /* On error the buffer is unspecified; force a terminator at the end. */
  if (count < 0)
    string[length-1]='\0';
  return((ssize_t) count);
}
MagickExport ssize_t FormatMagickString(char *string,const size_t length,
  const char *format,...)
{
  ssize_t
    count;

  va_list
    operands;

  /* Variadic front-end for FormatMagickStringList(). */
  va_start(operands,format);
  count=(ssize_t) FormatMagickStringList(string,length,format,operands);
  va_end(operands);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatString() prints formatted output of a variable argument list.
%
% The format of the FormatString method is:
%
% void FormatString(char *string,const char *format,...)
%
% A description of each parameter follows.
%
% o string: Method FormatString returns the formatted string in this
% character buffer.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport void FormatStringList(char *string,const char *format,
  va_list operands)
{
  int
    count;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  /* The destination is assumed to hold at least MaxTextExtent bytes. */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(string,MaxTextExtent,format,operands);
#else
  count=vsprintf(string,format,operands);
#endif
  /* Guarantee NUL-termination if the formatter reported an error. */
  if (count < 0)
    string[MaxTextExtent-1]='\0';
}
MagickExport void FormatString(char *string,const char *format,...)
{
  va_list
    operands;

  /* Variadic front-end; the destination must hold MaxTextExtent bytes. */
  va_start(operands,format);
  (void) FormatLocaleStringList(string,MaxTextExtent,format,operands);
  va_end(operands);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r M a t c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorMatch() returns true if the distance between two pixel colors is
% within the specified fuzz tolerance.
%
% The format of the FuzzyColorMatch method is:
%
% unsigned int FuzzyColorMatch(const PixelPacket *p,const PixelPacket *q,
% const double fuzz)
%
% A description of each parameter follows:
%
% o p: Pixel p.
%
% o q: Pixel q.
%
% o fuzz: Define how much tolerance is acceptable to consider
% two colors as the same.
%
*/
MagickExport unsigned int FuzzyColorMatch(const PixelPacket *p,
  const PixelPacket *q,const double fuzz)
{
  MagickRealType
    delta,
    distance;

  /* With zero fuzz, an exact channel-by-channel comparison suffices. */
  if ((fuzz == 0.0) && (GetPixelRed(p) == GetPixelRed(q)) &&
      (GetPixelGreen(p) == GetPixelGreen(q)) &&
      (GetPixelBlue(p) == GetPixelBlue(q)))
    return(MagickTrue);
  /* Accumulate squared channel differences, rejecting as soon as the
     running distance exceeds fuzz squared. */
  delta=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
  distance=delta*delta;
  if (distance > (fuzz*fuzz))
    return(MagickFalse);
  delta=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
  distance+=delta*delta;
  if (distance > (fuzz*fuzz))
    return(MagickFalse);
  delta=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
  distance+=delta*delta;
  if (distance > (fuzz*fuzz))
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorCompare() returns MagickTrue if the distance between two colors is
% less than the specified distance in a linear three dimensional color space.
% This method is used by ColorFloodFill() and other algorithms which
% compare two colors.
%
% The format of the FuzzyColorCompare method is:
%
% MagickBooleanType FuzzyColorCompare(const Image *image,
% const PixelPacket *p,const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyColorCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  MagickBooleanType
    status;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  /* Deprecated: equivalent to IsColorSimilar(). */
  status=IsColorSimilar(image,p,q);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y O p a c i t y C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyOpacityCompare() returns true if the distance between two opacity
% values is less than the specified distance in a linear color space. This
% method is used by MatteFloodFill() and other algorithms which compare
% two opacity values.
%
% Deprecated, replace with:
%
% IsOpacitySimilar(image,p,q);
%
% The format of the FuzzyOpacityCompare method is:
%
% MagickBooleanType FuzzyOpacityCompare(const Image *image,
% const PixelPacket *p,const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyOpacityCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  MagickBooleanType
    status;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  /* Deprecated: equivalent to IsOpacitySimilar(). */
  status=IsOpacitySimilar(image,p,q);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C o n f i g u r e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetConfigureBlob() returns the specified configure file as a blob.
%
% The format of the GetConfigureBlob method is:
%
% void *GetConfigureBlob(const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o filename: the configure file name.
%
% o path: return the full path information of the configure file.
%
% o length: This pointer to a size_t integer sets the initial length of the
% blob. On return, it reflects the actual length of the blob.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetConfigureBlob(const char *filename,char *path,
  size_t *length,ExceptionInfo *exception)
{
  void
    *blob;

  assert(filename != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(path != (char *) NULL);
  assert(length != (size_t *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  blob=(void *) NULL;
  /* `path' doubles as the output: it is overwritten with each candidate
     location tried and holds the last one tried on return. */
  (void) CopyMagickString(path,filename,MaxTextExtent);
#if defined(MAGICKCORE_INSTALLED_SUPPORT)
#if defined(MAGICKCORE_LIBRARY_PATH)
  if (blob == (void *) NULL)
    {
      /*
        Search hard coded paths.
      */
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s",
        MAGICKCORE_LIBRARY_PATH,filename);
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
#endif
#if defined(MAGICKCORE_WINDOWS_SUPPORT) && !(defined(MAGICKCORE_CONFIGURE_PATH) || defined(MAGICKCORE_SHARE_PATH))
  if (blob == (void *) NULL)
    {
      unsigned char
        *key_value;

      /*
        Locate file via registry key.
      */
      key_value=NTRegistryKeyLookup("ConfigurePath");
      if (key_value != (unsigned char *) NULL)
        {
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *)
            key_value,DirectorySeparator,filename);
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
        }
    }
#endif
#else
  /* Uninstalled build: probe environment variables, the executable
     directory, the current directory, then (Windows) embedded resources. */
  if (blob == (void *) NULL)
    {
      char
        *home;

      home=GetEnvironmentValue("MAGICK_HOME");
      if (home != (char *) NULL)
        {
          /*
            Search MAGICK_HOME.
          */
#if !defined(MAGICKCORE_POSIX_SUPPORT)
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home,
            DirectorySeparator,filename);
#else
          (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home,
            MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
      home=GetEnvironmentValue("HOME");
      if (home == (char *) NULL)
        home=GetEnvironmentValue("USERPROFILE");
      if (home != (char *) NULL)
        {
          /*
            Search $HOME/.magick.
          */
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home,
            DirectorySeparator,DirectorySeparator,filename);
          if ((IsPathAccessible(path) != MagickFalse) && (blob == (void *) NULL))
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
    }
  if ((blob == (void *) NULL) && (*GetClientPath() != '\0'))
    {
#if !defined(MAGICKCORE_POSIX_SUPPORT)
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(),
        DirectorySeparator,filename);
#else
      char
        prefix[MaxTextExtent];

      /*
        Search based on executable directory if directory is known.
      */
      (void) CopyMagickString(prefix,GetClientPath(),
        MaxTextExtent);
      ChopPathComponents(prefix,1);
      (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",prefix,
        MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
  /*
    Search current directory.
  */
  if ((blob == (void *) NULL) && (IsPathAccessible(path) != MagickFalse))
    blob=FileToBlob(path,~0UL,length,exception);
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
  /*
    Search Windows registry.
  */
  if (blob == (void *) NULL)
    blob=NTResourceToBlob(filename);
#endif
#endif
  /* All locations exhausted: record a warning naming the last path tried. */
  if (blob == (void *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ConfigureWarning,
      "UnableToOpenConfigureFile","`%s'",path);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheView() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheView method is:
%
% PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the address of a structure of type CacheView.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /* Deprecated: delegate directly to GetCacheViewAuthenticPixels(), using
     the view's own exception structure. */
  return(GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticIndexQueue(cache_view);
%
% The format of the GetCacheViewIndexes method is:
%
% IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
{
  IndexPacket
    *indexes;

  /* Deprecated: equivalent to GetCacheViewAuthenticIndexQueue(). */
  indexes=GetCacheViewAuthenticIndexQueue(cache_view);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheViewPixels method is:
%
% PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /* Deprecated: delegate directly to GetCacheViewAuthenticPixels(), using
     the view's own exception structure. */
  return(GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t E x c e p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetExceptionInfo() initializes an exception to default values.
%
% The format of the GetExceptionInfo method is:
%
% GetExceptionInfo(ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o exception: the exception info.
%
*/
MagickExport void GetExceptionInfo(ExceptionInfo *exception)
{
  assert(exception != (ExceptionInfo *) NULL);
  /* Zero the whole structure first so every field has a defined value. */
  (void) memset(exception,0,sizeof(*exception));
  exception->severity=UndefinedException;
  exception->exceptions=(void *) NewLinkedList(0);
  exception->semaphore=AllocateSemaphoreInfo();
  /* The signature is set last, marking the structure fully initialized. */
  exception->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAttribute() searches the list of image attributes and returns
% a pointer to the attribute if it exists otherwise NULL.
%
% The format of the GetImageAttribute method is:
%
% const ImageAttribute *GetImageAttribute(const Image *image,
% const char *key)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: These character strings are the name of an image attribute to
% return.
%
*/
static void *DestroyAttribute(void *attribute)
{
ImageAttribute
*p;
p=(ImageAttribute *) attribute;
if (p->value != (char *) NULL)
p->value=DestroyString(p->value);
return(RelinquishMagickMemory(p));
}
MagickExport const ImageAttribute *GetImageAttribute(const Image *image,
  const char *key)
{
  /*
    Deprecated wrapper around the modern image-property API: look the key up
    with GetImageProperty() and cache the result as an ImageAttribute in a
    splay tree hung off image->attributes.  Returns NULL when the property
    does not exist.
  */
  const char
    *value;
  ImageAttribute
    *attribute;
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  value=GetImageProperty(image,key);
  if (value == (const char *) NULL)
    return((const ImageAttribute *) NULL);
  /*
    Lazily create the attribute cache.  Casting away const is deliberate:
    the cache behaves as a hidden mutable member of the const image.  The
    tree frees keys with RelinquishMagickMemory() and values with
    DestroyAttribute().
  */
  if (image->attributes == (void *) NULL)
    ((Image *) image)->attributes=NewSplayTree(CompareSplayTreeString,
      RelinquishMagickMemory,DestroyAttribute);
  else
    {
      const ImageAttribute
        *attribute;
      /*
        Cache already exists: return the cached entry if this key was
        requested before.
      */
      attribute=(const ImageAttribute *) GetValueFromSplayTree((SplayTreeInfo *)
        image->attributes,key);
      if (attribute != (const ImageAttribute *) NULL)
        return(attribute);
    }
  /*
    Cache miss: allocate a new attribute, copy key and value, and register
    it in the tree; allocation failure here is fatal by design.
  */
  attribute=(ImageAttribute *) AcquireMagickMemory(sizeof(*attribute));
  if (attribute == (ImageAttribute *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(attribute,0,sizeof(*attribute));
  attribute->key=ConstantString(key);
  attribute->value=ConstantString(value);
  (void) AddValueToSplayTree((SplayTreeInfo *) ((Image *) image)->attributes,
    attribute->key,attribute);
  return((const ImageAttribute *) attribute);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p p i n g P a t h A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClippingPathAttribute() searches the list of image attributes and
% returns a pointer to a clipping path if it exists otherwise NULL.
%
% Deprecated, replace with:
%
% GetImageAttribute(image,"8BIM:1999,2998");
%
% The format of the GetImageClippingPathAttribute method is:
%
% const ImageAttribute *GetImageClippingPathAttribute(Image *image)
%
% A description of each parameter follows:
%
% o attribute: Method GetImageClippingPathAttribute returns the clipping
% path if it exists otherwise NULL.
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetImageClippingPathAttribute(Image *image)
{
  /*
    Deprecated: the clipping path lives under the 8BIM profile key
    "8BIM:1999,2998"; delegate the lookup to GetImageAttribute().
  */
  const ImageAttribute
    *attribute;

  attribute=GetImageAttribute(image,"8BIM:1999,2998");
  return(attribute);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F r o m M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFromMagickRegistry() gets an image from the registry as defined by
% its name. If the image is not found, a NULL image is returned.
%
% Deprecated, replace with:
%
% GetImageRegistry(ImageRegistryType,name,exception);
%
% The format of the GetImageFromMagickRegistry method is:
%
% Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o name: the name of the image to retrieve from the registry.
%
% o id: the registry id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
  ExceptionInfo *exception)
{
  /*
    Deprecated: look the image up by name in the registry.  Legacy numeric
    registry ids are no longer meaningful, so *id is always reported as 0.
  */
  Image
    *image;

  *id=0L;
  image=(Image *) GetImageRegistry(ImageRegistryType,name,exception);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickRegistry() gets a blob from the registry as defined by the id. If
% the blob that matches the id is not found, NULL is returned.
%
% The format of the GetMagickRegistry method is:
%
% const void *GetMagickRegistry(const ssize_t id,RegistryType *type,
% size_t *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
% o type: the registry type.
%
% o length: the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetMagickRegistry(const ssize_t id,RegistryType *type,
  size_t *length,ExceptionInfo *exception)
{
  /*
    Deprecated: look a blob up in the registry by numeric id.  The legacy
    type and length out-parameters can no longer be recovered and are
    always reported as UndefinedRegistryType and 0.
  */
  char
    key[MaxTextExtent];
  void
    *blob;
  *type=UndefinedRegistryType;
  *length=0;
  /* NOTE(review): the trailing '\n' inside the key looks odd but must match
     however legacy ids were registered -- confirm before changing */
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  /*
    Try each registry namespace in turn, returning the first hit.
  */
  blob=(void *) GetImageRegistry(ImageRegistryType,key,exception);
  if (blob != (void *) NULL)
    return(blob);
  blob=(void *) GetImageRegistry(ImageInfoRegistryType,key,exception);
  if (blob != (void *) NULL)
    return(blob);
  return((void *) GetImageRegistry(UndefinedRegistryType,key,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t M a g i c k T o k e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickToken() gets a token from the token stream. A token is defined as
% a sequence of characters delimited by whitespace (e.g. clip-path), a
% sequence delimited with quotes (e.g. "Quote me"), or a sequence enclosed in
% parenthesis (e.g. rgb(0,0,0)). GetMagickToken() also recognizes these
% separator characters: ':', '=', ',', and ';'.
%
% The format of the GetMagickToken method is:
%
% void GetMagickToken(const char *start,const char **end,char *token)
%
% A description of each parameter follows:
%
% o start: the start of the token sequence.
%
% o end: point to the end of the token sequence.
%
% o token: copy the token to this buffer.
%
*/
MagickExport void GetMagickToken(const char *start,const char **end,char *token)
{
  /*
    Deprecated: delegate to GetNextToken() with an effectively unbounded
    token-buffer length (~0UL); *end is advanced past the parsed token.
  */
  (void) GetNextToken(start,end,~0UL,token);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageGeometry() returns a region as defined by the geometry string with
% respect to the image and its gravity.
%
% Deprecated, replace with:
%
% if (size_to_fit != MagickFalse)
% ParseRegionGeometry(image,geometry,region_info,&image->exception); else
% ParsePageGeometry(image,geometry,region_info,&image->exception);
%
% The format of the GetImageGeometry method is:
%
% int GetImageGeometry(Image *image,const char *geometry,
%       const unsigned int size_to_fit,RectangleInfo *region_info)
%
% A description of each parameter follows:
%
% o flags: Method GetImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o size_to_fit: A value other than 0 means to scale the region so it
% fits within the specified width and height.
%
% o region_info: the region as defined by the geometry string with
% respect to the image and its gravity.
%
*/
MagickExport int GetImageGeometry(Image *image,const char *geometry,
  const unsigned int size_to_fit,RectangleInfo *region_info)
{
  /*
    Deprecated: parse the geometry string relative to the image.  A nonzero
    size_to_fit scales the region to fit the requested width and height
    (ParseRegionGeometry); otherwise the page geometry is parsed as-is.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.4");
  if (size_to_fit == 0)
    return((int) ParsePageGeometry(image,geometry,region_info,
      &image->exception));
  return((int) ParseRegionGeometry(image,geometry,region_info,
    &image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageList() returns an image at the specified position in the list.
%
% Deprecated, replace with:
%
% CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
% exception);
%
% The format of the GetImageList method is:
%
% Image *GetImageList(const Image *images,const ssize_t offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageList(const Image *images,const ssize_t offset,
  ExceptionInfo *exception)
{
  /*
    Deprecated: return a clone of the image at the given list offset rather
    than the list element itself.
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  return(CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListIndex() returns the position in the list of the specified
% image.
%
% Deprecated, replace with:
%
% GetImageIndexInList(images);
%
% The format of the GetImageListIndex method is:
%
% ssize_t GetImageListIndex(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport ssize_t GetImageListIndex(const Image *images)
{
  /*
    Deprecated: forward to GetImageIndexInList().
  */
  ssize_t
    index;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  index=GetImageIndexInList(images);
  return(index);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListSize() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(images);
%
% The format of the GetImageListSize method is:
%
% size_t GetImageListSize(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport size_t GetImageListSize(const Image *images)
{
  /*
    Deprecated: forward to GetImageListLength().
  */
  size_t
    length;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  length=GetImageListLength(images);
  return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in RAM, or in a memory-mapped file. The returned pointer
% should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
%  PseudoClass, call GetAuthenticIndexQueue() after invoking GetImagePixels()
% to obtain the black color component or colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the GetImagePixels() method is:
%
% PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  /*
    Deprecated: forward to GetAuthenticPixels(), reporting any problem
    through the image's own exception structure.
  */
  PixelPacket
    *pixels;

  pixels=GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetIndexes() returns the black channel or the colormap indexes associated
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the black channel or colormap indexes are not available.
%
% Deprecated, replace with:
%
% GetAuthenticIndexQueue(image);
%
% The format of the GetIndexes() method is:
%
% IndexPacket *GetIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: GetIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetIndexes(const Image *image)
{
  /*
    Deprecated: forward to GetAuthenticIndexQueue().
  */
  IndexPacket
    *indexes;

  indexes=GetAuthenticIndexQueue(image);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t M a g i c k G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <, >,
% and ~.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the GetMagickGeometry method is:
%
% unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,
  ssize_t *y,size_t *width,size_t *height)
{
  /*
    Deprecated: forward to ParseMetaGeometry(), which honors the %, !, <, >,
    and ~ geometry meta-characters.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  return((unsigned int) ParseMetaGeometry(geometry,x,y,width,height));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImage() returns the next image in a list.
%
% Deprecated, replace with:
%
% GetNextImageInList(images);
%
% The format of the GetNextImage method is:
%
% Image *GetNextImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetNextImage(const Image *images)
{
  /*
    Deprecated: forward to GetNextImageInList().
  */
  Image
    *next;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  next=GetNextImageInList(images);
  return(next);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageAttribute() gets the next image attribute.
%
% Deprecated, replace with:
%
% const char *property;
% property=GetNextImageProperty(image);
% if (property != (const char *) NULL)
% GetImageAttribute(image,property);
%
% The format of the GetNextImageAttribute method is:
%
% const ImageAttribute *GetNextImageAttribute(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetNextImageAttribute(const Image *image)
{
  /*
    Deprecated: advance the image-property iterator and wrap the next
    property as an ImageAttribute; NULL when the iteration is exhausted.
  */
  const char
    *property;

  property=GetNextImageProperty(image);
  if (property != (const char *) NULL)
    return(GetImageAttribute(image,property));
  return((const ImageAttribute *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N u m b e r S c e n e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNumberScenes() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(image);
%
% The format of the GetNumberScenes method is:
%
% unsigned int GetNumberScenes(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport unsigned int GetNumberScenes(const Image *image)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return((unsigned int) GetImageListLength(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs.
%
% Deprecated, replace with:
%
% GetOneAuthenticPixel(image,x,y,&pixel,&image->exception);
%
% The format of the GetOnePixel() method is:
%
%      PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
*/
MagickExport PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated: fetch a single authentic pixel at (x,y); on error the value
    left by GetOneAuthenticPixel() (the image background color, per the
    method contract above) is returned.
  */
  PixelPacket
    target;

  (void) GetOneAuthenticPixel(image,x,y,&target,&image->exception);
  return(target);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetAuthenticPixels().
%
% Deprecated, replace with:
%
% GetAuthenticPixelQueue(image);
%
% The format of the GetPixels() method is:
%
%      PixelPacket *GetPixels(const Image *image)
%
% A description of each parameter follows:
%
% o pixels: GetPixels() returns the pixels associated with the last call
% to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetPixels(const Image *image)
{
  /*
    Deprecated: forward to GetAuthenticPixelQueue().
  */
  PixelPacket
    *pixels;

  pixels=GetAuthenticPixelQueue(image);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P r e v i o u s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPreviousImage() returns the previous image in a list.
%
% Deprecated, replace with:
%
%      GetPreviousImageInList(images);
%
% The format of the GetPreviousImage method is:
%
% Image *GetPreviousImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetPreviousImage(const Image *images)
{
  /*
    Deprecated: forward to GetPreviousImageInList().
  */
  Image
    *previous;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  previous=GetPreviousImageInList(images);
  return(previous);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H S L T r a n s f o r m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HSLTransform() converts a (hue, saturation, lightness) to a (red, green,
% blue) triple.
%
% The format of the HSLTransformImage method is:
%
% void HSLTransform(const double hue,const double saturation,
% const double lightness,Quantum *red,Quantum *green,Quantum *blue)
%
% A description of each parameter follows:
%
% o hue, saturation, lightness: A double value representing a
% component of the HSL color space.
%
% o red, green, blue: A pointer to a pixel component of type Quantum.
%
*/
static inline MagickRealType HueToRGB(MagickRealType m1,MagickRealType m2,
  MagickRealType hue)
{
  /*
    Piecewise-linear helper for HSL-to-RGB conversion: given the low (m1)
    and high (m2) interpolation endpoints, map a hue offset (wrapped into
    [0,1]) onto the rise / plateau / fall / floor segments of the channel
    curve.
  */
  if (hue < 0.0)
    hue+=1.0;
  if (hue > 1.0)
    hue-=1.0;
  if ((6.0*hue) < 1.0)
    return(m1+6.0*(m2-m1)*hue);  /* rising edge: hue < 1/6 */
  if ((2.0*hue) < 1.0)
    return(m2);  /* plateau: 1/6 <= hue < 1/2 */
  if ((3.0*hue) < 2.0)
    return(m1+6.0*(m2-m1)*(2.0/3.0-hue));  /* falling edge: hue < 2/3 */
  return(m1);  /* floor: hue >= 2/3 */
}
MagickExport void HSLTransform(const double hue,const double saturation,
  const double lightness,Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Convert an HSL triple to RGB: derive the two interpolation endpoints
    from lightness and saturation, then evaluate each channel at a hue
    offset of +1/3 (red), 0 (green), and -1/3 (blue).
  */
  MagickRealType
    chroma_high,
    chroma_low;

  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  if (lightness <= 0.5)
    chroma_high=lightness*(saturation+1.0);
  else
    chroma_high=lightness+saturation-lightness*saturation;
  chroma_low=2.0*lightness-chroma_high;
  *red=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(chroma_low,chroma_high,hue+1.0/3.0));
  *green=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(chroma_low,chroma_high,hue));
  *blue=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(chroma_low,chroma_high,hue-1.0/3.0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i t y A f f i n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentityAffine() initializes the affine transform to the identity matrix.
%
% The format of the IdentityAffine method is:
%
% IdentityAffine(AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o affine: A pointer the affine transform of type AffineMatrix.
%
*/
MagickExport void IdentityAffine(AffineMatrix *affine)
{
  /*
    Deprecated: reset the affine transform to the identity matrix (unit
    scale, zero rotation/shear/translation).
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(affine != (AffineMatrix *) NULL);
  (void) memset(affine,0,sizeof(AffineMatrix));
  affine->sy=1.0;
  affine->sx=1.0;
}
#if defined(MAGICKCORE_WINGDI32_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m a g e T o H B i t m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImageToHBITMAP() creates a Windows HBITMAP from an image.
%
% The format of the ImageToHBITMAP method is:
%
%      HBITMAP ImageToHBITMAP(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to convert.
%
*/
MagickExport void *ImageToHBITMAP(Image *image,ExceptionInfo *exception)
{
  /*
    Create a 32-bit Windows bitmap (HBITMAP) from an image: allocate a
    global pixel buffer, copy the image into it one RGBQUAD per pixel, and
    hand the result to CreateBitmapIndirect().  Returns the HBITMAP (as
    void *) or NULL on allocation failure.
  */
  BITMAP
    bitmap;
  HANDLE
    bitmap_bitsH;
  HBITMAP
    bitmapH;
  ssize_t
    x;
  const PixelPacket
    *p;
  RGBQUAD
    *q;
  RGBQUAD
    *bitmap_bits;
  size_t
    length;
  ssize_t
    y;
  /*
    Describe a single-plane, 32 bits-per-pixel bitmap the size of the image.
  */
  (void) memset(&bitmap,0,sizeof(bitmap));
  bitmap.bmType=0;
  bitmap.bmWidth=(LONG) image->columns;
  bitmap.bmHeight=(LONG) image->rows;
  bitmap.bmWidthBytes=4*bitmap.bmWidth;
  bitmap.bmPlanes=1;
  bitmap.bmBitsPixel=32;
  bitmap.bmBits=NULL;
  length=bitmap.bmWidthBytes*bitmap.bmHeight;
  bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,length);
  if (bitmap_bitsH == NULL)
    {
      char
        *message;
      /*
        Report the allocation failure and bail out with no bitmap.
      */
      message=GetExceptionMessage(errno);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",message);
      message=DestroyString(message);
      return(NULL);
    }
  /* NOTE(review): GlobalLock() can return NULL; the result is used below
     without a check -- confirm whether a guard is needed */
  bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH);
  q=bitmap_bits;
  if (bitmap.bmBits == NULL)
    bitmap.bmBits=bitmap_bits;
  (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Copy the image row by row into the RGBQUAD buffer; the reserved (alpha)
    byte is always zeroed.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->rgbRed=ScaleQuantumToChar(GetPixelRed(p));
      q->rgbGreen=ScaleQuantumToChar(GetPixelGreen(p));
      q->rgbBlue=ScaleQuantumToChar(GetPixelBlue(p));
      q->rgbReserved=0;
      p++;
      q++;
    }
  }
  bitmap.bmBits=bitmap_bits;
  bitmapH=CreateBitmapIndirect(&bitmap);
  if (bitmapH == NULL)
    {
      char
        *message;
      /*
        Bitmap creation failed; report it, then fall through so the global
        buffer below is still released.  NULL is returned via bitmapH.
      */
      message=GetExceptionMessage(errno);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",message);
      message=DestroyString(message);
    }
  GlobalUnlock((HGLOBAL) bitmap_bitsH);
  GlobalFree((HGLOBAL) bitmap_bitsH);
  return((void *) bitmapH);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n i t i a l i z e M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeMagick() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the InitializeMagick function is:
%
% InitializeMagick(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void InitializeMagick(const char *path)
{
  /*
    Deprecated: initialize the MagickCore environment via
    MagickCoreGenesis() with a MagickFalse second argument.
  */
  MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelColor() applies bi-linear or tri-linear interpolation
% between a pixel and it's neighbors.
%
% The format of the InterpolatePixelColor method is:
%
% MagickPixelPacket InterpolatePixelColor(const Image *image,
% CacheView *view_info,InterpolatePixelMethod method,const double x,
% const double y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image cache view.
%
% o type: the type of pixel color interpolation.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void BicubicInterpolate(const MagickPixelPacket *pixels,const double dx,
MagickPixelPacket *pixel)
{
MagickRealType
dx2,
p,
q,
r,
s;
dx2=dx*dx;
p=(pixels[3].red-pixels[2].red)-(pixels[0].red-pixels[1].red);
q=(pixels[0].red-pixels[1].red)-p;
r=pixels[2].red-pixels[0].red;
s=pixels[1].red;
pixel->red=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].green-pixels[2].green)-(pixels[0].green-pixels[1].green);
q=(pixels[0].green-pixels[1].green)-p;
r=pixels[2].green-pixels[0].green;
s=pixels[1].green;
pixel->green=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].blue-pixels[2].blue)-(pixels[0].blue-pixels[1].blue);
q=(pixels[0].blue-pixels[1].blue)-p;
r=pixels[2].blue-pixels[0].blue;
s=pixels[1].blue;
pixel->blue=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].opacity-pixels[2].opacity)-(pixels[0].opacity-pixels[1].opacity);
q=(pixels[0].opacity-pixels[1].opacity)-p;
r=pixels[2].opacity-pixels[0].opacity;
s=pixels[1].opacity;
pixel->opacity=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
if (pixel->colorspace == CMYKColorspace)
{
p=(pixels[3].index-pixels[2].index)-(pixels[0].index-pixels[1].index);
q=(pixels[0].index-pixels[1].index)-p;
r=pixels[2].index-pixels[0].index;
s=pixels[1].index;
pixel->index=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
}
}
static inline MagickRealType CubicWeightingFunction(const MagickRealType x)
{
  /*
    Cubic B-spline kernel expressed as a sum of clipped cubes:
    (max(x+2,0)^3 - 4*max(x+1,0)^3 + 6*max(x,0)^3 - 4*max(x-1,0)^3)/6.
  */
  MagickRealType
    clipped,
    sum;

  clipped=MagickMax(x+2.0,0.0);
  sum=1.0*clipped*clipped*clipped;
  clipped=MagickMax(x+1.0,0.0);
  sum-=4.0*clipped*clipped*clipped;
  clipped=MagickMax(x+0.0,0.0);
  sum+=6.0*clipped*clipped*clipped;
  clipped=MagickMax(x-1.0,0.0);
  sum-=4.0*clipped*clipped*clipped;
  return(sum/6.0);
}
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  /*
    Barycentric interpolation over a triangle: weight x and y by delta->x
    and delta->y and give the apex value p the remaining weight.
  */
  return(delta->x*x+delta->y*y+(1.0-delta->x-delta->y)*p);
}
static inline ssize_t NearestNeighbor(MagickRealType x)
{
  /*
    Round to the nearest integer, rounding halves away from zero.
  */
  return((ssize_t) (x >= 0.0 ? x+0.5 : x-0.5));
}
/*
  InterpolatePixelColor() returns the color of the image at the (possibly
  fractional) coordinate (x,y), computed with the requested interpolation
  method.  For matte images each contributing pixel's color channels are
  pre-multiplied by its normalized alpha and the blended result is
  renormalized with PerceptibleReciprocal() so fully transparent neighbors
  do not bleed color into the result.  If the pixel cache cannot supply the
  neighborhood, the default-initialized pixel is returned and details are
  left in exception.

  Fixes relative to the previous revision:
    o BicubicInterpolatePixel: the row loop no longer re-initializes
      pixels[4*i] (which zeroed the first pixel of each row right before it
      was interpolated, corrupting the result).
    o SplineInterpolatePixel: opacity now accumulates unconditionally, for
      consistency with the average and bilinear cases.
*/
MagickExport MagickPixelPacket InterpolatePixelColor(const Image *image,
  CacheView *image_view,const InterpolatePixelMethod method,const double x,
  const double y,ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;

  const IndexPacket
    *indexes;

  const PixelPacket
    *p;

  ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image_view != (CacheView *) NULL);
  GetMagickPixelPacket(image,&pixel);
  switch (method)
  {
    case AverageInterpolatePixel:
    {
      /*
        Uniform (1/16 weight) average of the surrounding 4x4 neighborhood.
      */
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            /*
              Pre-multiply color channels by normalized alpha.
            */
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        gamma=alpha[i];
        gamma=PerceptibleReciprocal(gamma);
        pixel.red+=gamma*0.0625*pixels[i].red;
        pixel.green+=gamma*0.0625*pixels[i].green;
        pixel.blue+=gamma*0.0625*pixels[i].blue;
        pixel.opacity+=0.0625*pixels[i].opacity;
        if (image->colorspace == CMYKColorspace)
          pixel.index+=gamma*0.0625*pixels[i].index;
        p++;
      }
      break;
    }
    case BicubicInterpolatePixel:
    {
      /*
        Bicubic interpolation over a 4x4 neighborhood: each row of four
        pixels is interpolated horizontally into u[], then u[] is
        interpolated vertically to produce the final pixel.
      */
      MagickPixelPacket
        pixels[16],
        u[4];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
        GetMagickPixelPacket(image,u+i);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      for (i=0; i < 4L; i++)
        BicubicInterpolate(pixels+4*i,delta.x,u+i);
      delta.y=y-floor(y);
      BicubicInterpolate(u,delta.y,&pixel);
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      /*
        Bilinear blend of the 2x2 neighborhood.  Opacity is blended without
        the gamma factor so it interpolates linearly.
      */
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      gamma=(((1.0-delta.y)*((1.0-delta.x)*alpha[0]+delta.x*alpha[1])+delta.y*
        ((1.0-delta.x)*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      pixel.red=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].red+delta.x*
        pixels[1].red)+delta.y*((1.0-delta.x)*pixels[2].red+delta.x*
        pixels[3].red));
      pixel.green=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].green+delta.x*
        pixels[1].green)+delta.y*((1.0-delta.x)*pixels[2].green+
        delta.x*pixels[3].green));
      pixel.blue=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].blue+delta.x*
        pixels[1].blue)+delta.y*((1.0-delta.x)*pixels[2].blue+delta.x*
        pixels[3].blue));
      pixel.opacity=((1.0-delta.y)*((1.0-delta.x)*pixels[0].opacity+delta.x*
        pixels[1].opacity)+delta.y*((1.0-delta.x)*pixels[2].opacity+delta.x*
        pixels[3].opacity));
      if (image->colorspace == CMYKColorspace)
        pixel.index=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].index+
          delta.x*pixels[1].index)+delta.y*((1.0-delta.x)*pixels[2].index+
          delta.x*pixels[3].index));
      break;
    }
    case FilterInterpolatePixel:
    {
      /*
        Resize a 4x4 excerpt down to a single pixel using the image's
        configured resize filter; the filtered pixel is the result.
      */
      Image
        *excerpt_image,
        *filter_image;

      MagickPixelPacket
        pixels[1];

      RectangleInfo
        geometry;

      geometry.width=4L;
      geometry.height=4L;
      geometry.x=(ssize_t) floor(x)-1L;
      geometry.y=(ssize_t) floor(y)-1L;
      excerpt_image=ExcerptImage(image,&geometry,exception);
      if (excerpt_image == (Image *) NULL)
        break;
      filter_image=ResizeImage(excerpt_image,1,1,image->filter,image->blur,
        exception);
      excerpt_image=DestroyImage(excerpt_image);
      if (filter_image == (Image *) NULL)
        break;
      p=GetVirtualPixels(filter_image,0,0,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          filter_image=DestroyImage(filter_image);
          break;
        }
      indexes=GetVirtualIndexQueue(filter_image);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      filter_image=DestroyImage(filter_image);
      break;
    }
    case IntegerInterpolatePixel:
    {
      /*
        No interpolation: take the pixel at floor(x),floor(y).
      */
      MagickPixelPacket
        pixels[1];

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case MeshInterpolatePixel:
    {
      /*
        Split the 2x2 cell into two triangles along the diagonal with the
        smaller luminance change, then interpolate barycentrically within
        the triangle that contains (delta.x,delta.y).
      */
      double
        gamma;

      MagickPixelPacket
        pixels[4];

      MagickRealType
        alpha[4];

      PointInfo
        delta,
        luminance;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      luminance.x=MagickPixelLuma(pixels+0)-MagickPixelLuma(pixels+3);
      luminance.y=MagickPixelLuma(pixels+1)-MagickPixelLuma(pixels+2);
      if (fabs(luminance.x) < fabs(luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel:2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[2].red,
                pixels[3].red,pixels[0].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[2].green,
                pixels[3].green,pixels[0].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
                pixels[3].blue,pixels[0].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[2].opacity,
                pixels[3].opacity,pixels[0].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[2].index,
                  pixels[3].index,pixels[0].index);
            }
          else
            {
              /*
                Top-right triangle (pixel:1, diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[1].red,
                pixels[0].red,pixels[3].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[1].green,
                pixels[0].green,pixels[3].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
                pixels[0].blue,pixels[3].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[1].opacity,
                pixels[0].opacity,pixels[3].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[1].index,
                  pixels[0].index,pixels[3].index);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[0].red,
                pixels[1].red,pixels[2].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[0].green,
                pixels[1].green,pixels[2].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
                pixels[1].blue,pixels[2].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[0].opacity,
                pixels[1].opacity,pixels[2].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[0].index,
                  pixels[1].index,pixels[2].index);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[3].red,
                pixels[2].red,pixels[1].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[3].green,
                pixels[2].green,pixels[1].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
                pixels[2].blue,pixels[1].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[3].opacity,
                pixels[2].opacity,pixels[1].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[3].index,
                  pixels[2].index,pixels[1].index);
            }
        }
      break;
    }
    case NearestNeighborInterpolatePixel:
    {
      /*
        Take the single pixel nearest to (x,y).
      */
      MagickPixelPacket
        pixels[1];

      p=GetCacheViewVirtualPixels(image_view,NearestNeighbor(x),
        NearestNeighbor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case SplineInterpolatePixel:
    {
      /*
        Cubic B-spline over the 4x4 neighborhood using the separable
        CubicWeightingFunction() kernel.
      */
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16],
        dx,
        dy;

      PointInfo
        delta;

      ssize_t
        j,
        n;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      n=0;
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      for (i=(-1); i < 3L; i++)
      {
        dy=CubicWeightingFunction((MagickRealType) i-delta.y);
        for (j=(-1); j < 3L; j++)
        {
          GetMagickPixelPacket(image,pixels+n);
          SetMagickPixelPacket(image,p,indexes+n,pixels+n);
          alpha[n]=1.0;
          if (image->matte != MagickFalse)
            {
              alpha[n]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
              pixels[n].red*=alpha[n];
              pixels[n].green*=alpha[n];
              pixels[n].blue*=alpha[n];
              if (image->colorspace == CMYKColorspace)
                pixels[n].index*=alpha[n];
            }
          dx=CubicWeightingFunction(delta.x-(MagickRealType) j);
          gamma=alpha[n];
          gamma=PerceptibleReciprocal(gamma);
          pixel.red+=gamma*dx*dy*pixels[n].red;
          pixel.green+=gamma*dx*dy*pixels[n].green;
          pixel.blue+=gamma*dx*dy*pixels[n].blue;
          pixel.opacity+=dx*dy*pixels[n].opacity;
          if (image->colorspace == CMYKColorspace)
            pixel.index+=gamma*dx*dy*pixels[n].index;
          n++;
          p++;
        }
      }
      break;
    }
  }
  return(pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageAttributes() replaces any embedded formatting characters with
% the appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the InterpretImageAttributes method is:
%
% char *InterpretImageAttributes(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
/*
  InterpretImageAttributes() is the deprecated name for
  InterpretImageProperties(): expand embedded formatting characters in
  embed_text and return the translated string.
*/
MagickExport char *InterpretImageAttributes(const ImageInfo *image_info,
  Image *image,const char *embed_text)
{
  char
    *translated_text;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  translated_text=InterpretImageProperties(image_info,image,embed_text);
  return(translated_text);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InversesRGBCompandor() removes the gamma function from a sRGB pixel.
%
% The format of the InversesRGBCompandor method is:
%
% MagickRealType InversesRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
/*
  InversesRGBCompandor() removes the sRGB gamma from a pixel intensity:
  values at or below the linear toe (~4.045% of QuantumRange) are scaled by
  1/12.92; the remainder follow the 2.4-exponent power segment.
*/
MagickExport MagickRealType InversesRGBCompandor(const MagickRealType pixel)
{
  const MagickRealType
    linear_threshold = 0.0404482362771076*QuantumRange;

  if (pixel > linear_threshold)
    return(QuantumRange*pow((QuantumScale*pixel+0.055)/1.055,2.4));
  return(pixel/12.92);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M a g i c k I n s t a n t i a t e d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMagickInstantiated() returns MagickTrue if the ImageMagick environment
% is currently instantiated: MagickCoreGenesis() has been called but
% MagickDestroy() has not.
%
% The format of the IsMagickInstantiated method is:
%
% MagickBooleanType IsMagickInstantiated(void)
%
*/
/*
  IsMagickInstantiated() is the deprecated name for
  IsMagickCoreInstantiated(): report whether MagickCoreGenesis() has run
  without a matching terminus.
*/
MagickExport MagickBooleanType IsMagickInstantiated(void)
{
  MagickBooleanType
    instantiated;

  instantiated=IsMagickCoreInstantiated();
  return(instantiated);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s S u b i m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsSubimage() returns MagickTrue if the geometry is a valid subimage
% specification (e.g. [1], [1-9], [1,7,4]).
%
% The format of the IsSubimage method is:
%
% unsigned int IsSubimage(const char *geometry,const unsigned int pedantic)
%
% A description of each parameter follows:
%
% o geometry: This string is the geometry specification.
%
% o pedantic: A value other than 0 invokes a more restrictive set of
% conditions for a valid specification (e.g. [1], [1-4], [4-1]).
%
*/
/*
  IsSubimage() reports whether geometry looks like a subimage list (e.g.
  [1], [1-9], [1,7,4]) rather than a size geometry.  Any 'x'/'X' marks a
  size spec; with pedantic set, a comma also disqualifies the string.
*/
MagickExport unsigned int IsSubimage(const char *geometry,
  const unsigned int pedantic)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if (strchr(geometry,'x') != (char *) NULL)
    return(MagickFalse);
  if (strchr(geometry,'X') != (char *) NULL)
    return(MagickFalse);
  if (pedantic != MagickFalse)
    if (strchr(geometry,',') != (char *) NULL)
      return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() will map the given colors to "black" and "white"
% values, linearly spreading out the colors, and level values on a channel by
% channel basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% Deprecated, replace with:
%
% LevelColorsImageChannel(image,channel,black_color,white_color,invert);
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
% const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
% o white_point: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
const MagickBooleanType invert)
{
return(LevelColorsImageChannel(image,channel,black_color,white_color,invert));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateMemory() frees memory that has already been allocated, and NULL's
% the pointer to it.
%
% The format of the LiberateMemory method is:
%
% void LiberateMemory(void **memory)
%
% A description of each parameter follows:
%
% o memory: A pointer to a block of memory to free for reuse.
%
*/
/*
  LiberateMemory() frees previously allocated memory and NULLs the caller's
  pointer.  A NULL *memory is a no-op; a NULL memory handle is a bug.
*/
MagickExport void LiberateMemory(void **memory)
{
  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory != (void *) NULL)
    {
      free(*memory);
      *memory=(void *) NULL;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateSemaphoreInfo() relinquishes a semaphore.
%
% Deprecated, replace with:
%
% UnlockSemaphoreInfo(*semaphore_info);
%
% The format of the LiberateSemaphoreInfo method is:
%
% LiberateSemaphoreInfo(void **semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
/*
  LiberateSemaphoreInfo() is the deprecated name for UnlockSemaphoreInfo():
  relinquish the given semaphore.

  Fix: assert on a NULL handle before dereferencing it, consistent with
  LiberateMemory() above (previously a NULL semaphore_info crashed with no
  diagnostic).
*/
MagickExport void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
  assert(semaphore_info != (SemaphoreInfo **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  UnlockSemaphoreInfo(*semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k I n c a r n a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickIncarnate() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the MagickIncarnate function is:
%
% MagickIncarnate(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
/*
  MagickIncarnate() is the deprecated name for MagickCoreGenesis():
  initialize the ImageMagick environment for the client at path.
*/
MagickExport void MagickIncarnate(const char *path)
{
  /* Log before initializing so the deprecation is recorded regardless. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o n i t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMonitor() calls the monitor handler method with a text string that
% describes the task and a measure of completion. The method returns
% MagickTrue on success otherwise MagickFalse if an error is encountered, e.g.
% if there was a user interrupt.
%
% The format of the MagickMonitor method is:
%
% MagickBooleanType MagickMonitor(const char *text,
% const MagickOffsetType offset,const MagickSizeType span,
% void *client_data)
%
% A description of each parameter follows:
%
% o offset: the position relative to the span parameter which represents
% how much progress has been made toward completing a task.
%
% o span: the span relative to completing a task.
%
% o client_data: the client data.
%
*/
/*
  MagickMonitor() forwards a progress notification (text, offset of span)
  to the registered monitor handler, if one is installed.  Returns
  MagickTrue unless the handler reports failure (e.g. a user interrupt).
*/
MagickExport MagickBooleanType MagickMonitor(const char *text,
  const MagickOffsetType offset,const MagickSizeType span,
  void *magick_unused(client_data))
{
  ExceptionInfo
    *monitor_exception;

  MagickBooleanType
    monitor_status;

  magick_unreferenced(client_data);
  assert(text != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",text);
  ProcessPendingEvents(text);
  monitor_status=MagickTrue;
  monitor_exception=AcquireExceptionInfo();
  if (monitor_handler != (MonitorHandler) NULL)
    monitor_status=(*monitor_handler)(text,offset,span,monitor_exception);
  monitor_exception=DestroyExceptionInfo(monitor_exception);
  return(monitor_status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImage() replaces the colors of an image with the closest color from a
% reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImage(&quantize_info,image,map_image);
%
% The format of the MapImage method is:
%
% MagickBooleanType MapImage(Image *image,const Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o map_image: the image. Reduce image to a set of colors represented by
% this image.
%
% o dither: Set this integer value to something other than zero to
% dither the mapped image.
%
*/
/*
  MapImage() is the deprecated path to RemapImage(): replace the colors of
  image with the closest colors from map_image, optionally dithering.
*/
MagickExport MagickBooleanType MapImage(Image *image,const Image *map_image,
  const MagickBooleanType dither)
{
  QuantizeInfo
    remap_options;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(map_image != (Image *) NULL);
  assert(map_image->signature == MagickCoreSignature);
  /*
    Initialize color cube.
  */
  GetQuantizeInfo(&remap_options);
  remap_options.dither=dither;
  return(RemapImage(&remap_options,image,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImages() replaces the colors of a sequence of images with the closest
% color from a reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImages(&quantize_info,images,map_image);
%
% The format of the MapImage method is:
%
% MagickBooleanType MapImages(Image *images,Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a set of Image structures.
%
% o map_image: the image. Reduce image to a set of colors represented by
% this image.
%
% o dither: Set this integer value to something other than zero to
% dither the quantized image.
%
*/
/*
  MapImages() is the deprecated path to RemapImages(): replace the colors
  of every image in the sequence with the closest colors from map_image.
*/
MagickExport MagickBooleanType MapImages(Image *images,const Image *map_image,
  const MagickBooleanType dither)
{
  QuantizeInfo
    remap_options;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  GetQuantizeInfo(&remap_options);
  remap_options.dither=dither;
  return(RemapImages(&remap_options,images,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatteFloodfill() changes the transparency value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod
% is specified, the transparency value is changed for any neighbor pixel
% that does not match the bordercolor member of image.
%
% By default target must match a particular pixel transparency exactly.
% However, in many cases two transparency values may differ by a
% small amount. The fuzz member of image defines how much tolerance is
% acceptable to consider two transparency values as the same. For example,
% set fuzz to 10 and the opacity values of 100 and 102 respectively are
% now interpreted as the same value for the purposes of the floodfill.
%
% The format of the MatteFloodfillImage method is:
%
% MagickBooleanType MatteFloodfillImage(Image *image,
% const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
% const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType MatteFloodfillImage(Image *image,
const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
const ssize_t y_offset,const PaintMethod method)
{
/*
  Scanline stack-based seed fill: the flood region is first marked as
  transparent on a cloned "floodplane" image, then the requested opacity is
  applied to every marked pixel of the original image.  PushSegmentStack()
  and MaxStacksize are defined elsewhere in this file -- presumably the
  macro bounds-checks against MaxStacksize; confirm before relying on it.
*/
Image
*floodplane_image;
MagickBooleanType
skip;
SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/* Give the image a usable alpha channel before the clone so both copies
   carry one. */
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
/* Reset the clone to fully opaque: transparent pixels in it will mark the
   filled region. */
(void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
/*
Set floodfill color.
*/
segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
sizeof(*segment_stack))<br/>;
if (segment_stack == (SegmentInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Push initial segment on stack.
*/
x=x_offset;
y=y_offset;
start=0;
s=segment_stack;
PushSegmentStack(y,x,x,1);
PushSegmentStack(y+1,x,x,-1);
while (s > segment_stack)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
/* Scan left from x1: mark matching pixels transparent on the floodplane
   until a non-matching (or already-marked) pixel stops the run. */
p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
p+=x1;
q+=x1;
for (x=x1; x >= 0; x--)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
q--;
p--;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
/* skip is set when the leftward scan stopped immediately (nothing filled
   left of x1). */
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
/* Scan right, extending the filled run and pushing child segments for
   the rows above/below. */
if (x < (ssize_t) image->columns)
{
p=GetVirtualPixels(image,x,y,image->columns-x,1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
&image->exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
q++;
p++;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
/* Advance past the non-matching gap (inverted match tests) to find the
   start of the next fillable run inside [x, x2]. */
if (x <= x2)
{
p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for ( ; x <= x2; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
p++;
q++;
}
}
start=x;
} while (x <= x2);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
/*
Tile fill color onto floodplane.
*/
/* Every pixel marked (non-opaque) on the floodplane receives the
   requested opacity in the original image. */
p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
&image->exception);
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
q->opacity=opacity;
p++;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
floodplane_image=DestroyImage(floodplane_image);
/* Success only if the final tiling pass visited every row. */
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaximumImages() returns the maximum intensity of an image sequence.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MaxEvaluateOperator,exception);
%
% The format of the MaximumImages method is:
%
% Image *MaximumImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MaximumImages() returns the per-pixel maximum intensity of an image
  sequence (deprecated wrapper around EvaluateImages()).

  Fix: previously passed MinEvaluateOperator -- a copy-paste of
  MinimumImages() below -- which returned the minimum instead of the
  maximum.
*/
MagickExport Image *MaximumImages(const Image *images,ExceptionInfo *exception)
{
  return(EvaluateImages(images,MaxEvaluateOperator,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinimumImages() returns the minimum intensity of an image sequence.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MinEvaluateOperator,exception);
%
% The format of the MinimumImages method is:
%
% Image *MinimumImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MinimumImages() returns the per-pixel minimum intensity of an image
  sequence (deprecated wrapper around EvaluateImages()).
*/
MagickExport Image *MinimumImages(const Image *images,ExceptionInfo *exception)
{
  Image
    *minimum_image;

  minimum_image=EvaluateImages(images,MinEvaluateOperator,exception);
  return(minimum_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The algorithm was contributed by Mike Edmonds and implements an insertion
% sort for selecting median color-channel values. For more on this algorithm
% see "Skip Lists: A probabilistic Alternative to Balanced Trees" by William
% Pugh in the June 1990 of Communications of the ACM.
%
% The format of the MedianFilterImage method is:
%
% Image *MedianFilterImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MedianFilterImage() replaces each pixel with the median of its square
  neighborhood of the given radius (deprecated wrapper around
  StatisticImage()).
*/
MagickExport Image *MedianFilterImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  const size_t
    extent=(size_t) radius;

  return(StatisticImage(image,MedianStatistic,extent,extent,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModeImage() makes each pixel the 'predominant color' of the neighborhood
% of the specified radius.
%
% The format of the ModeImage method is:
%
% Image *ModeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ModeImage() replaces each pixel with the predominant color of its square
  neighborhood of the given radius (deprecated wrapper around
  StatisticImage()).
*/
MagickExport Image *ModeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  const size_t
    extent=(size_t) radius;

  return(StatisticImage(image,ModeStatistic,extent,extent,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MosaicImages() Obsolete Function: Use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,MosaicLayer,exception);
%
% The format of the MosaicImage method is:
%
% Image *MosaicImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image list to be composited together
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MosaicImages() is the deprecated name for
  MergeImageLayers(...,MosaicLayer,...): composite the image list onto a
  single canvas.
*/
MagickExport Image *MosaicImages(Image *image,ExceptionInfo *exception)
{
  Image
    *mosaic_image;

  mosaic_image=MergeImageLayers(image,MosaicLayer,exception);
  return(mosaic_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaqueImage method is:
%
% MagickBooleanType OpaqueImage(Image *image,
% const PixelPacket *target,const PixelPacket fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType OpaqueImage(Image *image,
const PixelPacket target,const PixelPacket fill)
{
#define OpaqueImageTag "Opaque/Image"
/*
  Replace every pixel that matches target (within image->fuzz, via
  IsColorSimilar()) with fill.  DirectClass images are rewritten pixel by
  pixel; PseudoClass images are handled through the colormap, with an
  extra pixel pass only when fill carries non-opaque opacity (opacity is
  stored per pixel, not in the colormap).
*/
MagickBooleanType
proceed;
ssize_t
i;
ssize_t
y;
/*
Make image color opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Make DirectClass image opaque.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) != MagickFalse)
*q=fill;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
/* Progress callback; a MagickFalse return means the user aborted. */
proceed=SetImageProgress(image,OpaqueImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
break;
}
case PseudoClass:
{
/*
Make PseudoClass image opaque.
*/
/* Rewriting colormap entries updates every pixel that references them. */
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsColorSimilar(image,&image->colormap[i],&target) != MagickFalse)
image->colormap[i]=fill;
}
if (fill.opacity != OpaqueOpacity)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) != MagickFalse)
q->opacity=fill.opacity;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
}
/* Propagate the updated colormap into the pixel cache. */
(void) SyncImage(image);
break;
}
}
/* A translucent fill turns on the image's matte channel. */
if (fill.opacity != OpaqueOpacity)
image->matte=MagickTrue;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenCacheView() opens a view into the pixel cache, using the
% VirtualPixelMethod that is defined within the given image itself.
%
% Deprecated, replace with:
%
% AcquireVirtualCacheView(image,&image->exception);
%
% The format of the OpenCacheView method is:
%
% CacheView *OpenCacheView(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheView *OpenCacheView(const Image *image)
{
  CacheView
    *cache_view;

  /*
    Deprecated: use AcquireVirtualCacheView() instead.  The const is cast
    away only to reach the image's own exception member.
  */
  cache_view=AcquireVirtualCacheView(image,&((Image *) image)->exception);
  return(cache_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n M a g i c k S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenMagickStream() opens the file at the specified path and return the
% associated stream.
%
% The path of the OpenMagickStream method is:
%
% FILE *OpenMagickStream(const char *path,const char *mode)
%
% A description of each parameter follows.
%
% o path: the file path.
%
% o mode: the file mode.
%
*/
#if defined(MAGICKCORE_HAVE__WFOPEN)
static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16)
{
  /*
    Decode a NUL-terminated UTF-8 string into UTF-16 code units.

    Two modes:
      utf16 != NULL: convert in place and return the number of code units
        written, including the trailing NUL.
      utf16 == NULL: scan only and return the number of UTF-8 bytes, which
        is an upper bound on the code units needed (used to size the
        destination buffer).
    Either mode returns 0 on a malformed sequence and on 4-byte (outside
    the Basic Multilingual Plane) sequences, which are not supported here;
    the caller then falls back to a byte-for-byte copy.
  */
  const unsigned char
    *p;

  if (utf16 != (wchar_t *) NULL)
    {
      wchar_t
        *q;

      wchar_t
        c;

      /*
        Convert UTF-8 to UTF-16.
      */
      q=utf16;
      for (p=utf8; *p != '\0'; p++)
      {
        if ((*p & 0x80) == 0)
          *q=(*p);  /* 1-byte (ASCII) sequence */
        else
          if ((*p & 0xE0) == 0xC0)
            {
              /* 2-byte sequence: 110xxxxx 10xxxxxx */
              c=(*p);
              *q=(c & 0x1F) << 6;
              p++;
              if ((*p & 0xC0) != 0x80)
                return(0);
              *q|=(*p & 0x3F);
            }
          else
            if ((*p & 0xF0) == 0xE0)
              {
                /*
                  3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.
                  NOTE(review): the lead byte is shifted without masking
                  off the 0xE0 marker bits; this relies on wchar_t being
                  16 bits (as on the Windows builds that compile this) so
                  the marker bits truncate away -- confirm if this path is
                  ever built where wchar_t is wider.
                */
                c=(*p);
                *q=c << 12;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                c=(*p);
                *q|=(c & 0x3F) << 6;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                *q|=(*p & 0x3F);
              }
            else
              return(0);  /* 4-byte or invalid lead byte: unsupported */
        q++;
      }
      *q++='\0';
      return(q-utf16);
    }
  /*
    Compute UTF-16 string length (validation-only pass).
  */
  for (p=utf8; *p != '\0'; p++)
  {
    if ((*p & 0x80) == 0)
      ;
    else
      if ((*p & 0xE0) == 0xC0)
        {
          p++;
          if ((*p & 0xC0) != 0x80)
            return(0);
        }
      else
        if ((*p & 0xF0) == 0xE0)
          {
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
          }
        else
          return(0);
  }
  return(p-utf8);
}
static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source)
{
  /*
    Convert a NUL-terminated UTF-8 string to a freshly allocated UTF-16
    string.  If the source is not valid UTF-8 (or uses sequences
    UTF8ToUTF16() does not support), the bytes are widened one-for-one
    instead.  Returns NULL on allocation failure; the caller owns the
    result and must free it with RelinquishMagickMemory().
  */
  size_t
    length;

  wchar_t
    *utf16;

  length=UTF8ToUTF16(source,(wchar_t *) NULL);
  if (length == 0)
    {
      ssize_t
        i;

      /*
        Not UTF-8, just copy byte-for-byte (including the trailing NUL).
      */
      length=strlen((const char *) source);
      utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
      if (utf16 == (wchar_t *) NULL)
        return((wchar_t *) NULL);
      for (i=0; i <= (ssize_t) length; i++)
        utf16[i]=source[i];
      return(utf16);
    }
  utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
  if (utf16 == (wchar_t *) NULL)
    return((wchar_t *) NULL);
  /*
    The scan pass above sized the buffer; the conversion pass writes the
    code units (its return value, previously stored into a dead variable,
    is intentionally discarded).
  */
  (void) UTF8ToUTF16(source,utf16);
  return(utf16);
}
#endif
MagickExport FILE *OpenMagickStream(const char *path,const char *mode)
{
  /*
    Open the file at path with the given stdio mode and return the stream.
    On builds with _wfopen() (Windows), path and mode are first converted
    from UTF-8 to UTF-16 so non-ASCII filenames work; if the wide-character
    open fails (or on other platforms), plain fopen() is used.  Returns
    NULL with errno set to EINVAL when either argument is NULL.
  */
  FILE
    *file;

  if ((path == (const char *) NULL) || (mode == (const char *) NULL))
    {
      errno=EINVAL;
      return((FILE *) NULL);
    }
  file=(FILE *) NULL;
#if defined(MAGICKCORE_HAVE__WFOPEN)
  {
    wchar_t
      *unicode_mode,
      *unicode_path;

    unicode_path=ConvertUTF8ToUTF16((const unsigned char *) path);
    if (unicode_path == (wchar_t *) NULL)
      return((FILE *) NULL);
    unicode_mode=ConvertUTF8ToUTF16((const unsigned char *) mode);
    if (unicode_mode == (wchar_t *) NULL)
      {
        /* release the path before bailing out */
        unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
        return((FILE *) NULL);
      }
    file=_wfopen(unicode_path,unicode_mode);
    unicode_mode=(wchar_t *) RelinquishMagickMemory(unicode_mode);
    unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
  }
#endif
  /* fall back to the narrow-character open if the wide open failed */
  if (file == (FILE *) NULL)
    file=fopen(path,mode);
  return(file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintFloodfill() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% Deprecated, replace with:
%
% FloodfillPaintImage(image,channel,draw_info,target,x,y,
% method == FloodfillMethod ? MagickFalse : MagickTrue);
%
% The format of the PaintFloodfillImage method is:
%
% MagickBooleanType PaintFloodfillImage(Image *image,
% const ChannelType channel,const MagickPixelPacket target,
% const ssize_t x,const ssize_t y,const DrawInfo *draw_info,
% const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o draw_info: the draw info.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType PaintFloodfillImage(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,const ssize_t x,
  const ssize_t y,const DrawInfo *draw_info,const PaintMethod method)
{
  /*
    Deprecated: forwards to FloodfillPaintImage().  FillToBorderMethod is
    expressed there as an inverted floodfill.
  */
  return(FloodfillPaintImage(image,channel,draw_info,target,x,y,
    method == FloodfillMethod ? MagickFalse : MagickTrue));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% P a i n t O p a q u e I m a g e %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse);
% OpaquePaintImageChannel(image,channel,target,fill,MagickFalse);
%
% The format of the PaintOpaqueImage method is:
%
% MagickBooleanType PaintOpaqueImage(Image *image,
% const PixelPacket *target,const PixelPacket *fill)
% MagickBooleanType PaintOpaqueImageChannel(Image *image,
% const ChannelType channel,const PixelPacket *target,
% const PixelPacket *fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType PaintOpaqueImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill)
{
  /*
    Deprecated: forwards to OpaquePaintImageChannel() over the default
    channels without inversion.
  */
  return(OpaquePaintImageChannel(image,DefaultChannels,target,fill,
    MagickFalse));
}
MagickExport MagickBooleanType PaintOpaqueImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill)
{
  MagickBooleanType
    status;

  /*
    Deprecated: forwards to OpaquePaintImageChannel() without inversion.
  */
  status=OpaquePaintImageChannel(image,channel,target,fill,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintTransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% TransparentPaintImage(image,target,opacity,MagickFalse);
%
% The format of the PaintTransparentImage method is:
%
% MagickBooleanType PaintTransparentImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType PaintTransparentImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity)
{
  MagickBooleanType
    status;

  /*
    Deprecated: forwards to TransparentPaintImage() without inversion.
  */
  status=TransparentPaintImage(image,target,opacity,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P a r s e I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseImageGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <,
% and >.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the ParseImageGeometry method is:
%
% int ParseImageGeometry(char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o flags: Method ParseImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o image_geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y,
  size_t *width,size_t *height)
{
  MagickStatusType
    flags;

  /*
    Deprecated since v5.5.1: forwards to ParseMetaGeometry() and narrows
    the flag mask to int for the legacy signature.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  flags=ParseMetaGeometry(geometry,x,y,width,height);
  return((int) flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a r s e S i z e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseSizeGeometry() returns a region as defined by the geometry string with
% respect to the image dimensions and aspect ratio.
%
% Deprecated, replace with:
%
%      ParseMetaGeometry(geometry,&region_info->x,&region_info->y,
%        &region_info->width,&region_info->height);
%
% The format of the ParseSizeGeometry method is:
%
% MagickStatusType ParseSizeGeometry(const Image *image,
% const char *geometry,RectangeInfo *region_info)
%
% A description of each parameter follows:
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o region_info: the region as defined by the geometry string.
%
*/
MagickExport MagickStatusType ParseSizeGeometry(const Image *image,
  const char *geometry,RectangleInfo *region_info)
{
  /*
    Deprecated since v6.4.7.  Seed region_info from the image via
    SetGeometry(), then let ParseMetaGeometry() apply the geometry string
    (honoring the %, !, <, > meta characters) and return its flag mask.
    Note: the call below repairs source text corrupted by an HTML-entity
    mis-encoding ("&reg" rendered as the registered-trademark sign) that
    mangled the &region_info arguments.
  */
  MagickStatusType
    flags;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.4.7");
  SetGeometry(image,region_info);
  flags=ParseMetaGeometry(geometry,&region_info->x,&region_info->y,
    &region_info->width,&region_info->height);
  return(flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImageList() removes the last image in the list.
%
% Deprecated, replace with:
%
% RemoveLastImageFromList(images);
%
% The format of the PopImageList method is:
%
% Image *PopImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *PopImageList(Image **images)
{
  Image
    *image;

  /*
    Deprecated since v5.5.2: forwards to RemoveLastImageFromList().
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  image=RemoveLastImageFromList(images);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImagePixels() transfers one or more pixel components from the image pixel
% cache to a user supplied buffer. The pixels are returned in network byte
% order. MagickTrue is returned if the pixels are successfully transferred,
% otherwise MagickFalse.
%
% The format of the PopImagePixels method is:
%
% size_t PopImagePixels(Image *,const QuantumType quantum,
% unsigned char *destination)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (RGB, RGBA, etc).
%
% o destination: The components are transferred to this buffer.
%
*/
MagickExport size_t PopImagePixels(Image *image,const QuantumType quantum,
  unsigned char *destination)
{
  QuantumInfo
    *quantum_info;

  size_t
    extent;

  /*
    Deprecated: export pixels through the quantum framework in network
    byte order.  Returns the number of bytes written, or 0 if the quantum
    info cannot be acquired.
  */
  quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  extent=ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
    quantum,destination,&image->exception);
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t s c r i p t G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PostscriptGeometry() replaces any page mnemonic with the equivalent size in
% picas.
%
% Deprecated, replace with:
%
% GetPageGeometry(page);
%
% The format of the PostscriptGeometry method is:
%
% char *PostscriptGeometry(const char *page)
%
% A description of each parameter follows.
%
% o page: Specifies a pointer to an array of characters.
% The string is either a Postscript page name (e.g. A4) or a postscript
% page geometry (e.g. 612x792+36+36).
%
*/
MagickExport char *PostscriptGeometry(const char *page)
{
  char
    *geometry;

  /*
    Deprecated since v5.5.1: forwards to GetPageGeometry().
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  geometry=GetPageGeometry(page);
  return(geometry);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImageList() adds an image to the end of the list.
%
% Deprecated, replace with:
%
% AppendImageToList(images,CloneImageList(image,exception));
%
% The format of the PushImageList method is:
%
% unsigned int PushImageList(Image *images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int PushImageList(Image **images,const Image *image,
  ExceptionInfo *exception)
{
  Image
    *clone_list;

  /*
    Deprecated since v5.5.2: append a clone of the source list to the
    destination list.  Always reports success, matching the historic
    behavior.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone_list=CloneImageList(image,exception);
  AppendImageToList(images,clone_list);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImagePixels() transfers one or more pixel components from a user
% supplied buffer into the image pixel cache of an image. The pixels are
% expected in network byte order. It returns MagickTrue if the pixels are
% successfully transferred, otherwise MagickFalse.
%
% The format of the PushImagePixels method is:
%
% size_t PushImagePixels(Image *image,const QuantumType quantum,
% const unsigned char *source)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (red, green, blue,
% opacity, RGB, or RGBA).
%
% o source: The pixel components are transferred from this buffer.
%
*/
MagickExport size_t PushImagePixels(Image *image,const QuantumType quantum,
  const unsigned char *source)
{
  QuantumInfo
    *quantum_info;

  size_t
    extent;

  /*
    Deprecated: import pixels through the quantum framework (source is
    expected in network byte order).  Returns the number of bytes read,
    or 0 if the quantum info cannot be acquired.
  */
  quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  extent=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,quantum,
    source,&image->exception);
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z a t i o n E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizationError() measures the difference between the original and
% quantized images. This difference is the total quantization error. The
% error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
%      maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% Deprecated, replace with:
%
% GetImageQuantizeError(image);
%
% The format of the QuantizationError method is:
%
% unsigned int QuantizationError(Image *image)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
*/
MagickExport unsigned int QuantizationError(Image *image)
{
  MagickBooleanType
    status;

  /*
    Deprecated since v5.5.3: forwards to GetImageQuantizeError().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  status=GetImageQuantizeError(image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RadialBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RadialBlurImage method is:
%
% Image *RadialBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RadialBlurImageChannel(const Image *image,const ChannelType channel,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  Image
    *blur_image;

  /*
    Deprecated: forwards to RotationalBlurImage().
  */
  blur_image=RotationalBlurImage(image,angle,exception);
  return(blur_image);
}
MagickExport Image *RadialBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /*
    Deprecated: forwards to RotationalBlurImageChannel().
  */
  blur_image=RotationalBlurImageChannel(image,channel,angle,exception);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% R a n d o m C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomChannelThresholdImage() changes the value of individual pixels based
% on the intensity of each pixel compared to a random threshold. The result
% is a low-contrast, two color image.
%
% The format of the RandomChannelThresholdImage method is:
%
% unsigned int RandomChannelThresholdImage(Image *image,
% const char *channel, const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing LOWxHIGH thresholds.
% If the string contains 2x2, 3x3, or 4x4, then an ordered
% dither of order 2, 3, or 4 will be performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int RandomChannelThresholdImage(Image *image,
  const char *channel,const char *thresholds,ExceptionInfo *exception)
{
#define RandomChannelThresholdImageText " RandomChannelThreshold image... "

  /*
    Deprecated since v5.5.7.  Threshold each pixel of the selected
    channel(s) against either a random value within a LOWxHIGH range
    (order 1) or a fixed ordered-dither matrix (orders 2, 3, 4).

    Fixes relative to the historic implementation:
      - the opacity/matte pass restarts at the row origin instead of
        continuing from wherever the intensity pass left q (when channel
        is "all" the old code walked past the end of the row);
      - threshold is a local instead of a mutable static (thread safety);
      - random_info is released before throwing UnrecognizedChannelType.
  */
  double
    lower_threshold,
    upper_threshold;

  MagickRealType
    threshold;

  RandomInfo
    *random_info;

  ssize_t
    count,
    y;

  static const MagickRealType
    o2[4]={0.2f, 0.6f, 0.8f, 0.4f},
    o3[9]={0.1f, 0.6f, 0.3f, 0.7f, 0.5f, 0.8f, 0.4f, 0.9f, 0.2f},
    o4[16]={0.1f, 0.7f, 1.1f, 0.3f, 1.0f, 0.5f, 1.5f, 0.8f, 1.4f, 1.6f, 0.6f,
      1.2f, 0.4f, 0.9f, 1.3f, 0.2f};

  size_t
    order;

  /*
    Threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  /*
    Parse the threshold specification: a dither order (2x2/3x3/4x4) or a
    LOWxHIGH range, optionally expressed as percentages of QuantumRange.
  */
  threshold=128;
  lower_threshold=0;
  upper_threshold=0;
  if (LocaleCompare(thresholds,"2x2") == 0)
    order=2;
  else
    if (LocaleCompare(thresholds,"3x3") == 0)
      order=3;
    else
      if (LocaleCompare(thresholds,"4x4") == 0)
        order=4;
      else
        {
          order=1;
          count=(ssize_t) sscanf(thresholds,"%lf[/x%%]%lf",&lower_threshold,
            &upper_threshold);
          if (strchr(thresholds,'%') != (char *) NULL)
            {
              upper_threshold*=(.01*QuantumRange);
              lower_threshold*=(.01*QuantumRange);
            }
          if (count == 1)
            upper_threshold=(MagickRealType) QuantumRange-lower_threshold;
        }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "  RandomChannelThresholdImage: channel type=%s",channel);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "    Thresholds: %s (%fx%f)",thresholds,lower_threshold,upper_threshold);
  /*
    Intensity thresholding produces a two-entry colormap image.
  */
  if (LocaleCompare(channel,"all") == 0 ||
      LocaleCompare(channel,"intensity") == 0)
    if (AcquireImageColormap(image,2) == MagickFalse)
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
  random_info=AcquireRandomInfo();
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index,
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict pixels,
      *magick_restrict q;

    ssize_t
      x;

    pixels=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (pixels == (PixelPacket *) NULL)
      break;
    q=pixels;
    if (LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"intensity") == 0)
      {
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            intensity;

          intensity=GetPixelIntensity(image,q);
          if (order == 1)
            {
              if (intensity < lower_threshold)
                threshold=lower_threshold;
              else if (intensity > upper_threshold)
                threshold=upper_threshold;
              else
                threshold=(MagickRealType) (QuantumRange*
                  GetPseudoRandomValue(random_info));
            }
          else if (order == 2)
            threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
          else if (order == 3)
            threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
          else if (order == 4)
            threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)];
          index=(IndexPacket) (intensity <= threshold ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
      }
    if (LocaleCompare(channel,"opacity") == 0 ||
        LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"matte") == 0)
      {
        /*
          Restart at the row origin: when channel is "all" the intensity
          pass above has already advanced q to the end of the row.
        */
        q=pixels;
        if (image->matte != MagickFalse)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (order == 1)
              {
                if ((MagickRealType) q->opacity < lower_threshold)
                  threshold=lower_threshold;
                else if ((MagickRealType) q->opacity > upper_threshold)
                  threshold=upper_threshold;
                else
                  threshold=(MagickRealType) (QuantumRange*
                    GetPseudoRandomValue(random_info));
              }
            else if (order == 2)
              threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
            else if (order == 3)
              threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
            else if (order == 4)
              threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)]/1.7;
            SetPixelOpacity(q,(MagickRealType) q->opacity <=
              threshold ? 0 : QuantumRange);
            q++;
          }
      }
    else
      {
        /* To Do: red, green, blue, cyan, magenta, yellow, black */
        if (LocaleCompare(channel,"intensity") != 0)
          {
            /* release the PRNG before the macro returns MagickFalse */
            random_info=DestroyRandomInfo(random_info);
            ThrowBinaryException(OptionError,"UnrecognizedChannelType",
              image->filename);
          }
      }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  random_info=DestroyRandomInfo(random_info);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReacquireMemory() changes the size of the memory and returns a pointer to
% the (possibly moved) block. The contents will be unchanged up to the
% lesser of the new and old sizes.
%
% The format of the ReacquireMemory method is:
%
% void ReacquireMemory(void **memory,const size_t size)
%
% A description of each parameter follows:
%
% o memory: A pointer to a memory allocation. On return the pointer
% may change but the contents of the original allocation will not.
%
% o size: the new size of the allocated memory.
%
*/
MagickExport void ReacquireMemory(void **memory,const size_t size)
{
  /*
    Deprecated since v5.5.7.  Resize *memory to size bytes, preserving the
    contents up to the lesser of the old and new sizes.  Unlike realloc(),
    on failure the ORIGINAL block is freed and *memory is set to NULL, so
    callers must not use the old pointer after a failed resize.
  */
  void
    *allocation;

  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory == (void *) NULL)
    {
      /* nothing to resize: behave like a fresh allocation */
      *memory=AcquireMagickMemory(size);
      return;
    }
  allocation=realloc(*memory,size);
  if (allocation == (void *) NULL)
    *memory=RelinquishMagickMemory(*memory);  /* frees and yields NULL */
  *memory=allocation;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RecolorImage() apply color transformation to an image. The method permits
% saturation changes, hue rotation, luminance to alpha, and various other
% effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the RecolorImage method is:
%
% Image *RecolorImage(const Image *image,const size_t order,
% const double *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o order: the number of columns and rows in the recolor matrix.
%
% o color_matrix: An array of double representing the recolor matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RecolorImage(const Image *image,const size_t order,
  const double *color_matrix,ExceptionInfo *exception)
{
  /*
    Deprecated: apply an order x order color transformation matrix by
    wrapping the caller's matrix in a KernelInfo and forwarding to
    ColorMatrixImage().  Returns the transformed image, or NULL on
    failure.
  */
  KernelInfo
    *kernel_info;

  Image
    *recolor_image;

  kernel_info=AcquireKernelInfo("1");
  if (kernel_info == (KernelInfo *) NULL)
    return((Image *) NULL);
  kernel_info->width=order;
  kernel_info->height=order;
  /*
    Borrow the caller's matrix; const is cast away, presumably because
    ColorMatrixImage() only reads the values -- confirm before reusing
    this pattern.
  */
  kernel_info->values=(double *) color_matrix;
  recolor_image=ColorMatrixImage(image,kernel_info,exception);
  /*
    Detach the borrowed matrix so DestroyKernelInfo() does not free
    memory the caller still owns.
  */
  kernel_info->values=(double *) NULL;
  kernel_info=DestroyKernelInfo(kernel_info);
  return(recolor_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceNoiseImage() smooths the contours of an image while still preserving
% edge information. The algorithm works by replacing each pixel with its
% neighbor closest in value. A neighbor is defined by radius. Use a radius
% of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the ReduceNoiseImage method is:
%
% Image *ReduceNoiseImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReduceNoiseImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated: equivalent to StatisticImage() with NonpeakStatistic over
    a square neighborhood of the given radius.
  */
  return(StatisticImage(image,NonpeakStatistic,(size_t) radius,
    (size_t) radius,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   R e l i n q u i s h S e m a p h o r e I n f o                             %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishSemaphoreInfo() relinquishes a semaphore.
%
% The format of the RelinquishSemaphoreInfo method is:
%
% RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
{
  /*
    Deprecated.  Despite its name, this merely unlocks the semaphore; it
    does not destroy or free it.
  */
  assert(semaphore_info != (SemaphoreInfo *) NULL);
  UnlockSemaphoreInfo(semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e A t t r i b u t e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageAttributeIterator() resets the image attributes iterator. Use it
% in conjunction with GetNextImageAttribute() to iterate over all the values
% associated with an image.
%
% Deprecated, replace with:
%
% ResetImagePropertyIterator(image);
%
% The format of the ResetImageAttributeIterator method is:
%
% ResetImageAttributeIterator(const ImageInfo *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageAttributeIterator(const Image *image)
{
  /*
    Deprecated: forwards to ResetImagePropertyIterator() so subsequent
    GetNextImageAttribute() calls start from the first property.
  */
  ResetImagePropertyIterator(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the SetCacheViewPixels method is:
%
% PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /*
    Deprecated: forwards to QueueCacheViewAuthenticPixels() using the
    view's own exception record.
  */
  return(QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t C a c h e T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheThreshold() sets the amount of free memory allocated for the pixel
% cache. Once this threshold is exceeded, all subsequent pixels cache
% operations are to/from disk.
%
% The format of the SetCacheThreshold() method is:
%
% void SetCacheThreshold(const size_t threshold)
%
% A description of each parameter follows:
%
% o threshold: the number of megabytes of memory available to the pixel
% cache.
%
*/
MagickExport void SetCacheThreshold(const size_t size)
{
  size_t
    bytes;

  /*
    Deprecated: limit pixel-cache memory to `size` megabytes and the
    memory-map resource to twice that amount.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  bytes=size*1024*1024;
  (void) SetMagickResourceLimit(MemoryResource,bytes);
  (void) SetMagickResourceLimit(MapResource,2*bytes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t E x c e p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetExceptionInfo() sets the exception severity.
%
% The format of the SetExceptionInfo method is:
%
% MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
% ExceptionType severity)
%
% A description of each parameter follows:
%
% o exception: the exception info.
%
% o severity: the exception severity.
%
*/
MagickExport MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
  ExceptionType severity)
{
  /*
    Clear any pending exception, then record the requested severity.
    Order matters: ClearMagickException() resets the exception state, so the
    new severity must be assigned afterwards.  Always returns MagickTrue.
  */
  assert(exception != (ExceptionInfo *) NULL);
  ClearMagickException(exception);
  exception->severity=severity;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImage() sets the red, green, and blue components of each pixel to
% the image background color and the opacity component to the specified
% level of transparency. The background color is defined by the
% background_color member of the image.
%
% The format of the SetImage method is:
%
% void SetImage(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: Set each pixel to this level of transparency.
%
*/
MagickExport void SetImage(Image *image,const Quantum opacity)
{
  PixelPacket
    background_color;

  ssize_t
    y;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.0");
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Start from the image background color; override its opacity unless the
    caller asked for fully opaque pixels.
  */
  background_color=image->background_color;
  if (opacity != OpaqueOpacity)
    background_color.opacity=opacity;
  if (background_color.opacity != OpaqueOpacity)
    {
      /* A transparent fill needs DirectClass storage and a matte channel. */
      (void) SetImageStorageClass(image,DirectClass);
      image->matte=MagickTrue;
    }
  if ((image->storage_class == PseudoClass) ||
      (image->colorspace == CMYKColorspace))
    {
      /*
        Set colormapped or CMYK image: fill the RGBO components with the
        background color and zero every colormap/black-channel index.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        IndexPacket
          *magick_restrict indexes;

        ssize_t
          x;

        PixelPacket
          *magick_restrict q;

        q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRGBO(q,&background_color);
          q++;
        }
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,0);
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
      }
      return;
    }
  /*
    Set DirectClass image: fill every pixel row with the background color.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRGBO(q,&background_color);
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAttribute() searches the list of image attributes and replaces the
% attribute value. If it is not found in the list, the attribute name
% and value is added to the list.
%
% Deprecated, replace with:
%
% SetImageProperty(image,key,value);
%
% The format of the SetImageAttribute method is:
%
% MagickBooleanType SetImageAttribute(Image *image,const char *key,
% const char *value)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: the key.
%
% o value: the value.
%
*/
MagickExport MagickBooleanType SetImageAttribute(Image *image,const char *key,
  const char *value)
{
  MagickBooleanType
    status;

  /* Deprecated: forwards to SetImageProperty(). */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  status=SetImageProperty(image,key,value);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageList() inserts an image into the list at the specified position.
%
% The format of the SetImageList method is:
%
% unsigned int SetImageList(Image *images,const Image *image,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int SetImageList(Image **images,const Image *image,
  const ssize_t offset,ExceptionInfo *exception)
{
  Image
    *clone;

  ssize_t
    i;

  /*
    Deprecated: insert a clone of `image` into the list at position `offset`
    (counted from the head of the list).  Returns MagickTrue on success,
    MagickFalse if the clone fails or `offset` is past the end of the list.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(image,exception);
  if (clone == (Image *) NULL)
    return(MagickFalse);  /* clone failed; error already in `exception` */
  while (GetPreviousImageInList(*images) != (Image *) NULL)
    (*images)=GetPreviousImageInList(*images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(*images) == (Image *) NULL)
      {
        /* Fix: destroy the clone instead of leaking it on failure. */
        clone=DestroyImageList(clone);
        return(MagickFalse);
      }
    (*images)=GetNextImageInList(*images);
  }
  InsertImageInList(images,clone);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImagePixels() queues a mutable pixel region.
% If the region is successfully initialized a pointer to a PixelPacket
% array representing the region is returned, otherwise NULL is returned.
% The returned pointer may point to a temporary working buffer for the
% pixels or it may point to the final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful while the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% SetImagePixels() any way it pleases. SetImagePixels() does not initialize
% the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in RAM, or in a
% memory-mapped file. The returned pointer should *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% QueueAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the SetImagePixels() method is:
%
% PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o pixels: SetImagePixels returns a pointer to the pixels if they are
% transferred, otherwise a NULL is returned.
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  /* Deprecated: forwards to QueueAuthenticPixels(). */
  pixels=QueueAuthenticPixels(image,x,y,columns,rows,&image->exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMagickRegistry() sets a blob into the registry and returns a unique ID.
% If an error occurs, -1 is returned.
%
% The format of the SetMagickRegistry method is:
%
% ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
% const size_t length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o type: the registry type.
%
% o blob: the address of a Binary Large OBject.
%
% o length: For a registry type of ImageRegistryType use sizeof(Image)
% otherwise the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
  const size_t magick_unused(length),ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  static ssize_t
    id = 0;

  magick_unreferenced(length);

  /*
    Deprecated: register `blob` under a monotonically increasing numeric key
    and return that id, or -1 on failure.
    NOTE(review): the static counter has no synchronization — confirm
    callers serialize access.
  */
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  if (SetImageRegistry(type,key,blob,exception) == MagickFalse)
    return(-1);
  return(id++);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M o n i t o r H a n d l e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMonitorHandler() sets the monitor handler to the specified method
% and returns the previous monitor handler.
%
% The format of the SetMonitorHandler method is:
%
% MonitorHandler SetMonitorHandler(MonitorHandler handler)
%
% A description of each parameter follows:
%
% o handler: Specifies a pointer to a method to handle monitors.
%
*/
MagickExport MonitorHandler GetMonitorHandler(void)
{
  /* Return the currently installed (deprecated) progress-monitor handler. */
  return(monitor_handler);
}
MagickExport MonitorHandler SetMonitorHandler(MonitorHandler handler)
{
  MonitorHandler
    old_handler;

  /* Install `handler` as the progress monitor; return the prior handler. */
  old_handler=monitor_handler;
  monitor_handler=handler;
  return(old_handler);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShiftImageList() removes an image from the beginning of the list.
%
% Deprecated, replace with:
%
% RemoveFirstImageFromList(images);
%
% The format of the ShiftImageList method is:
%
% Image *ShiftImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *ShiftImageList(Image **images)
{
  Image
    *first;

  /* Deprecated: pop and return the first image in the list. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  first=RemoveFirstImageFromList(images);
  return(first);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S i z e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SizeBlob() returns the current length of the image file or blob.
%
% Deprecated, replace with:
%
% GetBlobSize(image);
%
% The format of the SizeBlob method is:
%
% off_t SizeBlob(Image *image)
%
% A description of each parameter follows:
%
% o size: Method SizeBlob returns the current length of the image file
% or blob.
%
% o image: the image.
%
*/
MagickExport MagickOffsetType SizeBlob(Image *image)
{
  MagickOffsetType
    extent;

  /* Deprecated: forwards to GetBlobSize(). */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  extent=(MagickOffsetType) GetBlobSize(image);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImageList() removes the images designated by offset and length from
% the list and replaces them with the specified list.
%
% The format of the SpliceImageList method is:
%
% Image *SpliceImageList(Image *images,const ssize_t offset,
% const size_t length,const Image *splices,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o length: the length of the image list to remove.
%
% o splice: Replace the removed image list with this list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImageList(Image *images,const ssize_t offset,
  const size_t length,const Image *splices,ExceptionInfo *exception)
{
  Image
    *clone;

  ssize_t
    i;

  /*
    Deprecated: remove `length` images starting at `offset` and replace them
    with a clone of `splices`.  Returns the head of the modified list, or
    NULL if the clone fails or `offset` is past the end of the list.
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(splices,exception);
  if (clone == (Image *) NULL)
    return((Image *) NULL);  /* clone failed; error already in `exception` */
  while (GetPreviousImageInList(images) != (Image *) NULL)
    images=GetPreviousImageInList(images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(images) == (Image *) NULL)
      {
        /* Fix: destroy the clone instead of leaking it on failure. */
        clone=DestroyImageList(clone);
        return((Image *) NULL);
      }
    images=GetNextImageInList(images);
  }
  (void) SpliceImageIntoList(&images,length,clone);
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBCompandor() adds the gamma function to a sRGB pixel.
%
% The format of the sRGBCompandor method is:
%
% MagickRealType sRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
MagickExport MagickRealType sRGBCompandor(const MagickRealType pixel)
{
  /*
    Apply the sRGB transfer (gamma) function: a linear segment below the
    cutoff, a 1/2.4 power-law segment above it.
  */
  if (pixel > (0.0031306684425005883*QuantumRange))
    return(QuantumRange*(1.055*pow(QuantumScale*pixel,1.0/2.4)-0.055));
  return(12.92*pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Strip() strips any whitespace or quotes from the beginning and end of a
% string of characters.
%
% The format of the Strip method is:
%
% void Strip(char *message)
%
% A description of each parameter follows:
%
% o message: Specifies an array of characters.
%
*/
MagickExport void Strip(char *message)
{
  char
    *p,
    *q;

  assert(message != (char *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  /* Empty and single-character strings are returned untouched. */
  if (*message == '\0')
    return;
  if (strlen(message) == 1)
    return;
  /* Advance p past leading whitespace, then at most one opening quote. */
  p=message;
  while (isspace((int) ((unsigned char) *p)) != 0)
    p++;
  if ((*p == '\'') || (*p == '"'))
    p++;
  /* Retreat q past trailing whitespace, then at most one closing quote;
     never move q to the left of p. */
  q=message+strlen(message)-1;
  while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p))
    q--;
  if (q > p)
    if ((*q == '\'') || (*q == '"'))
      q--;
  /* Shift the stripped span [p..q] to the front in place and re-terminate. */
  (void) memcpy(message,p,(size_t) (q-p+1));
  message[q-p+1]='\0';
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheView() saves the cache view pixels to the in-memory or disk
% cache. It returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheView method is:
%
% MagickBooleanType SyncCacheView(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport MagickBooleanType SyncCacheView(CacheView *cache_view)
{
  /* Deprecated: forwards to SyncCacheViewAuthenticPixels(). */
  return(SyncCacheViewAuthenticPixels(cache_view,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheViewPixels() saves the cache view pixels to the in-memory
% or disk cache. It returns MagickTrue if the pixel region is flushed,
% otherwise MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheViewPixels method is:
%
% MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
{
  /* Deprecated: forwards to SyncCacheViewAuthenticPixels(). */
  return(SyncCacheViewAuthenticPixels(cache_view,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncAuthenticPixels(image,&image->exception);
%
% The format of the SyncImagePixels() method is:
%
% MagickBooleanType SyncImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagePixels(Image *image)
{
  MagickBooleanType
    status;

  /* Deprecated: forwards to SyncAuthenticPixels(). */
  status=SyncAuthenticPixels(image,&image->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y s t e m C o m m a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SystemCommand() executes the specified command and waits until it
% terminates. The returned value is the exit status of the command.
%
% The format of the SystemCommand method is:
%
% int SystemCommand(const MagickBooleanType asynchronous,
% const MagickBooleanType verbose,const char *command,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o asynchronous: a value other than 0 executes the parent program
% concurrently with the new child process.
%
% o verbose: a value other than 0 prints the executed command before it is
% invoked.
%
% o command: this string is the command to execute.
%
% o exception: return any errors here.
%
*/
MagickExport int SystemCommand(const MagickBooleanType asynchronous,
  const MagickBooleanType verbose,const char *command,ExceptionInfo *exception)
{
  /*
    Deprecated: run `command` through the external-delegate machinery and
    return the command's exit status.
  */
  return(ExternalDelegateCommand(asynchronous,verbose,command,(char *) NULL,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e m p o r a r y F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TemporaryFilename() replaces the contents of path by a unique path name.
%
% The format of the TemporaryFilename method is:
%
% void TemporaryFilename(char *path)
%
% A description of each parameter follows.
%
% o path: Specifies a pointer to an array of characters. The unique path
% name is returned in this array.
%
*/
MagickExport void TemporaryFilename(char *path)
{
  /*
    Deprecated: write a unique temporary path name into `path`.  The file
    reserved while generating the name is released immediately, so another
    process may claim the same name before the caller opens it (TOCTOU
    race); prefer AcquireUniqueFilename() and keep the reservation.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  (void) AcquireUniqueFilename(path);
  (void) RelinquishUniqueFileResource(path);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel compared to threshold. The result is a
% high-contrast, two color image.
%
% The format of the ThresholdImage method is:
%
% unsigned int ThresholdImage(Image *image,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value
%
*/
MagickExport unsigned int ThresholdImage(Image *image,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"

  IndexPacket
    index;

  ssize_t
    y;

  /*
    Threshold image: reduce to a two-entry colormap, assigning each pixel
    index 0 or 1 by comparing its intensity against `threshold`.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (!AcquireImageColormap(image,2))
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      "UnableToThresholdImage");
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Dark pixels map to colormap entry 0, bright pixels to entry 1. */
      index=(IndexPacket) (GetPixelIntensity(image,q) <=
        threshold ? 0 : 1);
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  /* NOTE(review): returns MagickTrue even when a row failed to sync. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImageChannel() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ThresholdImageChannel method is:
%
% unsigned int ThresholdImageChannel(Image *image,const char *threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
*/
MagickExport unsigned int ThresholdImageChannel(Image *image,
  const char *threshold)
{
#define ThresholdImageTag "Threshold/Image"

  MagickPixelPacket
    pixel;

  GeometryInfo
    geometry_info;

  IndexPacket
    index;

  ssize_t
    y;

  unsigned int
    flags;

  /*
    Threshold image per channel: parse thresholds from the geometry string
    `threshold` (rho/sigma/xi/psi map to red/green/blue/opacity; missing
    channels default to the red value, opacity to OpaqueOpacity).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL threshold string is a successful no-op. */
  if (threshold == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  GetMagickPixelPacket(image,&pixel);
  flags=ParseGeometry(threshold,&geometry_info);
  pixel.red=geometry_info.rho;
  if (flags & SigmaValue)
    pixel.green=geometry_info.sigma;
  else
    pixel.green=pixel.red;
  if (flags & XiValue)
    pixel.blue=geometry_info.xi;
  else
    pixel.blue=pixel.red;
  if (flags & PsiValue)
    pixel.opacity=geometry_info.psi;
  else
    pixel.opacity=(MagickRealType) OpaqueOpacity;
  /* A trailing '%' scales the thresholds from percentages to quantum. */
  if (flags & PercentValue)
    {
      pixel.red*=QuantumRange/100.0f;
      pixel.green*=QuantumRange/100.0f;
      pixel.blue*=QuantumRange/100.0f;
      pixel.opacity*=QuantumRange/100.0f;
    }
  if (!(flags & SigmaValue))
    {
      /*
        Single-value threshold: bilevel the image; a zero threshold selects
        an automatic (dynamic) threshold.
      */
      if (!AcquireImageColormap(image,2))
        ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
          "UnableToThresholdImage");
      if (pixel.red == 0)
        (void) GetImageDynamicThreshold(image,2.0,2.0,&pixel,&image->exception);
    }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    if (IsMagickGray(&pixel) != MagickFalse)
      /* Gray threshold: compare intensity, write colormap indexes. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        index=(IndexPacket) (GetPixelIntensity(image,q) <= pixel.red ? 0 : 1);
        SetPixelIndex(indexes+x,index);
        SetPixelRed(q,image->colormap[(ssize_t) index].red);
        SetPixelGreen(q,image->colormap[(ssize_t) index].green);
        SetPixelBlue(q,image->colormap[(ssize_t) index].blue);
        q++;
      }
    else
      /* Per-channel threshold: bilevel each channel independently. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,(MagickRealType) q->red <= pixel.red
          ? 0 : QuantumRange);
        SetPixelGreen(q,(MagickRealType) q->green <= pixel.green
          ? 0 : QuantumRange);
        SetPixelBlue(q,(MagickRealType) q->blue <= pixel.blue
          ? 0 : QuantumRange);
        SetPixelOpacity(q,(MagickRealType) q->opacity <= pixel.opacity
          ? 0 : QuantumRange);
        q++;
      }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  /* NOTE(review): returns MagickTrue even when a row failed to sync. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformColorspace() converts the image to a specified colorspace.
% If the image is already in the requested colorspace, no work is performed.
% Note that the current colorspace is stored in the image colorspace member.
% The transformation matrices are not necessarily the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% Deprecated, replace with:
%
% TransformImageColorspace(image,colorspace);
%
% The format of the TransformColorspace method is:
%
% unsigned int TransformColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image to transform
%
% o colorspace: the desired colorspace.
%
*/
MagickExport unsigned int TransformColorspace(Image *image,
  const ColorspaceType colorspace)
{
  unsigned int
    status;

  /* Deprecated: forwards to TransformImageColorspace(). */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  status=(unsigned int) TransformImageColorspace(image,colorspace);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m H S L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformHSL() converts a (red, green, blue) to a (hue, saturation,
% lightness) triple.
%
% The format of the TransformHSL method is:
%
% void TransformHSL(const Quantum red,const Quantum green,
% const Quantum blue,double *hue,double *saturation,double *lightness)
%
% A description of each parameter follows:
%
% o red, green, blue: A Quantum value representing the red, green, and
% blue component of a pixel..
%
% o hue, saturation, lightness: A pointer to a double value representing a
% component of the HSL color space.
%
*/
MagickExport void TransformHSL(const Quantum red,const Quantum green,
  const Quantum blue,double *hue,double *saturation,double *lightness)
{
  MagickRealType
    b,
    delta,
    g,
    max,
    min,
    r;

  /*
    Convert RGB to HSL colorspace.  Outputs are normalized to [0, 1]:
    hue 0 is red, lightness 0 is black, saturation 0 is gray.
  */
  assert(hue != (double *) NULL);
  assert(saturation != (double *) NULL);
  assert(lightness != (double *) NULL);
  /* Normalize quantum components to [0, 1]. */
  r=QuantumScale*red;
  g=QuantumScale*green;
  b=QuantumScale*blue;
  max=MagickMax(r,MagickMax(g,b));
  min=MagickMin(r,MagickMin(g,b));
  *hue=0.0;
  *saturation=0.0;
  *lightness=(double) ((min+max)/2.0);
  delta=max-min;
  /* Achromatic (gray) pixel: hue and saturation stay zero. */
  if (delta == 0.0)
    return;
  *saturation=(double) (delta/((*lightness < 0.5) ? (min+max) :
    (2.0-max-min)));
  /* Select the hue sextant by whichever channel is the maximum. */
  if (r == max)
    *hue=(double) (g == min ? 5.0+(max-b)/delta : 1.0-(max-g)/delta);
  else
    if (g == max)
      *hue=(double) (b == min ? 1.0+(max-r)/delta : 3.0-(max-b)/delta);
    else
      *hue=(double) (r == min ? 3.0+(max-g)/delta : 5.0-(max-r)/delta);
  /* Scale from sextants (0..6) to the normalized range [0, 1]. */
  *hue/=6.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s l a t e T e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TranslateText() replaces any embedded formatting characters with the
% appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the TranslateText method is:
%
% char *TranslateText(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
MagickExport char *TranslateText(const ImageInfo *image_info,Image *image,
  const char *embed_text)
{
  /*
    Deprecated: forwards to InterpretImageProperties().  Returns a newly
    allocated translated string; the caller is responsible for freeing it.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.6");
  return(InterpretImageProperties(image_info,image,embed_text));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the TransparentImage method is:
%
% MagickBooleanType TransparentImage(Image *image,
% const PixelPacket target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType TransparentImage(Image *image,
  const PixelPacket target,const Quantum opacity)
{
#define TransparentImageTag "Transparent/Image"

  MagickBooleanType
    proceed;

  ssize_t
    y;

  /*
    Make image color transparent: every pixel that matches `target` (within
    the image fuzz factor, via IsColorSimilar) gets the given opacity.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Ensure the image has a matte channel before writing opacity values. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsColorSimilar(image,q,&target) != MagickFalse)
        q->opacity=opacity;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
    /* Report row-granularity progress; callers may abort via the monitor. */
    proceed=SetImageProgress(image,TransparentImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /* NOTE(review): returns MagickTrue even when the loop exited early. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnshiftImageList() adds the image to the beginning of the list.
%
% Deprecated, replace with:
%
% PrependImageToList(images,CloneImageList(image,exception));
%
% The format of the UnshiftImageList method is:
%
% unsigned int UnshiftImageList(Image *images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int UnshiftImageList(Image **images,const Image *image,
  ExceptionInfo *exception)
{
  Image
    *clone;

  /* Deprecated: prepend a clone of `image` to the list; always MagickTrue. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(image,exception);
  PrependImageToList(images,clone);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ V a l i d a t e C o l o r m a p I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ValidateColormapIndex() validates the colormap index. If the index does
% not range from 0 to the number of colors in the colormap, an exception is
% issued and 0 is returned.
%
% Deprecated, replace with:
%
% ConstrainColormapIndex(image,index);
%
% The format of the ValidateColormapIndex method is:
%
% IndexPacket ValidateColormapIndex(Image *image,const unsigned int index)
%
% A description of each parameter follows:
%
% o index: Method ValidateColormapIndex returns colormap index if it is
% valid other an exception issued and 0 is returned.
%
% o image: the image.
%
% o index: This integer is the colormap index.
%
*/
MagickExport IndexPacket ValidateColormapIndex(Image *image,
  const size_t index)
{
  IndexPacket
    constrained;

  /* Deprecated: forwards to ConstrainColormapIndex(). */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.4");
  constrained=ConstrainColormapIndex(image,index);
  return(constrained);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z o o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZoomImage() creates a new image that is a scaled size of an existing one.
% It allocates the memory necessary for the new Image structure and returns a
% pointer to the new image. The Point filter gives fast pixel replication,
% Triangle is equivalent to bi-linear interpolation, and Mitchell gives slower,
% very high-quality results. See Graphic Gems III for details on this
% algorithm.
%
% The filter member of the Image structure specifies which image filter to
% use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
% The format of the ZoomImage method is:
%
% Image *ZoomImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: An integer that specifies the number of columns in the zoom
% image.
%
% o rows: An integer that specifies the number of rows in the scaled
% image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ZoomImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /*
    Deprecated convenience wrapper: scale `image` to columns x rows with
    ResizeImage(), reusing the filter and blur settings already stored on
    the image itself.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
}
#endif
|
hello-omp-pip.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pip.h>
/* Spawn an OpenMP parallel region inside a PiP task and print the PiP id
 * together with each OpenMP thread id. */
int main( int argc, char **argv ) {
  int pipid, tid;

  /* Initialize PiP; bail out instead of running with an undefined pipid
   * (the return value was previously ignored). */
  if( pip_init( &pipid, NULL, NULL, 0 ) != 0 ) {
    fprintf( stderr, "pip_init() failed\n" );
    return 1;
  }
  printf( "<%d> %s\n", pipid, pip_get_mode_str() );
#pragma omp parallel private(tid)
  {
#ifdef _OPENMP
    tid = omp_get_thread_num();
#else
    tid = 0; /* serial fallback when built without OpenMP */
#endif
    printf( "Hello from PiP-OMP thread %d:%d\n", pipid, tid );
  }
  pip_fin();
  return 0;
}
|
sol2.c | /**
* \file
* \brief [Problem 23](https://projecteuler.net/problem=23) solution -
* optimization using look-up array
* \author [Krishna Vedala](https://github.com/kvedala)
*
* Optimization applied - compute & store abundant numbers once
* into a look-up array.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* This is the global array to be used to store a flag to identify
* if a particular number is abundant (1) or not (0).
* Using a whole byte to store a binary info would be redundant.
* We will use each byte to represent 8 numbers by relying on bits.
* This saves memory required by 1/8
*/
char *abundant_flags = NULL;
/**
 * Classify N by comparing it with the sum of its proper divisors.
 * \param N number to classify
 * \returns -1 if N is deficient (divisor sum < N)
 * \returns 1 if N is abundant (divisor sum > N)
 * \returns 0 if N is perfect (divisor sum == N)
 */
char get_perfect_number(unsigned long N)
{
    /* 0 and 1 have no proper divisors greater than zero to sum: neither is
     * perfect nor abundant, so report them as deficient. (The old code
     * started `sum` at 1 unconditionally and mis-classified N==1 as
     * perfect and N==0 as abundant.) */
    if (N < 2)
    {
        return -1;
    }
    unsigned long sum = 1; /* 1 divides every N >= 2 */
    char ret = 0;
    for (unsigned long i = 2; i * i <= N; i++)
    {
        if (N % i == 0)
        {
            sum += i;
            unsigned long tmp = N / i; /* complementary divisor */
            if (tmp != i)              /* avoid double-counting sqrt(N) */
            {
                sum += tmp;
            }
        }
    }
    ret = sum == N ? 0 : (sum > N ? 1 : -1);
#ifdef DEBUG
    printf("%5lu: %5lu : %d\n", N, sum, ret);
#endif
    return ret;
}
/**
 * Is the given number an abundant number (1) or not (0)?
 * Reads the answer from the global `abundant_flags` bit array, which must
 * already have been populated (see main): bit (N & 7) of byte (N >> 3)
 * holds the flag for N.
 */
char is_abundant(unsigned long N)
{
    // return abundant_flags[N >> 3] & (1 << N % 8) ? 1 : 0;
    return abundant_flags[N >> 3] & (1 << (N & 7))
               ? 1
               : 0; /* optimized modulo operation */
}
/**
 * Return the first abundant number strictly greater than N.
 * Scans upward one candidate at a time, consulting the global
 * `abundant_flags` bit array via is_abundant().
 */
unsigned long get_next_abundant(unsigned long N)
{
    unsigned long candidate = N + 1;
    while (!is_abundant(candidate))
        candidate++;
    return candidate;
}
/**
 * Decide whether N can be written as the sum of two abundant numbers.
 * Walks the abundant numbers a <= N/2 and checks whether the complement
 * N - a is abundant as well, so each unordered pair is examined once.
 * \returns 1 if such a decomposition exists
 * \returns 0 otherwise
 */
char is_sum_of_abundant(unsigned long N)
{
    const unsigned long half = N >> 1;
    unsigned long a = get_next_abundant(1); /* smallest abundant number */
    while (a <= half)
    {
        if (is_abundant(N - a))
        {
#ifdef DEBUG
            printf("\t%4lu + %4lu = %4lu\n", a, N - a, N);
#endif
            return 1;
        }
        a = get_next_abundant(a);
    }
    return 0;
}
/**
 * Main function: compute the sum of all positive integers that cannot be
 * written as the sum of two abundant numbers (Project Euler problem 23).
 * An optional command-line argument overrides the default search limit.
 */
int main(int argc, char **argv)
{
    unsigned long MAX_N = 28123; /* limit of numbers to check */
    unsigned long sum = 0;
    if (argc == 2)
    {
        MAX_N = strtoul(argv[1], NULL, 10);
    }
    /* Bit array of abundance flags, one bit per number. Allocate
     * (MAX_N >> 3) + 1 bytes: the flag for N == MAX_N lives in byte
     * MAX_N >> 3, so the previous `calloc(MAX_N >> 3, 1)` allocation was
     * written one byte out of bounds for the last partial byte. */
    abundant_flags = (char *)calloc((MAX_N >> 3) + 1, 1);
    if (!abundant_flags)
    {
        perror("Unable to allocate memory!");
        return -1;
    }
#ifdef _OPENMP
    printf("Using OpenMP parallelization with %d threads\n",
           omp_get_max_threads());
#else
    printf("Not using parallelization!\n");
#endif
    /* NOTE: clock() measures CPU time summed over all threads, so these
     * figures overstate wall time for parallel builds. */
    clock_t start_time = clock();
    /* Phase 1: mark every abundant number up to and including MAX_N */
    long N;
#ifdef _OPENMP
/* fix: `#pragma omp for` outside a parallel region is a no-op; a full
 * `parallel for` is required to actually spread the work */
#pragma omp parallel for schedule(runtime)
#endif
    for (N = 1; N <= (long)MAX_N; N++)
    {
        if (get_perfect_number(N) == 1)
        {
            int byte_offset = N & 7, index = N >> 3;
#ifdef _OPENMP
#pragma omp critical
#endif
            abundant_flags[index] |= 1 << byte_offset;
        }
    }
    clock_t end_time = clock();
    double t1 = 1e3 * (end_time - start_time) / CLOCKS_PER_SEC;
    printf("Time taken to get abundant numbers: %.4g ms\n", t1);
    /* Phase 2: sum every number that is NOT a sum of two abundant numbers */
    clock_t t2 = 0;
    long i;
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) reduction(+ : sum)
#endif
    for (i = 1; i < (long)MAX_N; i++)
    {
        clock_t start_time1 = clock();
        if (!is_sum_of_abundant(i))
        {
            sum += i; /* safe: `sum` is an OpenMP reduction variable */
        }
        clock_t end_time1 = clock();
#ifdef _OPENMP
#pragma omp critical
#endif
        t2 += end_time1 - start_time1;
        /* progress indicator; cast keeps the %lu conversions well-defined */
        printf("... %5lu: %8lu\r", (unsigned long)i, sum);
        if (i % 100 == 0)
        {
            fflush(stdout);
        }
    }
#ifdef DEBUG
    putchar('\n');
#endif
    double t22 = 1e3 * t2 / CLOCKS_PER_SEC;
    printf("Time taken for final sum: %.4g ms\nTotal Time taken: %.4g ms\n",
           t22, t1 + t22);
    printf("Memory used: %lu bytes\n", (MAX_N >> 3) + 1);
    printf(
        "Sum of numbers that cannot be represented as sum of two abundant "
        "numbers : %lu\n",
        sum);
    free(abundant_flags);
    return 0;
}
|
batchnorm_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: bhu@openailab.com
* Update: hhchen@openailab.com
*/
#include "batchnorm_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <stdbool.h>
#include <string.h>
#include <math.h>
/* Per-node private state shared by the fp32 and uint8 reference kernels. */
struct ref_batchnorm_param
{
    int input_n; /* batch size */
    int input_h; /* input height */
    int input_w; /* input width */
    int input_c; /* channel count */
    int layout;  /* TENGINE_LAYOUT_NCHW or TENGINE_LAYOUT_NHWC */
    bool iscaffe; /* caffe flavor: skip the gamma/beta affine fold */
    float* scale_mean;    /* per-channel -mean*rescale/sqrt(var*rescale+eps); owned, set in prerun() */
    float* scale_var_inv; /* per-channel 1/sqrt(var*rescale+eps); owned, set in prerun() */
    float* gamma; /* per-channel scale, borrowed from the gamma tensor (may be NULL) */
    float* beta;  /* per-channel shift, borrowed from the beta tensor (may be NULL) */
    float in_scale; /* NOTE(review): these four quant fields look unused here —
                       the uint8 kernel reads quant params from the tensors;
                       confirm before removing */
    int in_zero;
    float out_scale;
    int out_zero;
};
static int ref_batchnorm_uint8(struct tensor* input_tensor, struct tensor* output_tensor, const struct ref_batchnorm_param* param, int num_thread)
{
float* scale_mean = param->scale_mean;
float* scale_var_inv = param->scale_var_inv;
float* gamma = param->gamma;
float* beta = param->beta;
int img_size = param->input_c * param->input_h * param->input_w;
int total_size = img_size * param->input_n;
// dequant
uint8_t* input_uint8 = input_tensor->data;
uint8_t* output_uint8 = output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
int32_t input_zero = input_tensor->zero_point;
int32_t output_zero = output_tensor->zero_point;
float* data_fp32 = (float*) sys_malloc(total_size * sizeof(float));
for(int i = 0; i < total_size; i++)
data_fp32[i] = ((float) input_uint8[i] - (float)input_zero) * input_scale;
for (int n = 0; n < param->input_n; ++n)
{
#pragma omp parallel for num_threads(num_thread)
for (int h = 0; h < param->input_h; ++h)
{
for (int w = 0; w < param->input_w; ++w)
{
for (int c = 0; c < param->input_c; ++c)
{
float s_mean = scale_mean[c];
float s_var = scale_var_inv[c];
float s_val1 = s_mean;
float s_val2 = s_var;
if (!param->iscaffe)
{
float s_gamma = gamma[c];
float s_beta = beta[c];
s_val1 = s_beta + s_gamma * s_mean;
s_val2 = s_gamma * s_var;
}
int offset = 0;
if (TENGINE_LAYOUT_NCHW == param->layout)
{
offset = n * img_size + c * param->input_h * param->input_w + h * param->input_w + w;
}
else
{
offset = n * img_size + h * param->input_w * param->input_c + w * param->input_c + c;
}
data_fp32[offset] = data_fp32[offset] * s_val2 + s_val1;
}
}
}
}
// quant
for(int i=0; i<total_size; i++)
{
int udata = (int)roundf(data_fp32[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
return 0;
}
/* Reference fp32 batchnorm kernel: element-wise affine transform
 * output = input * s_val2 + s_val1, where the per-channel coefficients
 * fold mean/variance (and, for non-caffe flavor, gamma/beta) that were
 * precomputed in prerun(). Always returns 0. */
static int ref_batchnorm_fp32(float* input, float* output, const struct ref_batchnorm_param* param, int num_thread)
{
    float* scale_mean = param->scale_mean;
    float* scale_var_inv = param->scale_var_inv;
    float* gamma = param->gamma;
    float* beta = param->beta;
    int img_size = param->input_c * param->input_h * param->input_w;
    for (int n = 0; n < param->input_n; ++n)
    {
/* parallelize over rows; each (n,h,w,c) element is written exactly once */
#pragma omp parallel for num_threads(num_thread)
        for (int h = 0; h < param->input_h; ++h)
        {
            for (int w = 0; w < param->input_w; ++w)
            {
                for (int c = 0; c < param->input_c; ++c)
                {
                    float s_mean = scale_mean[c];
                    float s_var = scale_var_inv[c];
                    float s_val1 = s_mean;
                    float s_val2 = s_var;
                    if (!param->iscaffe)
                    {
                        /* non-caffe flavor: fold gamma/beta into the coefficients */
                        float s_gamma = gamma[c];
                        float s_beta = beta[c];
                        s_val1 = s_beta + s_gamma * s_mean;
                        s_val2 = s_gamma * s_var;
                    }
                    int offset = 0;
                    if (TENGINE_LAYOUT_NCHW == param->layout)
                    {
                        offset = n * img_size + c * param->input_h * param->input_w + h * param->input_w + w;
                    }
                    else
                    {
                        /* NHWC layout */
                        offset = n * img_size + h * param->input_w * param->input_c + w * param->input_c + c;
                    }
                    output[offset] = input[offset] * s_val2 + s_val1;
                }
            }
        }
    }
    return 0;
}
/* Allocate and zero the per-node private parameter block consumed by
 * prerun()/run() and freed in release_node().
 * \return 0 on success, -1 on allocation failure. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ref_batchnorm_param* batchnorm_op_param =
        (struct ref_batchnorm_param*)sys_malloc(sizeof(struct ref_batchnorm_param));
    if (batchnorm_op_param == NULL) /* fix: memset previously wrote through NULL on OOM */
        return -1;
    memset(batchnorm_op_param, 0, sizeof(struct ref_batchnorm_param));
    exec_node->ops_priv = batchnorm_op_param;
    return 0;
}
/* Free the private parameter block allocated in init_node(). */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    sys_free(exec_node->ops_priv);
    return 0;
}
/* Precompute the per-channel affine coefficients from the mean/var input
 * tensors and stash them (plus borrowed gamma/beta pointers and layout)
 * in the node's private parameter block.
 * \return 0 on success, -1 on unknown layout or allocation failure. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    const struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    const struct tensor* mean_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[3]);
    const struct tensor* var_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[4]);
    (void)output_tensor; /* fetched for parity with other ops; unused here */

    struct ref_batchnorm_param* op_param = (struct ref_batchnorm_param*)exec_node->ops_priv;
    struct batchnorm_param* batchnorm_param = (struct batchnorm_param*)ir_node->op.param_mem;

    int channel_num;
    if (ir_graph->graph_layout == TENGINE_LAYOUT_NCHW)
    {
        channel_num = input_tensor->dims[1];
    }
    else if (ir_graph->graph_layout == TENGINE_LAYOUT_NHWC)
    {
        channel_num = input_tensor->dims[3];
    }
    else
    {
        /* fix: channel_num was used uninitialized for any other layout */
        return -1;
    }

    float* scale_mean = (float*)sys_malloc(channel_num * sizeof(float));
    float* scale_var_inv = (float*)sys_malloc(channel_num * sizeof(float));
    if (scale_mean == NULL || scale_var_inv == NULL)
    {
        /* fix: allocations were unchecked; release whichever half succeeded */
        if (scale_mean != NULL)
            sys_free(scale_mean);
        if (scale_var_inv != NULL)
            sys_free(scale_var_inv);
        return -1;
    }

    const float* mean = (const float*)mean_tensor->data;
    const float* var = (const float*)var_tensor->data;
    float eps = batchnorm_param->eps;
    /* guard against division by zero when rescale_factor == 0 */
    float rescale_factor = batchnorm_param->rescale_factor ? 1 / batchnorm_param->rescale_factor : 0;

    for (int c = 0; c < channel_num; c++)
    {
        float tmp = sqrtf(var[c] * rescale_factor + eps);
        scale_var_inv[c] = (float)(1.f / tmp);
        tmp = rescale_factor * scale_var_inv[c];
        scale_mean[c] = (float)(-mean[c] * tmp);
    }

    float* gamma = NULL;
    float* beta = NULL;
    if (!batchnorm_param->caffe_flavor)
    {
        const struct tensor* gamma_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
        const struct tensor* beta_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
        gamma = (float*)gamma_tensor->data;
        beta = (float*)beta_tensor->data;
    }

    op_param->iscaffe = batchnorm_param->caffe_flavor;
    op_param->scale_mean = scale_mean;         /* owned; freed in postrun() */
    op_param->scale_var_inv = scale_var_inv;   /* owned; freed in postrun() */
    op_param->gamma = gamma; /* borrowed from the graph tensors, not owned */
    op_param->beta = beta;
    op_param->layout = ir_graph->graph_layout;
    return 0;
}
/* Fill the dimension fields of the private parameter block from the input
 * tensor (honoring graph layout and 3D/4D shapes), then dispatch to the
 * fp32 or uint8 reference kernel.
 * \return 0 on success, -1 on unsupported shape or data type. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct ref_batchnorm_param* batchnorm_op_param = (struct ref_batchnorm_param*)exec_node->ops_priv;
    void* out_data = output_tensor->data;
    void* input = input_tensor->data;

    if (TENGINE_LAYOUT_NCHW == ir_graph->graph_layout)
    {
        if (4 == input_tensor->dim_num)
        {
            batchnorm_op_param->input_n = input_tensor->dims[0];
            batchnorm_op_param->input_c = input_tensor->dims[1];
            batchnorm_op_param->input_h = input_tensor->dims[2];
            batchnorm_op_param->input_w = input_tensor->dims[3];
        }
        else if (3 == input_tensor->dim_num)
        {
            /* 3D input: treat as N x C x W with height 1 */
            batchnorm_op_param->input_n = input_tensor->dims[0];
            batchnorm_op_param->input_c = input_tensor->dims[1];
            batchnorm_op_param->input_w = input_tensor->dims[2];
            batchnorm_op_param->input_h = 1;
        }
        else
        {
            /* fix: was `return false` (== 0), which callers read as success */
            return -1;
        }
    }
    else
    {
        if (4 == input_tensor->dim_num)
        {
            batchnorm_op_param->input_n = input_tensor->dims[0];
            batchnorm_op_param->input_c = input_tensor->dims[3];
            batchnorm_op_param->input_h = input_tensor->dims[1];
            batchnorm_op_param->input_w = input_tensor->dims[2];
        }
        else if (3 == input_tensor->dim_num)
        {
            /* 3D input: treat as N x W x C with height 1 */
            batchnorm_op_param->input_n = input_tensor->dims[0];
            batchnorm_op_param->input_c = input_tensor->dims[2];
            batchnorm_op_param->input_w = input_tensor->dims[1];
            batchnorm_op_param->input_h = 1;
        }
        else
        {
            /* fix: was `return false` (== 0), which callers read as success */
            return -1;
        }
    }

    int ret = -1; /* unsupported data type falls through as an error */
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_batchnorm_fp32(input, out_data, batchnorm_op_param, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_batchnorm_uint8(input_tensor, output_tensor, batchnorm_op_param, exec_graph->num_thread);
    return ret;
}
/* Release the per-channel coefficient arrays allocated in prerun().
 * (gamma/beta point into graph tensors and must NOT be freed here.) */
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ref_batchnorm_param* batchnorm_op_param = ( struct ref_batchnorm_param* )exec_node->ops_priv;
    sys_free(batchnorm_op_param->scale_mean);
    sys_free(batchnorm_op_param->scale_var_inv);
    return 0;
}
/* Scheduling priority of this implementation: it can always run, but only
 * as a plain-C fallback candidate. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}
/* vtable wiring the reference batchnorm callbacks into the CPU device. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = postrun,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Register this reference implementation for the OP_BATCHNORM operator. */
int register_batchnorm_ref_op()
{
    return register_builtin_node_ops(OP_BATCHNORM, &hcl_node_ops);
}
/* Remove the registration added by register_batchnorm_ref_op(). */
int unregister_batchnorm_ref_op()
{
    return unregister_builtin_node_ops(OP_BATCHNORM, &hcl_node_ops);
}
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/color-private.h"
#include "magick/cache.h"
#include "magick/draw.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/transform.h"
#include "magick/signature-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if ! FILTER_DIRECT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
/* State for repeated EWA resampling of one image: the cache view, filter
   settings, and the geometry of the current sampling ellipse. */
struct _ResampleFilter
{
  CacheView
    *view;        /* virtual-pixel view opened on `image` */

  Image
    *image;       /* referenced image being resampled */

  ExceptionInfo
    *exception;   /* caller's exception sink (not owned) */

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;

  InterpolatePixelMethod
    interpolate;

  VirtualPixelMethod
    virtual_pixel;

  FilterTypes
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,     /* scaling too extreme: fall back to averages */
    do_interpolate,    /* bypass EWA and use direct interpolation */
    average_defined;   /* average_pixel has been computed */

  MagickPixelPacket
    average_pixel;

  /* current ellipitical area being resampled around center point */
  double
    A, B, C,                        /* quadratic-form ellipse coefficients */
    Vlimit, Ulimit, Uwidth, slope;  /* scanline bounds of the fitted area */

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;  /* MagickCoreSignature while the structure is live */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs do to a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on a Elliptical Weighted Average, where the pixels
% found in a large elliptical area is averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
% 1989. Available for free from, http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual image resampling.
%
% This function will make the appropriate AcquireVirtualCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter, 1.0);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  resample_filter=(ResampleFilter *) AcquireMagickMemory(
    sizeof(*resample_filter));
  if (resample_filter == (ResampleFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(resample_filter,0,sizeof(*resample_filter));
  /* take a reference on the image and open a virtual-pixel cache view;
     both are released in DestroyResampleFilter() */
  resample_filter->exception=exception;
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,exception);
  resample_filter->debug=IsEventLogging();
  resample_filter->signature=MagickCoreSignature;
  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined = MagickFalse;
  /* initialise the resampling filter settings */
  SetResampleFilter(resample_filter, image->filter, image->blur);
  (void) SetResampleFilterInterpolateMethod(resample_filter,
    image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling
% resample_filter as returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* release the cache view and the image reference taken at acquisition */
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* invalidate the signature so stale pointers trip the asserts above */
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,MagickPixelPacket *pixel)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% the distortion-transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
ResampleFilter *resample_filter,const double u0,const double v0,
MagickPixelPacket *pixel)
{
MagickBooleanType
status;
ssize_t u,v, v1, v2, uw, hit;
double u1;
double U,V,Q,DQ,DDQ;
double divisor_c,divisor_m;
register double weight;
register const PixelPacket *pixels;
register const IndexPacket *indexes;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
status=MagickTrue;
/* GetMagickPixelPacket(resample_filter->image,pixel); */
if ( resample_filter->do_interpolate ) {
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
return(status);
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif
/*
Does resample area Miss the image Proper?
If and that area a simple solid color - then simply return that color!
This saves a lot of calculation when resampling outside the bounds of
the source image.
However it probably should be expanded to image bounds plus the filters
scaled support size.
*/
hit = 0;
switch ( resample_filter->virtual_pixel ) {
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
if ( resample_filter->limit_reached
|| u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
|| v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++;
break;
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 + resample_filter->Ulimit < 0.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
)
hit++;
break;
case HorizontalTileVirtualPixelMethod:
if ( v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++; /* outside the horizontally tiled images. */
break;
case VerticalTileVirtualPixelMethod:
if ( u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
)
hit++; /* outside the vertically tiled images. */
break;
case DitherVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 + resample_filter->Ulimit < -32.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
)
hit++;
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
/* resampling of area is always needed - no VP limits */
break;
}
if ( hit ) {
/* The area being resampled is simply a solid color
* just return a single lookup color.
*
* Should this return the users requested interpolated color?
*/
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
return(status);
}
/*
When Scaling limits reached, return an 'averaged' result.
*/
if ( resample_filter->limit_reached ) {
switch ( resample_filter->virtual_pixel ) {
/* This is always handled by the above, so no need.
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case GrayVirtualPixelMethod,
case WhiteVirtualPixelMethod
case MaskVirtualPixelMethod:
*/
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case DitherVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
/* We need an average edge pixel, from the correct edge!
How should I calculate an average edge color?
Just returning an averaged neighbourhood,
works well in general, but falls down for TileEdge methods.
This needs to be done properly!!!!!!
*/
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
break;
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
/* just return the background pixel - Is there a better way? */
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
resample_filter->exception);
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
default:
/* generate a average color of the WHOLE image */
if ( resample_filter->average_defined == MagickFalse ) {
Image
*average_image;
CacheView
*average_view;
GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
&resample_filter->average_pixel);
resample_filter->average_defined=MagickTrue;
/* Try to get an averaged pixel color of whole image */
average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
resample_filter->exception);
if (average_image == (Image *) NULL)
{
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
average_view=AcquireVirtualCacheView(average_image,
&average_image->exception);
pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
resample_filter->exception);
if (pixels == (const PixelPacket *) NULL) {
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
SetMagickPixelPacket(resample_filter->image,pixels,indexes,
&(resample_filter->average_pixel));
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
{
/* CheckerTile is a alpha blend of the image's average pixel
color and the current background color */
/* image's average pixel color */
weight = QuantumScale*((MagickRealType)(QuantumRange-
resample_filter->average_pixel.opacity));
resample_filter->average_pixel.red *= weight;
resample_filter->average_pixel.green *= weight;
resample_filter->average_pixel.blue *= weight;
divisor_c = weight;
/* background color */
weight = QuantumScale*((MagickRealType)(QuantumRange-
resample_filter->image->background_color.opacity));
resample_filter->average_pixel.red +=
weight*resample_filter->image->background_color.red;
resample_filter->average_pixel.green +=
weight*resample_filter->image->background_color.green;
resample_filter->average_pixel.blue +=
weight*resample_filter->image->background_color.blue;
resample_filter->average_pixel.opacity +=
resample_filter->image->background_color.opacity;
divisor_c += weight;
/* alpha blend */
resample_filter->average_pixel.red /= divisor_c;
resample_filter->average_pixel.green /= divisor_c;
resample_filter->average_pixel.blue /= divisor_c;
resample_filter->average_pixel.opacity /= 2; /* 50% blend */
}
}
*pixel=resample_filter->average_pixel;
break;
}
return(status);
}
/*
Initialize weighted average data collection
*/
hit = 0;
divisor_c = 0.0;
divisor_m = 0.0;
pixel->red = pixel->green = pixel->blue = 0.0;
if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;
/*
Determine the parellelogram bounding box fitted to the ellipse
centered at u0,v0. This area is bounding by the lines...
*/
v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */
v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);
/* scan line start and width accross the parallelogram */
u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
(void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif
/*
Do weighted resampling of all pixels, within the scaled ellipse,
bound by a Parellelogram fitted to the ellipse.
*/
DDQ = 2*resample_filter->A;
for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
long uu = ceil(u1); /* actual pixel location (for debug only) */
(void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
u = (ssize_t)ceil(u1); /* first pixel in scanline */
u1 += resample_filter->slope; /* start of next scan line */
/* location of this first pixel, relative to u0,v0 */
U = (double)u-u0;
V = (double)v-v0;
/* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */
Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;
/* get the scanline of pixels for this v */
pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
1,resample_filter->exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);
/* count up the weighted pixel colors */
for( u=0; u<uw; u++ ) {
weight = 0;
#if FILTER_LUT
/* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
if ( Q < (double)WLUT_WIDTH ) {
weight = resample_filter->filter_lut[(int)Q];
#else
/* Note that the ellipse has been pre-scaled so F = support^2 */
if ( Q < (double)resample_filter->F ) {
weight = GetResizeFilterWeight(resample_filter->filter_def,
sqrt(Q)); /* a SquareRoot! Arrggghhhhh... */
#endif
if (pixel->matte != MagickFalse)
pixel->opacity += weight*pixels->opacity;
divisor_m += weight;
if (pixel->matte != MagickFalse)
weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
pixel->red += weight*pixels->red;
pixel->green += weight*pixels->green;
pixel->blue += weight*pixels->blue;
if (pixel->colorspace == CMYKColorspace)
pixel->index += weight*(*indexes);
divisor_c += weight;
hit++;
#if DEBUG_HIT_MISS
/* mark the pixel according to hit/miss of the ellipse */
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
} else {
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
}
uu++;
#else
}
#endif
pixels++;
indexes++;
Q += DQ;
DQ += DDQ;
}
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif
/*
Result sanity check -- this should NOT happen
*/
if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
/* not enough pixels, or bad weighting in resampling,
resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
#endif
return status;
}
/*
Finialize results of resampling
*/
divisor_m = 1.0/divisor_m;
if (pixel->matte != MagickFalse)
pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
divisor_c = 1.0/divisor_c;
pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
if (pixel->colorspace == CMYKColorspace)
pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampUpAxes() function converts the input vectors into a major and
% minor axis unit vectors, and their magnitude. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clampling up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustaffson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This is not an
% orthogonal scaling, but two distorted scaling vectors, allowing the
% generation of an angled ellipse.
%
% As only two derivative scaling vectors are used the center of the ellipse
% must be the center of the lookup. That is any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate vector scaling vectors
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the derivatives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
% dv/dx = 0 and du/dy = 0
% Producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that the scaling vectors differ from the argument order. Argument order
% is the general order the derivatives are extracted from the distortion
% equations, and not the scaling vectors. As such the middle two values
% may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter information defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The derivatives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order derivatives
% are usually determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  /* Ellipse coefficients: A*u^2 + B*u*v + C*v^2 = F */
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif

  /* Find Ellipse Coefficients such that
       A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
       du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients.
     However when magnifying images, the scaling vectors will be small
     resulting in an ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
#if EWA_CLAMP
  { double major_mag,
      minor_mag,
      major_x,
      major_y,
      minor_x,
      minor_y;

  /* Decompose the tangent vectors into clamped major/minor axes, then
     rebuild the (possibly enlarged) scaled axis vectors from them. */
  ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
    &major_x, &major_y, &minor_x, &minor_y);
  major_x *= major_mag; major_y *= major_mag;
  minor_x *= minor_mag; minor_y *= minor_mag;
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
    major_x, major_y, minor_x, minor_y);
#endif
  /* Ellipse coefficients from the clamped axis vectors; F is the
     squared product of the axis half-lengths. */
  A = major_y*major_y+minor_y*minor_y;
  B = -2.0*(major_x*major_y+minor_x*minor_y);
  C = major_x*major_x+minor_x*minor_x;
  F = major_mag*minor_mag;
  F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /*
    This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
    thesis, which adds a unit circle to the elliptical area so as to do both
    Reconstruction and Prefiltering of the pixels in the resampling. It also
    means it is always likely to have at least 4 pixels within the area of the
    ellipse, for weighted averaging. No scaling will result with F == 4.0 and
    a circle of radius 2.0, and F smaller than this means magnification is
    being used.
    NOTE: This method produces a very blurry result at near unity scale while
    producing perfect results for strong minification and magnifications.
    However filter support is fixed to 2.0 (no good for Windowed Sinc filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information is currently not needed at this time, but may be
     needed later for better limit determination.
     It is also good to have as a record for future debugging.
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;
    alpha = A+C;
    beta = A-C;
    gamma = sqrt(beta*beta + B*B );
    if ( alpha - gamma <= MagickEpsilon )
      Major= MagickMaximumValue;
    else
      Major= sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));
    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );
    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);
    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
      (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /* If one or both of the scaling vectors is impossibly large
     (producing a very large raw F value), we may as well not bother
     doing any form of resampling since resampled area is very large.
     In this case some alternative means of pixel sampling, such as
     the average of the whole image is needed to get a reasonable
     result. Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse to match the filter's support
     (that is, multiply F by the square of the support).
     Simpler to just multiply it by the support twice!
  */
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif

  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
       > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() sets the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter not as a
% two pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterTypes filter,const double blur)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter information structure
%
% o filter: the resize filter for elliptical weighting LUT
%
% o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  /* Point filter bypasses EWA entirely: plain interpolation is used. */
  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    /* Could not build the radial filter: warn and degrade gracefully to
       interpolated point sampling rather than failing outright. */
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
      ModuleError, "UnableToSetFilteringValue",
      "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
  */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function.
     The LUT is indexed by squared radius, so the radius is recovered
     with a sqrt() before evaluating the filter. */
  { register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle.
    This assumes that any real scaling changes will always
    take place AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /*
    This is old code kept as a reference only. Basically it generates
    a Gaussian bell curve, with sigma = 0.5 if the support is 2.0
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA Gaussian lookup would use exp(Q*ALPHA)
    where Q = distance squared from 0.0 (center) to 1.0 (edge)
    and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767
    The table is of length 1024, and equates to support radius of 2.0
    thus needs to be scaled by ALPHA*4/1024 and any blur factor squared
    It comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp single
#endif
  {
    /* Optional debug dump of the LUT, controlled by the
       "resample:verbose" image artifact. */
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,
        "resample:verbose")) )
      {
        register int
          Q;
        double
          r_scale;

        /* Debug output of the filter weighting LUT.
           Gnuplot the LUT data; the x scale index has been adjusted:
             plot [0:2][-.2:1] "lut.dat" with lines
           The filter values should be normalized for comparison.
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
          WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
          resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");

        /* Scale radius so the filter LUT covers the full support range */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
            GetMagickPrecision(),sqrt((double)Q)*r_scale,
            GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /* Output the above once only for each image, and each setting
       (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
  /*
    Record the pixel interpolation method to be used by this resample
    filter.  Always succeeds.
  */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);

  /* Trace the call when debugging is enabled for this filter. */
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);

  resample_filter->interpolate = method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  /*
    Record the virtual pixel method for this resample filter and, for a
    defined method, propagate it to the underlying cache view.  Always
    succeeds.
  */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);

  /* Trace the call when debugging is enabled for this filter. */
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);

  resample_filter->virtual_pixel = method;
  if (method != UndefinedVirtualPixelMethod)
    (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
/* Fully parenthesized min/max macros.  NOTE: a macro argument is
 * evaluated twice, so avoid side effects in the arguments. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* The timing loop below calls lowercase min(); provide it as an alias of
 * MIN so the file compiles without an otherwise-undefined identifier. */
#define min(a,b) MIN(a,b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Validate parameters, then compute the requested distortion metric; the
    metric value is returned through *distortion.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    Size the difference image to the larger of the two inputs.
  */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Highlight (differing), lowlight (matching), and masklight (masked)
    colors; the defaults may be overridden with compare:*-color artifacts.
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    Quantum
      *magick_restrict r;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      ssize_t
        i;

      /* Pixels excluded by either read mask are painted masklight. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        /* One channel past the fuzz threshold marks the pixel different. */
        if (distance >= fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Composite the highlight overlay onto the extended difference image.
  */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images: per-channel
    counts of pixels whose squared difference reaches the fuzz threshold,
    plus a composite count of differing pixels.
  */
  status=MagickTrue;
  /* Squared fuzzy color distance; differences below it count as equal. */
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row accumulator, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      ssize_t
        i;

      /* Skip pixels excluded by either read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() accumulates the per-channel mean squared difference
  between image and reconstruct_image; the composite channel is normalized by
  the channel count and square-rooted (RMS).
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /* Fix: compare q against (const Quantum *) NULL — q is const-qualified
       and every sibling metric in this file uses the const cast. */
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row accumulator, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize by the number of unmasked pixels; PerceptibleReciprocal()
    avoids division by zero when every pixel was masked.
  */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Accumulate the per-channel mean absolute error (MAE) between the two
    images; rows are processed in parallel and row-local sums are merged
    under a critical section.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row accumulator, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by unmasked pixel count; safe when area is zero. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel() accumulates absolute per-channel errors into
  distortion[] and updates image->error with the mean error per pixel,
  normalized mean error, and normalized maximum error relative to
  reconstruct_image.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Fix: guard against division by zero when area is 0 (e.g. every pixel
    masked or no updatable channels); PerceptibleReciprocal() matches the
    normalization used by the other distortion metrics in this file.
  */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() accumulates the per-channel mean squared error
  (MSE) between image and reconstruct_image into distortion[]; the composite
  channel is averaged over the number of updatable channels.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row accumulator, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by unmasked pixel count; safe when area is zero. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  /* Fix: add the explicit (double) cast used by the sibling metrics
     (GetFuzzDistortion, GetMeanAbsoluteDistortion) for consistency. */
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever statistics buffer was acquired before failing. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    First pass: count the pixels not excluded by either read mask; the
    reciprocal of this count weights each covariance term below.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);
  /*
    Second pass: accumulate the mean-centered cross-covariance per channel.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is used directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  /* Composite channel is the RMS of the per-channel correlations. */
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Track the maximum per-channel absolute error (peak absolute error)
    between the two images; row maxima are merged under a critical section.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row maxima, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; other channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() derives per-channel PSNR from the mean squared
  error: 10*log10(1/MSE) in decibels.  Channels whose MSE is effectively
  zero (below MagickEpsilon) report INFINITY, i.e. a perfect match.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    if (fabs(distortion[channel]) < MagickEpsilon)
      {
        distortion[channel]=INFINITY;
        continue;
      }
    distortion[channel]=10.0*(MagickLog10(1.0)-MagickLog10(
      distortion[channel]));
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image
  moments computed over several colorspaces) of the two images, storing the
  squared hash difference per channel in `distortion' and the sum over all
  channels in distortion[CompositePixelChannel].  Returns MagickFalse if
  either image's perceptual hash cannot be computed.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  /*
    The "phash:normalize" artifact must be present AND true to enable
    normalization; absent or false yields the raw squared distance.
  */
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /*
            NOTE(review): this branch assigns (=) rather than accumulates
            (+=), so when normalizing only the last colorspace/moment pair
            survives -- confirm the asymmetry with the branch above is
            intentional.
          */
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* each parallel iteration owns a distinct channel slot: no lock needed */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    /* the composite slot is shared across threads: guard the accumulation */
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() reports, per channel, the square root of
  the mean squared error between the two images (RMSE).
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the structural similarity
  index (SSIM, https://en.wikipedia.org/wiki/Structural_similarity) between
  `image' and `reconstruct_image'.  A Gaussian window -- radius/sigma
  tunable via the "compare:ssim-radius"/"compare:ssim-sigma" artifacts --
  weights the local means, variances, and covariance of each pixel
  neighborhood; the stabilizing constants c1/c2 derive from the
  "compare:ssim-k1"/"compare:ssim-k2" artifacts.  The per-channel mean SSIM
  is returned in `distortion', and the average over all channels in
  distortion[CompositePixelChannel].

  Fixes relative to the previous revision:
    - removed a duplicated memset of x_pixel_sigma_squared (it used
      sizeof(y_pixel_sigma_squared) as its length -- a copy/paste slip;
      every accumulator array is now cleared exactly once);
    - `area' was incremented directly by every thread without
      synchronization; each row now counts samples locally and merges them
      under the existing critical section.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    area,
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Build the Gaussian weighting kernel from the (possibly overridden)
    radius and sigma.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Stabilizing constants: c1=(k1*L)^2, c2=(k2*L)^2.
  */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  area=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1],
      row_area;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a window large enough to center the kernel on every pixel of
      the row; the virtual views pad beyond the image edges.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    row_area=0.0;
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      MagickRealType
        *k;

      ssize_t
        v;

      /*
        Skip pixels excluded by either image's read mask.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /*
        Clear each accumulator exactly once (a prior revision cleared
        x_pixel_sigma_squared twice, once with the wrong sizeof operand).
      */
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      /*
        Accumulate Gaussian-weighted first and second moments over the
        kernel neighborhood.
      */
      k=kernel_info->values;
      reference=p;
      target=q;
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        /* advance from the end of one kernel row to the start of the next */
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      /*
        Evaluate the SSIM formula for each channel of this pixel.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
        row_area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /*
      Merge this row's sums and sample count under one critical section.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    {
      for (i=0; i <= MaxPixelChannels; i++)
        distortion[i]+=channel_distortion[i];
      area+=row_area;
    }
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /*
    Normalize the accumulated SSIM by the number of samples.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=area;
  }
  distortion[CompositePixelChannel]/=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
/*
  GetStructuralDisimilarityDistortion() reports DSSIM = (1-SSIM)/2,
  mapping the similarity range [-1,1] onto a dissimilarity in [0,1].
*/
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=(1.0-(distortion[channel]))/2.0;
  return(status);
}
/*
  GetImageDistortion() compares one or more pixel channels of `image' to the
  corresponding channels of `reconstruct_image' and stores the composite
  distortion for the requested metric in `*distortion'; the value is also
  recorded on the image as the "distortion" property.

  NOTE(review): the TraceEvent log line is emitted twice when image->debug
  is set -- looks like a copy/paste duplication; confirm before removing.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;  /* one slot per channel plus the composite */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /*
    Dispatch on the requested metric; NormalizedCrossCorrelation doubles as
    the default for unrecognized metric values.
  */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions() compares the pixel channels of `image' to
  `reconstruct_image' and returns a newly allocated array holding the
  requested distortion metric for each channel; index CompositePixelChannel
  (== MaxPixelChannels) is the composite value.  The caller owns the
  returned memory.  Returns NULL if the metric computation fails.
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;  /* one slot per channel plus the composite */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  /*
    Dispatch on the requested metric; NormalizedCrossCorrelation doubles as
    the default for unrecognized metric values.
  */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Fixed: this case previously called GetRootMeanSquaredDistortion
        (copy/paste error); it now matches GetImageDistortion().
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compare the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Scan the union of both geometries; the virtual views supply pixels
    beyond either image's actual bounds.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /* an unreadable row aborts the scan and reports inequality below */
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* any per-channel difference of at least MagickEpsilon is a mismatch */
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          break;
      }
      /*
        Break cascade: a mismatched channel leaves i short of the channel
        count, which breaks the column loop, which in turn (x < columns)
        breaks the row loop.
      */
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* only a full scan (y reached rows) proves the images equal */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Scan the union of both geometries; virtual views pad beyond either
    image's bounds.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Differences below MagickEpsilon count as identical and do not
          contribute to the error statistics; `area' counts every channel
          sample regardless.
        */
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Publish the statistics on the image.  NOTE(review): if `area' is 0
    (degenerate geometry or no updatable channels) these divisions yield
    NaN -- confirm callers guarantee non-empty inputs.
  */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  /* MagickTrue only when no channel differed by MagickEpsilon or more */
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImageImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() crops the reference-sized region of `image' at
  (x_offset,y_offset) and scores it against `reference' with the requested
  distortion metric.  Returns 0.0 when the crop or the comparison fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    crop_geometry;

  SetGeometry(reference,&crop_geometry);
  crop_geometry.x=x_offset;
  crop_geometry.y=y_offset;
  crop_image=CropImage(image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,
    exception);
  crop_image=DestroyImage(crop_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
/*
  SimilarityImage() slides `reference' over `image', scoring every offset
  with the requested metric.  The best (lowest) score and its offset are
  returned through `similarity_metric' and `offset'; the returned map image
  renders each offset's score (white = exact match).  The scan short
  circuits once a score at or below `similarity_threshold' is found.

  Fix relative to the previous revision: the omp critical section bound
  only to the metric-inversion statement (which touches a thread-local),
  leaving the shared best-match update unsynchronized.  The inversion is
  now hoisted above the pragma so the critical section guards the
  compare-and-update of *similarity_metric/offset.

  NOTE(review): progress is reported against image->rows although the loop
  runs image->rows-reference->rows+1 iterations -- the monitor never quite
  reaches 100%; confirm whether this is intentional.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /*
    The map image has one pixel per candidate offset.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Short-circuit: stop scanning once any thread found a good-enough
      match.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /*
        Correlation-style metrics report similarity (1 = match); convert to
        a distortion so lower is always better.  `similarity' is
        thread-local, so no lock is required here.
      */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /*
        Guard the shared best-match record.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      /* rescale phash scores into [0,1] before rendering the map pixel */
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
dynamic_fmt.c | /*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2009-2013. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2009-2013 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
* Generic 'scriptable' hash cracker for JtR
*
* Renamed and changed from md5_gen* to dynamic*. We handle MD5 and SHA1
* at the present time. More crypt types 'may' be added later.
* Added SHA2 (SHA224, SHA256, SHA384, SHA512), GOST, Whirlpool crypt types.
 * Whirlpool uses oSSL (OpenSSL) if OPENSSL_VERSION_NUMBER >= 0x10000000, otherwise uses sph_* code.
*
* There used to be a todo list, and other commenting here. It has been
* moved to ./docs/dynamic_history.txt
*
* KNOWN issues, and things to do.
*
* 1. create a new optimize flag, MGF_PASS_AFTER_FIXEDSALT and
* MGF_PASS_BEFORE_FIXEDSALT. Then create DynamicFunc__appendsalt_after_pass[12]
* These would only be valid for a FIXED length salted format. Then
* we can write the pass right into the buffer, and get_key() would read
* it back from there, either skipping over the salt, or removing the salt
* from the end. This would allow crypt($s.$p) and crypt($p.s) to be optimized
* in the way of string loading, and many fewer buffer copies. So dyna_1 could
* be optimized to something like:
// dynamic_1 Joomla md5($p.$s)
static DYNAMIC_primitive_funcp _Funcs_1[] =
{
//Flags=MGF_PASS_BEFORE_FIXEDSALT | MGF_SALTED
// saltlen=3 (or whatever). This fixed size is 'key'
DynamicFunc__appendsalt_after_pass1,
DynamicFunc__crypt_md5,
NULL
};
* WELL, the fixed size salt, it 'may' not be key for the MGF_PASS_BEFORE_FIXEDSALT,
* I think I can make that 'work' for variable sized salts. But for the
* MGF_PASS_AFTER_FIXEDSALT, i.e. crypt($s.$p) the fixed size salt IS key. I would
* like to store all PW's at salt_len offset in the buffer, and simply overwrite the
* first part of each buffer with the salt, never moving the password after the first
* time it is written. THEN it is very important this ONLY be allowed when we KNOW
* the salt length ahead of time.
*
* 2. Change regen-salts to be generic. Add the logic to dynamic_fmt.c proper, and change
* the fake-salts.c, and options so that 'generic' regen-salts can be done.
*/
#include <string.h>
#include <time.h>
#if AC_BUILT
#include "autoconfig.h"
#endif
#include "arch.h"
#if !FAST_FORMATS_OMP
#ifdef _OPENMP
# define FORCE_THREAD_MD5_body
#endif
#undef _OPENMP
#endif
#ifndef DYNAMIC_DISABLED
#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "md5.h"
#include "md4.h"
#include "dynamic.h"
#include "options.h"
#include "config.h"
#include "sha.h"
#include "sha2.h"
#include "gost.h"
#include "sph_haval.h"
#include "sph_ripemd.h"
#include "sph_tiger.h"
#include "sph_md2.h"
#include "sph_panama.h"
#include "sph_skein.h"
#include "sph_whirlpool.h"
#include "memory.h"
#include "unicode.h"
#include "johnswap.h"
#include "crc32.h"
#include "aligned.h"
#include "fake_salts.h"
#include "base64_convert.h"
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
#include <openssl/whrlpool.h>
#else
// on my 32 bit cygwin builds, this code is about 4x slower than the oSSL code.
#define WHIRLPOOL_CTX sph_whirlpool_context
#define WHIRLPOOL_Init(a) sph_whirlpool_init(a)
#define WHIRLPOOL_Update(a,b,c) sph_whirlpool(a,b,c)
#define WHIRLPOOL_Final(a,b) sph_whirlpool_close(b,a)
#endif
#include "KeccakHash.h"
#define KECCAK_CTX Keccak_HashInstance
#define KECCAK_Update(a,b,c) Keccak_HashUpdate(a,b,(c)*8)
#define KECCAK_Final(a,b) Keccak_HashFinal(b,a)
#define KECCAK_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x01)
#define KECCAK_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x01)
// FIPS202 compliant
#define SHA3_224_Init(hash) Keccak_HashInitialize(hash, 1152, 448, 224, 0x06)
#define SHA3_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x06)
#define SHA3_384_Init(hash) Keccak_HashInitialize(hash, 832, 768, 384, 0x06)
#define SHA3_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x06)
#ifdef _OPENMP
#include <omp.h>
static unsigned int m_ompt;
#endif
#include "dynamic_types.h"
#include "memdbg.h"
#if (defined (_OPENMP)||defined(FORCE_THREAD_MD5_body)) && defined (_MSC_VER)
unsigned DES_bs_max_kpc, DES_bs_min_kpc, DES_bs_all_p;
#undef MD5_body
extern void MD5_body(MD5_word x[15],MD5_word out[4]);
#endif
#define STRINGIZE2(s) #s
#define STRINGIZE(s) STRINGIZE2(s)
static struct fmt_main fmt_Dynamic;
static struct fmt_main *pFmts;
static int nFmts;
static int nLocalFmts;
static struct fmt_main *pLocalFmts;
static int force_md5_ctx;
static void dynamic_RESET(struct fmt_main *fmt);
#define eLargeOut dyna_eLargeOut
eLargeOut_t *eLargeOut;
#define nLargeOff dyna_nLargeOff
unsigned *nLargeOff;
#if ARCH_LITTLE_ENDIAN
#define MD5_swap(x, y, count)
#define MD5_swap2(a,b,c,d,e)
#else
extern char *MD5_DumpHexStr(void *p);
/* Big-endian builds only: byte-swap 'count' MD5 words from x into y
 * (x == y, i.e. in-place, works since each word is read before written).
 * NOTE: do/while means count must be >= 1. */
static void MD5_swap(MD5_word *x, MD5_word *y, int count)
{
	do {
		*y++ = JOHNSWAP(*x++);
	} while (--count);
}
#if MD5_X2
/* Big-endian + MD5_X2 builds: byte-swap two interleaved word streams at
 * once (the paired x1/x2 halves of an MD5_X2 buffer).  count must be >= 1. */
static void MD5_swap2(MD5_word *x, MD5_word *x2, MD5_word *y, MD5_word *y2, int count)
{
	do {
		*y++ = JOHNSWAP(*x++);
		*y2++ = JOHNSWAP(*x2++);
	} while (--count);
}
#endif
#endif
#define FORMAT_LABEL "dynamic"
#define FORMAT_NAME "Generic MD5"
#ifdef SIMD_COEF_32
# define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + ((i)&3) )
# define SHAGETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) ) //for endianity conversion
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_SIZE_SHA 20
#define BINARY_ALIGN MEM_ALIGN_WORD
// Computation for 'salt_size' The salt (and salt2) is appended to the end of the hash entry.
// The format of a salted entry is: $dynamic_#$hash$SALT_VAL[$$2SALT2_VAL]
// salt 64 bytes,
// salt2 64 bytes,
// salt signature $ 1 byte
// salt2 signature $$2 3 bytes
// null termination    1 byte.  This allows two 64-byte salts.
// Note, we now have up to 10 of these.
#define SALT_SIZE (64*4+1+3+1)
#define SALT_ALIGN MEM_ALIGN_WORD
// slots to do 24 'tests'. Note, we copy the
// same 3 tests over and over again. Simply to validate that
// tests use 'multiple' blocks.
static struct fmt_tests dynamic_tests[] = {
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}
};
#ifdef SIMD_COEF_32
// SSE2 works only with 54 byte keys. Thus, md5(md5($p).md5($s)) can NOT be used
// with the SSE2, since that final md5 will be over a 64 byte block of data.
static union SIMD_inpup {
ARCH_WORD_32 w[(64*SIMD_COEF_32)/sizeof(ARCH_WORD_32)];
unsigned char c[64*SIMD_COEF_32];
} *input_buf, *input_buf2;
static union SIMD_crypt {
ARCH_WORD_32 w[(BINARY_SIZE*SIMD_COEF_32)/sizeof(ARCH_WORD_32)];
unsigned char c[BINARY_SIZE*SIMD_COEF_32];
} *crypt_key, *crypt_key2;
static unsigned int (*total_len)[SIMD_COEF_32];
static unsigned int (*total_len2)[SIMD_COEF_32];
// Sizes (in bytes) of the interleaved SIMD buffers allocated in init().
#define MMX_INP_BUF_SZ    (sizeof(input_buf[0]) *BLOCK_LOOPS)
#define MMX_INP_BUF2_SZ   (sizeof(input_buf2[0])*BLOCK_LOOPS)
#define MMX_TOT_LEN_SZ    (sizeof(*total_len)   *BLOCK_LOOPS)
#define MMX_TOT_LEN2_SZ   (sizeof(*total_len2)  *BLOCK_LOOPS)
// NOTE(review): a second, byte-identical definition of MMX_INP_BUF_SZ used to
// sit here; it was redundant (harmless but noisy) and has been removed.
// crypt_key gets one extra element of slack (the trailing +sizeof term).
#define MMX_CRYPT_KEY_SZ  (sizeof(crypt_key[0]) *BLOCK_LOOPS+sizeof(crypt_key[0]))
#define MMX_CRYPT_KEY2_SZ (sizeof(crypt_key2[0])*BLOCK_LOOPS)
#endif
#define FLAT_INP_BUF_SZ (sizeof(MD5_IN)*(MAX_KEYS_PER_CRYPT_X86>>MD5_X2))
#define FLAT_TOT_LEN_SZ (sizeof(unsigned int)*(MAX_KEYS_PER_CRYPT_X86))
MD5_OUT *crypt_key_X86;
MD5_OUT *crypt_key2_X86;
MD5_IN *input_buf_X86;
MD5_IN *input_buf2_X86;
unsigned int *total_len_X86;
unsigned int *total_len2_X86;
BIG_HASH_OUT dynamic_BHO[4];
static int keys_dirty;
// We store the salt here
static unsigned char *cursalt;
// length of salt (so we don't have to call strlen() all the time).
static int saltlen;
int get_dynamic_fmt_saltlen() { return saltlen; }
// This array is for the 2nd salt in the hash. I know of no hashes with double salts,
// but test type dynamic_16 (which is 'fake') has 2 salts, and this is the data/code to
// handle double salts.
static unsigned char *cursalt2;
static int saltlen2;
static unsigned char *username;
static int usernamelen;
static unsigned char *flds[10];
static int fld_lens[10];
const char *dynamic_itoa16 = itoa16;
#if !defined (_DEBUG)
#define itoa16_w2 __Dynamic_itoa_w2
#define itoa16_w2_u __Dynamic_itoa_w2_u
#define itoa16_w2_l __Dynamic_itoa_w2_l
#endif
unsigned short itoa16_w2_u[256], itoa16_w2_l[256];
unsigned short *itoa16_w2=itoa16_w2_l;
// array of the keys. Also lengths of the keys. NOTE if store_keys_in_input, then the
// key array will NOT be used (but the length array still is).
#ifndef MAX_KEYS_PER_CRYPT
#define MAX_KEYS_PER_CRYPT MAX_KEYS_PER_CRYPT_X86
#endif
#ifndef PLAINTEXT_LENGTH
#define PLAINTEXT_LENGTH PLAINTEXT_LENGTH_X86
#endif
#define EFFECTIVE_MKPC (MAX_KEYS_PER_CRYPT > MAX_KEYS_PER_CRYPT_X86 ? MAX_KEYS_PER_CRYPT : MAX_KEYS_PER_CRYPT_X86)
#define EFFECTIVE_MAX_LENGTH (PLAINTEXT_LENGTH > PLAINTEXT_LENGTH_X86 ? PLAINTEXT_LENGTH : PLAINTEXT_LENGTH_X86)
// Used to compute length of each string to clean. This is needed, since we have to clean a little more than
// just the length, IF we are cleaning strings that are in different endianity than native for the CPU.
// This is seen on SHA224 (etc) on Intel, or MD5 of BE systems. We still try to clean 'only' as much as
// we need to, but that is usually MORE than what the length of the stored string is. 8 gives us 7 byte spill
// over, plus 1 byte for the 0x80
#define COMPUTE_EX_LEN(a) ( (a) > (sizeof(input_buf_X86[0].x1.b)-8) ) ? sizeof(input_buf_X86[0].x1.b) : ((a)+8)
// this new 'ENCODED_EFFECTIVE_MAX_LENGTH' needed, since we grab up to 125 bytes of data WHEN in -encode:utf8 mode for a unicode format.
#define ENCODED_EFFECTIVE_MAX_LENGTH (EFFECTIVE_MAX_LENGTH > 125 ? EFFECTIVE_MAX_LENGTH : 125)
static char saved_key[EFFECTIVE_MKPC][ENCODED_EFFECTIVE_MAX_LENGTH + 1];
static int saved_key_len[EFFECTIVE_MKPC];
// this is the max generic location we should target. This keeps us from having blown MD buffers or overwrite
// when in utf8->utf16 mode, where we are handling data that likely is larger than we should handle. We have to
// handle this larger data, so that we get as many strings with 1 byte utf8 that would convert to data that would
// blow our buffers. But we want as many as possible for the 2 and 3 byte utf data.
#define MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE (256-17)
// Used in 'get_key' if we are running in store_keys_in_input mode
static char out[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
// This is the GLOBAL count of keys. ALL of the primitives which deal with a count
// will read from this variable.
#if !defined (_DEBUG)
#define m_count m_Dynamic_Count
#endif
unsigned int m_count;
// If we are run in 'specific' mode (say, -format=dynamic -subformat=dynamic_0, then we
// want to 'allow' bare hashes to be 'valid'. This is how we will do this. We have a boolean
// that if set to true, we will perform a 1 time check within the valid function. If at
// that time we find out that we are cracking (or showing, etc) that we will accept lines
// that are either format of $dynamic_0$hhhhhh...32 or simply in the format of hhhhhhh..32
int dynamic_allow_rawhash_fixup = 0;
// this one IS in the private_dat, but since it is accessed SO much, we pull it
// out prior to 'internal' processing. The others are accessed right from
// the structure, since there are accessed infrequently enough to not matter.
static int dynamic_use_sse;
// If set to 1, then do unicode conversion is many string setting functions.
static int *md5_unicode_convert;
#if !defined (_DEBUG)
#define curdat Dynamic_curdat
#endif
private_subformat_data curdat;
// Helper function that loads out 256 unsigned short array that does base-16 conversions
// This function is called at the 'validation' call that loads our preloads (i.e. only
// called one time, pre 'run' (but will be called multiple times when benchmarking, but
// will NOT impact benchmark times.) Loading a word at a time (2 bytes), sped up
// the overall run time of dynamic_2 almost 5%, thus this conversion is MUCH faster than
// the fastest byte by byte I could put together. I tested several ways to access this
// array of unsigned shorts, and the best way was a 2 step method into an array of long
// integer pointers (thus, load 1/2 the 32 bit word, then the other 1/2, into a 32 bit word).
/*********************************************************************************
*********************************************************************************
* Start of the 'normal' *_fmt code for md5-gen
*********************************************************************************
*********************************************************************************/
/*
 * Decode the $HEX$... encoding out of 'input', writing the result into
 * 'output' (caller supplies a buffer at least as large as input) and
 * returning 'output'.  Everything before the first $HEX$ marker, plus the
 * marker's leading '$', is copied verbatim; hex pairs after it are decoded
 * to raw bytes.  A later '$' switches back to verbatim copying of trailing
 * fields ($$2salt, $$Uuser, ...) until the next $HEX$ marker, if any.
 * If the data is not valid hex, or would decode an embedded NUL ("00" pair,
 * which cannot live in a C string), the input is returned as-is (copied).
 */
char *RemoveHEX(char *output, char *input)
{
	char *in = input;
	char *out = output;
	char *marker = strstr(input, "$HEX$");

	if (!marker) {
		/* callers check for $HEX$ before calling, so this is belt-and-braces */
		strcpy(output, input);
		return output;
	}
	/* verbatim prefix, then the marker's '$', then skip past "HEX$" */
	while (in < marker)
		*out++ = *in++;
	*out++ = *in;
	in += 5;
	while (*in) {
		if (in[0] == '0' && in[1] == '0') {
			/* embedded NUL: bail out, leave the $HEX$ form intact */
			strcpy(output, input);
			return output;
		}
		if (atoi16[ARCH_INDEX(in[0])] != 0x7f && atoi16[ARCH_INDEX(in[1])] != 0x7f) {
			*out++ = atoi16[ARCH_INDEX(in[0])]*16 + atoi16[ARCH_INDEX(in[1])];
			in += 2;
		} else if (*in == '$') {
			/* trailing field: copy until the next $HEX$ marker (or end) */
			while (*in && strncmp(in, "$HEX$", 5))
				*out++ = *in++;
			if (!strncmp(in, "$HEX$", 5)) {
				*out++ = *in;
				in += 5;
			}
		} else {
			/* not a hex digit pair and not a field separator: invalid */
			strcpy(output, input);
			return output;
		}
	}
	*out = 0;
	return output;
}
/*********************************************************************************
* Detects a 'valid' md5-gen format. This function is NOT locked to anything. It
* takes its detection logic from the provided fmt_main pointer. Within there,
* is a 'private' data pointer. When john first loads the md5-gen, it calls a
* function which builds proper 'private' data for EACH type of md5-gen. Then
* john will call valid on EACH of those formats, asking each one if a string is
* valid. Each format has a 'private' properly setup data object.
*********************************************************************************/
/*
 * Decide whether 'ciphertext' is a valid hash line for the one specific
 * dynamic sub-format described by pFmt's private data (see block comment
 * above).  Returns 1 if valid, 0 if not.
 */
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
	unsigned int i, cipherTextLen;
	char *cp, fixed_ciphertext[1024];
	private_subformat_data *pPriv = pFmt->private.data;
	if (!pPriv)
		return 0;
	// the line must start with this sub-format's $dynamic_#$ signature
	if (strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG)))
		return 0;
	// this is now simply REMOVED totally, if we detect it. Doing this solves MANY other problems
	// of leaving it in there. The ONLY problem we still have is NULL bytes.
	if (strstr(ciphertext, "$HEX$")) {
		// only decode if it fits our local buffer; otherwise validate as-is
		if (strlen(ciphertext) < sizeof(fixed_ciphertext))
			ciphertext = RemoveHEX(fixed_ciphertext, ciphertext);
	}
	// cp points just past the $dynamic_#$ signature, at the hash body
	cp = &ciphertext[strlen(pPriv->dynamic_WHICH_TYPE_SIG)];
	// base64 hash bodies (crypt or MIME flavored, per dynamic_base64_inout)
	if (pPriv->dynamic_base64_inout == 1 || pPriv->dynamic_base64_inout == 3 || pPriv->dynamic_base64_inout == 5)
	{
		// jgypwqm.JsMssPLiS8YQ00$BaaaaaSX
		unsigned int len;
		len = base64_valid_length(cp, pPriv->dynamic_base64_inout==3?e_b64_mime:e_b64_crypt, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		if (len < 20 || len > pPriv->dynamic_SALT_OFFSET+4) return 0;
		if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
			return !cp[len];	// unsalted: nothing may follow the base64 body
		if (pPriv->dynamic_FIXED_SALT_SIZE && cp[len] != '$')
			return 0;
		// FIXED_SALT_SIZE > 0 means exact salt length; < -1 means a maximum
		if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[len+1]) != pPriv->dynamic_FIXED_SALT_SIZE)
			return 0;
		else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[len+1]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
			return 0;
		return 1;
	}
	// crypt-style base64 with a fixed 16-character hash body
	if (pPriv->dynamic_base64_inout == 2)
	{
		// h3mJrcH0901pqX/m$alex
		unsigned int i;
		for (i = 0; i < 16; ++i) {
			// atoi64[] is 0x7F for any non-base64 character
			if (atoi64[ARCH_INDEX(cp[i])] == 0x7F)
				return 0;
		}
		if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
			return !cp[i];
		if (pPriv->dynamic_FIXED_SALT_SIZE && cp[16] != '$')
			return 0;
		if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[17]) != pPriv->dynamic_FIXED_SALT_SIZE)
			return 0;
		else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[17]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
			return 0;
		if (strlen(cp) < 16)
			return 0;
		return 1;
	}
	// hex hash body from here on
	if (strlen(cp) < 32)
		return 0;
	// pick the expected hex digest length for this hash type (default MD5 = 32)
	cipherTextLen = CIPHERTEXT_LENGTH;
	if (pPriv->dynamic_40_byte_input) {
		cipherTextLen = 40;
	} else if (pPriv->dynamic_48_byte_input) {
		cipherTextLen = 48;
	} else if (pPriv->dynamic_64_byte_input) {
		cipherTextLen = 64;
	} else if (pPriv->dynamic_56_byte_input) {
		cipherTextLen = 56;
	} else if (pPriv->dynamic_80_byte_input) {
		cipherTextLen = 80;
	} else if (pPriv->dynamic_96_byte_input) {
		cipherTextLen = 96;
	} else if (pPriv->dynamic_128_byte_input) {
		cipherTextLen = 128;
	}
	// every digest character must be a valid hex digit
	for (i = 0; i < cipherTextLen; i++) {
		if (atoi16[ARCH_INDEX(cp[i])] == 0x7f)
			return 0;
	}
	// unsalted formats: the digest must be the end of the line
	if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
		if (!cp[cipherTextLen])
			return 1;
		return 0;
	}
	// salted: a '$' (or end of string) must follow the digest
	if (cp[cipherTextLen] && cp[cipherTextLen] != '$')
		return 0;
	// NOTE if looking at this in the future, this was not my fix.
	if (strlen(&cp[cipherTextLen]) > SALT_SIZE)
		return 0;
	// end NOTE.
	if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && ciphertext[pPriv->dynamic_SALT_OFFSET-1] != '$')
		return 0;
	// exact-size salt: check length, allowing for $HEX$ salts and $$ extra fields
	if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) != pPriv->dynamic_FIXED_SALT_SIZE) {
		// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
		// validate length with this in mind.
		if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
			int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
			len = (len-4)>>1;	// decoded length: strip "HEX$", two hex chars per byte
			if (len != pPriv->dynamic_FIXED_SALT_SIZE)
				return 0;
		} else {
			// check if there is a 'salt-2' or 'username', etc If that is the case, then this is still valid.
			if (strncmp(&ciphertext[pPriv->dynamic_SALT_OFFSET+pPriv->dynamic_FIXED_SALT_SIZE], "$$", 2))
				return 0;
		}
	}
	// max-size salt (FIXED_SALT_SIZE < -1): the same checks, but as an upper bound
	else if (!regen_salts_options && pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) > -(pPriv->dynamic_FIXED_SALT_SIZE)) {
		char *cpX;
		// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
		// validate length with this in mind.
		if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
			int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
			len = (len-4)>>1;
			if (len > -(pPriv->dynamic_FIXED_SALT_SIZE))
				return 0;
		} else {
			// check if there is a 'salt-2' or 'username', etc If that is the case, then this is still 'valid'
			cpX = mem_alloc(-(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
			strnzcpy(cpX, &ciphertext[pPriv->dynamic_SALT_OFFSET], -(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
			if (!strstr(cpX, "$$")) {
				MEM_FREE(cpX);
				return 0;
			}
			MEM_FREE(cpX);
		}
	}
	// formats that require a 2nd salt / username / numbered fields must have them
	if (pPriv->b2Salts==1 && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$2"))
		return 0;
	if (pPriv->nUserName && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$U"))
		return 0;
	if (pPriv->FldMask) {
		for (i = 0; i < 10; ++i) {
			if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
				char Fld[5];
				sprintf(Fld, "$$F%d", i);
				if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
					return 0;
			}
		}
	}
	return 1;
}
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv);
static struct fmt_main *dynamic_Get_fmt_main(int which);
static char *HandleCase(char *cp, int caseType);
// 'wrapper' functions. These are here, so we can call these functions to work on ALL data (not simply within the
// thread, which ONLY wants to work on a subset of the data). These functions should NOT be called by threading
// code, EVER. But these functions KNOW what to do. Some actually have threads, others do not need them.
#ifdef _OPENMP
#ifndef SIMD_COEF_32
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (MD5_X2+1);
const unsigned int OMP_MD4_INC = (MD5_X2+1);
const unsigned int OMP_SHA1_INC = (MD5_X2+1);
#else
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (SIMD_PARA_MD5*SIMD_COEF_32);
const unsigned int OMP_MD4_INC = (SIMD_PARA_MD4*SIMD_COEF_32);
const unsigned int OMP_SHA1_INC = (SIMD_PARA_SHA1*SIMD_COEF_32);
#endif // SIMD_COEF_32
#endif // _OPENMP
/* Non-threaded wrapper: run the primitive over ALL keys (0..m_count).
 * In OMP builds the primitive takes (first, last, tid); non-OMP builds
 * compile it with no arguments. */
static inline void __nonMP_DynamicFunc__SSEtoX86_switch_output2()
{
#ifdef _OPENMP
	DynamicFunc__SSEtoX86_switch_output2(0,m_count,0);
#else
	DynamicFunc__SSEtoX86_switch_output2();
#endif
}
/* Non-threaded wrapper: append output2 (as base-16) to input1 for ALL keys.
 * Same OMP/non-OMP signature split as the other __nonMP_ wrappers. */
static inline void __nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16()
{
#ifdef _OPENMP
	DynamicFunc__append_from_last_output2_to_input1_as_base16(0,m_count,0);
#else
	DynamicFunc__append_from_last_output2_to_input1_as_base16();
#endif
}
/*
 * Set the large-hash output encoding for every thread slot (slot 0 is the
 * only one in non-OMP builds).  Must be called from non-threaded code.
 */
void __nonMP_eLargeOut(eLargeOut_t what)
{
	eLargeOut[0] = what;
#ifdef _OPENMP
	{
		unsigned int t;

		for (t = 1; t < m_ompt; ++t)
			eLargeOut[t] = what;
	}
#endif
}
/*
 * Set the large-hash output offset for every thread slot (slot 0 is the
 * only one in non-OMP builds).  Must be called from non-threaded code.
 */
void __nonMP_nLargeOff(unsigned val)
{
	nLargeOff[0] = val;
#ifdef _OPENMP
	{
		unsigned int t;

		for (t = 1; t < m_ompt; ++t)
			nLargeOff[t] = val;
	}
#endif
}
/* Per-thread setter for the unicode-conversion flag (tid = OMP thread id,
 * always 0 in non-OMP builds via the macro below). */
static inline void md5_unicode_convert_set(int what, int tid)
{
	md5_unicode_convert[tid] = what;
}
/* Per-thread getter for the unicode-conversion flag. */
static inline int md5_unicode_convert_get(int tid)
{
	return md5_unicode_convert[tid];
}
/*
 * Set the unicode-conversion flag for every thread slot (slot 0 is the
 * only one in non-OMP builds).  Must be called from non-threaded code.
 */
void __nonMP_md5_unicode_convert(int what)
{
	md5_unicode_convert[0] = what;
#ifdef _OPENMP
	{
		unsigned int t;

		for (t = 1; t < m_ompt; ++t)
			md5_unicode_convert[t] = what;
	}
#endif
}
#if !defined (_OPENMP)
#define md5_unicode_convert_set(what, tid) md5_unicode_convert_set(what, 0)
#define md5_unicode_convert_get(tid) md5_unicode_convert_get(0)
#define eLargeOut_set(what, tid) eLargeOut_set(what, 0)
#define eLargeOut_get(tid) eLargeOut_get(0)
#define nLargeOff_set(val, tid) nLargeOff_set(val, 0)
#define nLargeOff_get(tid) nLargeOff_get(0)
#endif
/* Non-threaded wrapper: append all keys into input buffer 2 (0..m_count).
 * Same OMP/non-OMP signature split as the other __nonMP_ wrappers. */
static inline void __nonMP_DynamicFunc__append_keys2()
{
#ifdef _OPENMP
	DynamicFunc__append_keys2(0,m_count,0);
#else
	DynamicFunc__append_keys2();
#endif
}
/* "Possibly MP" wrapper: in OMP builds, split the key range into
 * OMP_MD5_INC-sized chunks and crypt them in parallel, each chunk on the
 * thread OpenMP hands us.  Non-OMP builds call the primitive once for all
 * keys. */
static void __possMP_DynamicFunc__crypt2_md5()
{
#ifdef _OPENMP
	int i;
	unsigned int inc = OMP_MD5_INC;
//	if (dynamic_use_sse!=1)
//		inc = OMP_INC;
#pragma omp parallel for
	for (i = 0; i < m_count; i += inc)
		DynamicFunc__crypt2_md5(i,i+inc,omp_get_thread_num());
#else
	DynamicFunc__crypt2_md5();
#endif
}
/* Zero input buffer 1 for every key slot.
 * SIMD path: wipe the whole interleaved buffer plus its length array.
 * Flat path: per slot, wipe only COMPUTE_EX_LEN() bytes (stored length plus
 * spill-over slack for the 0x80 pad / endian handling) - cheaper than
 * clearing the entire buffer. */
static void __nonMP_DynamicFunc__clean_input()
{
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(input_buf, 0, MMX_INP_BUF_SZ);
		memset(total_len, 0, MMX_TOT_LEN_SZ);
		return;
	}
#endif
	for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
		//if (total_len_X86[i]) {
#if MD5_X2
		// odd slots live in the x2 half of the paired MD5_X2 buffer
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
			memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
		//}
	}
	return;
}
/* Zero input buffer 2 for every key slot.  Same strategy as clean_input,
 * plus a shortcut: when flat buffers are in use with SSE2-ok hashing, only
 * the length array needs clearing. */
static void __nonMP_DynamicFunc__clean_input2()
{
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(input_buf2, 0, MMX_INP_BUF2_SZ);
		memset(total_len2, 0, MMX_TOT_LEN2_SZ);
		return;
	}
#endif
	// flat-buffer mode: lengths alone are enough; the data is overwritten
	if (curdat.using_flat_buffers_sse2_ok) {
		memset(total_len2_X86, 0, sizeof(total_len2_X86[0])*MAX_KEYS_PER_CRYPT_X86);
		return;
	}
	for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
		//if (total_len2_X86[i]) {
#if MD5_X2
		// odd slots live in the x2 half of the paired MD5_X2 buffer
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
			memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
		//}
	}
	return;
}
/* Fully zero input buffer 1: both the SIMD interleaved buffers (when built)
 * and the flat X86 buffers, with their length arrays. */
static void __nonMP_DynamicFunc__clean_input_full()
{
#ifdef SIMD_COEF_32
	memset(input_buf, 0, MMX_INP_BUF_SZ);
	memset(total_len, 0, MMX_TOT_LEN_SZ);
#endif
	memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
	memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
}
/* Fully zero input buffer 2: both the SIMD interleaved buffers (when built)
 * and the flat X86 buffers, with their length arrays. */
static void __nonMP_DynamicFunc__clean_input2_full()
{
#ifdef SIMD_COEF_32
	memset(input_buf2, 0, MMX_INP_BUF2_SZ);
	memset(total_len2, 0, MMX_TOT_LEN2_SZ);
#endif
	memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
	memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
}
/* Quick clean of input buffer 1: reset lengths only, leaving stale data to
 * be overwritten.  Big-endian builds must also wipe the flat data, since
 * stale bytes there would survive the endian-swapped writes. */
static void __nonMP_DynamicFunc__clean_input_kwik()
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(total_len, 0, MMX_TOT_LEN_SZ);
		return;
	}
#endif
	memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
	memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#ifndef _OPENMP
/* Quick clean of input buffer 2; mirror of clean_input_kwik.  (Only compiled
 * in non-OMP builds - see the surrounding #ifndef _OPENMP.) */
static void __nonMP_DynamicFunc__clean_input2_kwik()
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(total_len2, 0, MMX_TOT_LEN2_SZ);
		return;
	}
#endif
	memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
	memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#endif
/*********************************************************************************
* init() here does nothing. NOTE many formats LINKING into us will have a valid
* that DOES do something, but ours does nothing.
*********************************************************************************/
/*
 * Per-sub-format init.  Allocates the shared global buffers on first call,
 * then (unless this exact sub-format is already active) copies the
 * sub-format's private data into the global 'curdat' and mirrors its params
 * and methods into the master fmt_Dynamic structure.
 */
static void init(struct fmt_main *pFmt)
{
	private_subformat_data *pPriv = pFmt->private.data;
	unsigned int i;
	//fprintf(stderr, "init(%s)\n", pPriv->dynamic_WHICH_TYPE_SIG);
	/* first off, SAVE the original format structure (owned by JtR). We may need this later */
	pPriv->pFmtMain = pFmt;
#ifdef _OPENMP
	m_ompt = omp_get_max_threads();
	// one slot per OMP thread for the per-thread flags
	if (!md5_unicode_convert) {
		md5_unicode_convert = (int*)mem_calloc(m_ompt, sizeof(int));
		eLargeOut = (eLargeOut_t*)mem_calloc(m_ompt, sizeof(eLargeOut_t));
		nLargeOff = (unsigned*)mem_calloc(m_ompt, sizeof(unsigned));
		for (i = 0; i < m_ompt; ++i) {
			eLargeOut[i] = eBase16;
			nLargeOff[i] = 0;
		}
	}
#else
	// single slot (thread id 0) in non-OMP builds
	if (!md5_unicode_convert) {
		md5_unicode_convert = (int*)mem_calloc(1, sizeof(int));
		eLargeOut = (eLargeOut_t*)mem_calloc(1, sizeof(eLargeOut_t));
		eLargeOut[0] = eBase16;
		nLargeOff = (unsigned*)mem_calloc(1, sizeof(unsigned));
		nLargeOff[0] = 0;
	}
#endif
#ifdef SIMD_COEF_32
	// interleaved SIMD input/output buffers, SIMD-aligned
	if (!input_buf) {
		input_buf = mem_calloc_align(1, MMX_INP_BUF_SZ, MEM_ALIGN_SIMD);
		total_len = mem_calloc_align(1, MMX_TOT_LEN_SZ, MEM_ALIGN_SIMD);
		total_len2 = mem_calloc_align(1, MMX_TOT_LEN2_SZ, MEM_ALIGN_SIMD);
		input_buf2 = mem_calloc_align(1, MMX_INP_BUF2_SZ, MEM_ALIGN_SIMD);
		crypt_key = mem_calloc_align(1, MMX_CRYPT_KEY_SZ, MEM_ALIGN_SIMD);
		crypt_key2 = mem_calloc_align(1, MMX_CRYPT_KEY2_SZ, MEM_ALIGN_SIMD);
	}
#endif
	// flat (non-SIMD) buffers; +1 element of slack on each
	if (!crypt_key_X86) {
		crypt_key_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key_X86));
		crypt_key2_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key2_X86));
		input_buf_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf_X86));
		input_buf2_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf2_X86));
		total_len_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len_X86));
		total_len2_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len2_X86));
	}
	// NOTE(review): these 4 big-hash-out buffers are (re)allocated on EVERY
	// init call, unlike the guarded allocations above - confirm done() always
	// ran in between, else this leaks.
	for (i = 0; i < 4; ++i)
		dynamic_BHO[i].dat = mem_calloc_align(BLOCK_LOOPS, sizeof(*(dynamic_BHO[0].dat)), MEM_ALIGN_SIMD);
	gost_init_table();
	// already initialized for this exact sub-format? then nothing to switch
	if (!pPriv || (pPriv->init == 1 && !strcmp(curdat.dynamic_WHICH_TYPE_SIG, pPriv->dynamic_WHICH_TYPE_SIG)))
		return;
	__nonMP_DynamicFunc__clean_input_full();
	__nonMP_DynamicFunc__clean_input2_full();
	// Some builds (omp vs non omp, etc) do not call these functions, so to avoid 'unused' warnings, we simply
	// call them here.
	__nonMP_DynamicFunc__clean_input_kwik();
	dynamic_RESET(pFmt);
	if (!pPriv)
		return;
	pPriv->init = 1;
	// make this sub-format the active one
	memcpy(&curdat, pPriv, sizeof(private_subformat_data));
	dynamic_use_sse = curdat.dynamic_use_sse;
	force_md5_ctx = curdat.force_md5_ctx;
	fmt_Dynamic.params.max_keys_per_crypt = pFmt->params.max_keys_per_crypt;
	// NOTE(review): min is deliberately(?) seeded from MAX here, then the
	// caller's struct is clamped to 64 below - confirm this is intended.
	fmt_Dynamic.params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
	if (pFmt->params.min_keys_per_crypt > 64)
		pFmt->params.min_keys_per_crypt = 64;
	fmt_Dynamic.params.flags = pFmt->params.flags;
	fmt_Dynamic.params.format_name = pFmt->params.format_name;
	fmt_Dynamic.params.algorithm_name = pFmt->params.algorithm_name;
	fmt_Dynamic.params.benchmark_comment = pFmt->params.benchmark_comment;
	fmt_Dynamic.params.benchmark_length = pFmt->params.benchmark_length;
	// we allow for 3 bytes of utf8 data to make up the number of plaintext_length unicode chars.
	if ( (pFmt->params.flags&FMT_UNICODE) && options.target_enc == UTF_8 ) {
		//printf ("Here pFmt->params.plaintext_length=%d pPriv->pSetup->MaxInputLen=%d\n", pFmt->params.plaintext_length, pPriv->pSetup->MaxInputLen);
		pFmt->params.plaintext_length = MIN(125, pFmt->params.plaintext_length * 3);
	}
	else
		fmt_Dynamic.params.plaintext_length = pFmt->params.plaintext_length;
	fmt_Dynamic.params.salt_size = pFmt->params.salt_size;
	fmt_Dynamic.params.flags = pFmt->params.flags;
	// mirror the sub-format's method table into the master structure
	fmt_Dynamic.methods.cmp_all = pFmt->methods.cmp_all;
	fmt_Dynamic.methods.cmp_one = pFmt->methods.cmp_one;
	fmt_Dynamic.methods.cmp_exact = pFmt->methods.cmp_exact;
	fmt_Dynamic.methods.set_salt = pFmt->methods.set_salt;
	fmt_Dynamic.methods.salt = pFmt->methods.salt;
	fmt_Dynamic.methods.salt_hash = pFmt->methods.salt_hash;
	fmt_Dynamic.methods.split = pFmt->methods.split;
	fmt_Dynamic.methods.set_key = pFmt->methods.set_key;
	fmt_Dynamic.methods.get_key = pFmt->methods.get_key;
	fmt_Dynamic.methods.clear_keys = pFmt->methods.clear_keys;
	fmt_Dynamic.methods.crypt_all = pFmt->methods.crypt_all;
	for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
	{
		fmt_Dynamic.methods.binary_hash[i] = pFmt->methods.binary_hash[i];
		fmt_Dynamic.methods.get_hash[i] = pFmt->methods.get_hash[i];
	}
#if !MD5_IMM
	{
		extern void MD5_std_init(struct fmt_main *pFmt);
		MD5_std_init(pFmt);
	}
#endif
	// formats that always hash a 32-byte base-16 string into input 2:
	// pre-set the lengths and (for SIMD) the fixed 0x80 pad and bit length
	if (curdat.input2_set_len32) {
		for (i = 0; i < MAX_KEYS_PER_CRYPT_X86; ++i)
			total_len2_X86[i] = 32;
#ifdef SIMD_COEF_32
		for (i = 0; i < BLOCK_LOOPS; ++i) {
			unsigned int j;
			for (j = 0; j < SIMD_COEF_32; j++) {
				input_buf2[i].c[GETPOS(32, j)] = 0x80;
				input_buf2[i].c[GETPOS(57, j)] = 0x1;
				total_len2[i][j] = 0x20;
			}
		}
#endif
	}
}
/*
 * Release every buffer allocated by init().  MEM_FREE() also NULLs the
 * pointer, so a later init() re-allocates cleanly.  The frees are all
 * independent allocations, so their order is immaterial.
 */
static void done(void)
{
	int slot;

	for (slot = 0; slot < 4; ++slot)
		MEM_FREE(dynamic_BHO[slot].dat);
	MEM_FREE(md5_unicode_convert);
	MEM_FREE(eLargeOut);
	MEM_FREE(nLargeOff);
#ifdef SIMD_COEF_32
	MEM_FREE(input_buf);
	MEM_FREE(total_len);
	MEM_FREE(total_len2);
	MEM_FREE(input_buf2);
	MEM_FREE(crypt_key);
	MEM_FREE(crypt_key2);
#endif
	MEM_FREE(crypt_key_X86);
	MEM_FREE(crypt_key2_X86);
	MEM_FREE(input_buf_X86);
	MEM_FREE(input_buf2_X86);
	MEM_FREE(total_len_X86);
	MEM_FREE(total_len2_X86);
}
/*********************************************************************************
* This function will add a $dynamic_#$ IF there is not one, and if we have a specific
* format requested. Also, it will add things like UserID, Domain, Fld3, Fld4,
* Fld5, etc.
*********************************************************************************/
/*
 * Normalize an input line into canonical $dynamic_#$ form before valid():
 * convert old md5_gen(#) signatures, strip redundant mime '=' padding,
 * decode $HEX$ salts (when NUL-free), and append $$U / $$F# fields from
 * the other split_fields when the sub-format needs them.  Returns either
 * split_fields[1] untouched or a pointer into one of several static
 * buffers (NOT thread-safe; consistent with JtR's prepare() contract).
 */
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	private_subformat_data *pPriv = pFmt->private.data;
	char Tmp[80];
	int i;
	int trim_u=0;	// set when we append a temporary "$$U" just for FixupIfNeeded
	char *cpBuilding=split_fields[1];
	if (!pPriv)
		return split_fields[1];
	// ANY field[1] longer than 490 will simply be ignored, and returned 'as is'.
	// the rest of this function makes this assumption.
	if (!cpBuilding || strlen(cpBuilding) > 490)
		return cpBuilding;
	// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
	if (pPriv->dynamic_base64_inout == 3 && !strncmp(cpBuilding, "$dynamic_", 9))
	{
		static char ct[496];
		int len;
		char *cp = strchr(&cpBuilding[9], '$'), *cp2;
		if (!cp) return cpBuilding;
		++cp;
		len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		if (len && cp[len-1] == '=') {
			// copy up to the base64 end, drop the '=' run, re-attach any tail
			strnzcpy(ct, cpBuilding, cp-cpBuilding+len+1);
			cp2 = &ct[strlen(ct)-1];
			while (*cp2 == '=')
				*cp2-- = 0;
			if (cp[len])
				strcat(cp2, &cp[len]);
			cpBuilding = ct;
		}
	}
	// bare hash (no '$' fields) and nothing to synthesize: leave it alone
	if (pFmt->params.salt_size && !strchr(split_fields[1], '$')) {
		if (!pPriv->nUserName && !pPriv->FldMask && options.regen_lost_salts == 0)
			return split_fields[1];
	}
	// handle 'older' md5_gen(x) signature, by simply converting to $dynamic_x$ signature
	// Thus older md5_gen() is a valid input (or from john.pot), but ONLY the newer
	// $dynamic_x$ will be written out (into .pot, output lines, etc).
	if (!strncmp(cpBuilding, "md5_gen(", 8))
	{
		static char ct[496];
		char *cp = &cpBuilding[8], *cpo = &ct[sprintf(ct, "$dynamic_")];
		while (*cp >= '0' && *cp <= '9')
			*cpo++ = *cp++;
		*cpo++ = '$';
		++cp;	// skip the ')'
		strcpy(cpo, cp);
		cpBuilding = ct;
	}
	// At this point, max length of cpBuilding is 491 (if it was a md5_gen signature)
	// allow a raw hash, if there is a $u but no salt
	if (pPriv->nUserName && strlen(split_fields[0]) && !strchr(cpBuilding, '$') && strcmp(split_fields[0], "?")) {
		static char ct[496];
		strcpy(ct, cpBuilding);
		strcat(ct, "$$U");
		cpBuilding = ct;
		trim_u=1;
	}
	cpBuilding = FixupIfNeeded(cpBuilding, pPriv);
	if (trim_u)
		cpBuilding[strlen(cpBuilding)-3] = 0;	// remove the temporary "$$U"
	// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
	if (strncmp(cpBuilding, "$dynamic_", 9)) {
		// ok, here we add the 'generic' regen salt code
		if (options.regen_lost_salts && !strchr(cpBuilding, '$')) {
			char *cp = load_regen_lost_salt_Prepare(cpBuilding);
			if (cp)
				return cp;
		}
		return split_fields[1];
	}
	if ( (pPriv->pSetup->flags&MGF_SALTED) == 0)
		return cpBuilding;
	/* at this point, we want to convert ANY and all $HEX$hex into values */
	/* the reason we want to do this, is so that things read from john.pot file will be in proper 'native' format */
	/* the ONE exception to this, is if there is a NULL byte in the $HEX$ string, then we MUST leave that $HEX$ string */
	/* alone, and let the later calls in dynamic.c handle them. */
	if (strstr(cpBuilding, "$HEX$")) {
		char *cp, *cpo;
		int bGood=1;
		static char ct[512];
		// same decoding walk as RemoveHEX(), but done in-place over 'ct'
		strcpy(ct, cpBuilding);
		cp = strstr(ct, "$HEX$");
		cpo = cp;
		*cpo++ = *cp;	// keep the leading '$', then skip past "HEX$"
		cp += 5;
		while (*cp && bGood) {
			if (*cp == '0' && cp[1] == '0') {
				// embedded NUL: keep the $HEX$ form for dynamic.c to handle
				bGood = 0;
				break;
			}
			if (atoi16[ARCH_INDEX(*cp)] != 0x7f && atoi16[ARCH_INDEX(cp[1])] != 0x7f) {
				*cpo++ = atoi16[ARCH_INDEX(*cp)]*16 + atoi16[ARCH_INDEX(cp[1])];
				*cpo = 0;
				cp += 2;
			} else if (*cp == '$') {
				// trailing field: copy verbatim until the next $HEX$ marker
				while (*cp && strncmp(cp, "$HEX$", 5)) {
					*cpo++ = *cp++;
				}
				*cpo = 0;
				if (!strncmp(cp, "$HEX$", 5)) {
					*cpo++ = *cp;
					cp += 5;
				}
			} else {
				return split_fields[1];
			}
		}
		if (bGood)
			cpBuilding = ct;
		// if we came into $HEX$ removal, then cpBuilding will always be shorter
	}
	// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
	if (pPriv->nUserName && !strstr(cpBuilding, "$$U")) {
		if (split_fields[0] && strlen(split_fields[0]) && strcmp(split_fields[0], "?")) {
			char *userName=split_fields[0], *cp;
			static char ct[1024];
			// assume field[0] is in format: username OR DOMAIN\\username If we find a \\, then use the username 'following' it.
			cp = strchr(split_fields[0], '\\');
			if (cp)
				userName = &cp[1];
			userName = HandleCase(userName, pPriv->nUserName);
			snprintf (ct, sizeof(ct), "%s$$U%s", cpBuilding, userName);
			cpBuilding = ct;
		}
	}
	// append every required $$F# field that is present and not already on the line
	if (pPriv->FldMask) {
		for (i = 0; i < 10; ++i) {
			if (pPriv->FldMask&(MGF_FLDx_BIT<<i)) {
				sprintf(Tmp, "$$F%d", i);
				if (split_fields[i] && strlen(split_fields[i]) && strcmp(split_fields[i], "/") && !strstr(cpBuilding, Tmp)) {
					static char ct[1024];
					char ct2[1024];
					snprintf (ct2, sizeof(ct2), "%s$$F%d%s", cpBuilding, i, split_fields[i]);
					strcpy(ct, ct2);
					cpBuilding = ct;
				}
			}
		}
	}
	return cpBuilding;
}
/*
 * split() - canonicalize a ciphertext line into this subformat's 'native' form.
 * Ensures the "$dynamic_N$" signature is present (converting legacy
 * "md5_gen(N)" input), decodes embedded $HEX$ runs via RemoveHEX(), and for
 * base-64 (MIME) subformats strips trailing '=' padding so equal hashes
 * always compare equal.  Returns a pointer to a static buffer (or the
 * original string if no rewrite was needed).
 */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[1024];
	private_subformat_data *pPriv = pFmt->private.data;
	/* overlong lines are passed through untouched; out[] is only 1024 bytes */
	if (strlen(ciphertext) > 950)
		return ciphertext;
	// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
	if (pPriv->dynamic_base64_inout == 3 && !strncmp(ciphertext, "$dynamic_", 9))
	{
		static char ct[496];
		unsigned int len;
		char *cp = strchr(&ciphertext[9], '$'), *cp2;
		if (cp) {
			++cp;
			/* length of the valid MIME base-64 run, counting trailing '=' chars */
			len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
			if (len && cp[len-1] == '=') {
				/* copy signature + base-64 data, then peel the '=' padding */
				strnzcpy(ct, ciphertext, cp-ciphertext+len+1);
				cp2 = &ct[strlen(ct)-1];
				while (*cp2 == '=')
					*cp2-- = 0;
				if (cp[len])
					strcat(cp2, &cp[len]); /* re-append anything after the b64 run */
				ciphertext = ct;
			}
		}
	}
	/* already in native $dynamic_N$ form: only $HEX$ decoding may be needed */
	if (!strncmp(ciphertext, "$dynamic", 8)) {
		if (strstr(ciphertext, "$HEX$"))
			return RemoveHEX(out, ciphertext);
		return ciphertext;
	}
	/* legacy md5_gen(N) form: skip the old-style prefix, then re-sign below */
	if (!strncmp(ciphertext, "md5_gen(", 8)) {
		ciphertext += 8;
		do ++ciphertext; while (*ciphertext != ')') ;
		++ciphertext;
	}
	/* prepend this subformat's signature, decoding $HEX$ if present */
	if (strstr(ciphertext, "$HEX$")) {
		char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
		RemoveHEX(cp, ciphertext);
	} else
		snprintf(out, sizeof(out), "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
	return out;
}
// This split unifies case.
/*
 * split_UC() - like split(), but additionally lower-cases the hex hash
 * portion of the resulting string (the text between the signature's
 * closing '$' and the next '$', if any), so case-insensitive subformats
 * store a single canonical representation.
 */
static char *split_UC(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[1024];
	private_subformat_data *pPriv = pFmt->private.data;
	if (!strncmp(ciphertext, "$dynamic", 8)) {
		/* already signed: just decode $HEX$ (or copy) into out[] */
		if (strstr(ciphertext, "$HEX$"))
			RemoveHEX(out, ciphertext);
		else
			strcpy(out, ciphertext);
	} else {
		/* legacy md5_gen(N) prefix: skip past the closing ')' */
		if (!strncmp(ciphertext, "md5_gen(", 8)) {
			ciphertext += 8;
			do ++ciphertext; while (*ciphertext != ')') ;
			++ciphertext;
		}
		if (strstr(ciphertext, "$HEX$")) {
			char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
			RemoveHEX(cp, ciphertext);
		} else
			sprintf(out, "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
	}
	/* lower-case the hash body: start just past the '$' that ends the
	   "$dynamic_N$" signature, stop at end of string or next '$' field */
	ciphertext = strchr(&out[8], '$')+1;
	while (*ciphertext && *ciphertext != '$') {
		if (*ciphertext >= 'A' && *ciphertext <= 'Z')
			*ciphertext += 0x20; // ASCII specific, but I really do not care.
		++ciphertext;
	}
//	printf("%s\n", out);
	return out;
}
/*********************************************************************************
 * Stores the new salt provided into our 'working' salt
 *
 * Salt blob layout (built elsewhere in this file):
 *   bytes 0-1 : salt length as two base-8 ASCII digits ('0'..'7')
 *   bytes 2-5 : a 12-bit "todo" bitmap as four base-8 ASCII digits;
 *               bit0 = salt2 present, bit1 = username present,
 *               bits 2..11 = fields F0..F9 present
 *   then the salt bytes, followed (in bit order) by each optional item as
 *   a single raw length byte plus that many data bytes.
 *********************************************************************************/
static void set_salt(void *salt)
{
	unsigned char *cpsalt;
	unsigned int todo_bits=0, i, bit;
	if (!salt || curdat.dynamic_FIXED_SALT_SIZE == 0) {
		saltlen = 0;
		return;
	}
	cpsalt = *((unsigned char**)salt);
	/* two base-8 digits -> salt length */
	saltlen = *cpsalt++ - '0';
	saltlen <<= 3;
	saltlen += *cpsalt++ - '0';
#if ARCH_ALLOWS_UNALIGNED
	if (*((ARCH_WORD_32*)cpsalt) != 0x30303030) /* "0000" as one 32-bit load */
#else
	if (memcmp(cpsalt, "0000", 4))
#endif
	{
		// this is why we used base-8. Takes an extra byte, but there is NO conditional
		// logic, building this number, and no multiplication. We HAVE added one conditional
		// check, to see if we can skip the entire load, if it is 0000.
		todo_bits = *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
	}
	else
		cpsalt += 4;
	cursalt = cpsalt;
	if (!todo_bits) return; /* common case: just a single salt, we are done */
	cpsalt += saltlen;
	/* each optional item is a 1-byte raw length followed by the data;
	   bits are cleared as consumed so we can stop as soon as none remain */
	if (todo_bits & 1) {
		todo_bits ^= 1; // clear that bit.
		saltlen2 = *cpsalt++;
		cursalt2 = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += saltlen2;
	}
	if (todo_bits & 2) {
		todo_bits ^= 2; // clear that bit.
		usernamelen = *cpsalt++;
		username = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += usernamelen;
	}
	bit = 4;
	for (i = 0; i < 10; ++i, bit<<=1) {
		if (todo_bits & bit) {
			todo_bits ^= bit; // clear that bit.
			fld_lens[i] = *cpsalt++;
			flds[i] = cpsalt;
			if (todo_bits == 0) return;
			cpsalt += fld_lens[i];
		}
	}
}
/*********************************************************************************
 * Sets this key. It will either be dropped DIRECTLY into the input buffer
 * number 1, or put into an array of keys. Which one happens depends upon
 * HOW the generic functions were laid out for this type. Not all types can
 * load into the input. If not they MUST use the key array. Using the input
 * buffer is faster, when it can be safely done.
 *********************************************************************************/
static void set_key(char *key, int index)
{
	unsigned int len;

	//printf("idx=%d key=%s\n", index, key);

#ifdef SIMD_COEF_32
	/* select SIMD vs x86 operating mode for this crypt batch */
	if (curdat.store_keys_in_input==2)
		dynamic_use_sse = 3;
	else if (curdat.md5_startup_in_x86)
		dynamic_use_sse = 2;
	else if (dynamic_use_sse==2)
		dynamic_use_sse = 1;
#endif

	if (curdat.nPassCase>1)
		key = HandleCase(key, curdat.nPassCase);

	// Ok, if the key is in unicode/utf8, we switch it here one time, and are done with it.

	if (curdat.store_keys_in_input)
	{
#ifdef SIMD_COEF_32
		if (dynamic_use_sse==1) {
			// code derived from rawMD5_fmt_plug.c code from magnum
			/* copies the key a 32-bit word at a time into the interleaved
			   SIMD input buffer, appending the 0x80 MD5 pad byte at the
			   first NUL, then zeroing the remainder of the old key */
#if ARCH_ALLOWS_UNALIGNED
			const ARCH_WORD_32 *key32 = (ARCH_WORD_32*)key;
#else
			char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
			const ARCH_WORD_32 *key32 = is_aligned(key, sizeof(uint32_t)) ?
					(uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key);
#endif
			unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
			ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
			ARCH_WORD_32 *keybuf_word = keybuffer;
			unsigned int len;
			ARCH_WORD_32 temp;

			len = 0;
			while((temp = *key32++) & 0xff) {
				if (!(temp & 0xff00))
				{
					*keybuf_word = (temp & 0xff) | (0x80 << 8);
					++len;
					goto key_cleaning;
				}
				if (!(temp & 0xff0000))
				{
					*keybuf_word = (temp & 0xffff) | (0x80 << 16);
					len+=2;
					goto key_cleaning;
				}
				if (!(temp & 0xff000000))
				{
					*keybuf_word = temp | (0x80U << 24);
					len+=3;
					goto key_cleaning;
				}
				*keybuf_word = temp;
				len += 4;
				keybuf_word += SIMD_COEF_32;
			}
			*keybuf_word = 0x80;

key_cleaning:
			/* zero out the tail of any longer, previously loaded key */
			keybuf_word += SIMD_COEF_32;
			while(*keybuf_word) {
				*keybuf_word = 0;
				keybuf_word += SIMD_COEF_32;
			}
			/* MD5 length field: bit count goes into word 14 of the block */
			keybuffer[14*SIMD_COEF_32] = len << 3;
			return;
		}
#endif
		len = strlen(key);
		if (len > 110) // we never do UTF-8 -> UTF-16 in this mode
			len = 110;
//		if(index==0) {
			// we 'have' to use full clean here. NOTE 100% sure why, but 10 formats fail if we do not.
//			__nonMP_DynamicFunc__clean_input_full();
//		}
#if MD5_X2
		if (index & 1)
			memcpy(input_buf_X86[index>>MD5_X2].x2.b2, key, len);
		else
#endif
			memcpy(input_buf_X86[index>>MD5_X2].x1.b, key, len);
		saved_key_len[index] = total_len_X86[index] = len;
	}
	else
	{
		/* key-array mode: keep the plaintext; the script functions will
		   move it into input buffers during crypt_all */
		len = strlen(key);
		if (len > 110 && !(fmt_Dynamic.params.flags & FMT_UNICODE))
			len = 110;
//		if(index==0) {
//			__nonMP_DynamicFunc__clean_input_full();
//		}
		keys_dirty = 1; /* tells crypt_all that precompute work is needed */
		memcpy(((char*)(saved_key[index])), key, len);
		saved_key_len[index] = len;
	}
}
/*
 * clear_keys() - wipe key/input buffers between crypt batches, doing the
 * minimum amount of cleaning each subformat actually requires.
 */
static void clear_keys(void)
{
#ifdef SIMD_COEF_32
	if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED) {
		__nonMP_DynamicFunc__clean_input_full();
		return;
	}
	/* keys loaded straight into input buffers are overwritten by set_key,
	   so no cleaning is needed for those modes */
	if (curdat.store_keys_in_input==1 || curdat.store_keys_in_input==3)
		return;
	if (curdat.md5_startup_in_x86)
		__nonMP_DynamicFunc__clean_input_full();
	// This clean was causing failures (dirty buffers left) for dyna_51, 61 and formspring.
	// once commented out, dyna fully passes.  I see no reason to keep this here at all.
	//else
	//	__nonMP_DynamicFunc__clean_input_kwik();
#else
	__nonMP_DynamicFunc__clean_input_full();
#endif
}
/*********************************************************************************
 * Returns the key.  NOTE how it gets it depends upon if we are storing
 * into the array of keys (there we simply return it), or if we are
 * loading into input buffer #1.  If in input buffer, we have to re-create
 * the key, prior to returning it.
 *********************************************************************************/
static char *get_key(int index)
{
	if (curdat.store_keys_in_input)
	{
		unsigned int i;
		unsigned char *cp;
#ifdef SIMD_COEF_32
		//if (dynamic_use_sse==1) {
		// Note, if we are not in
		if (dynamic_use_sse && !curdat.md5_startup_in_x86) {
			unsigned int s;
			unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
			//if (curdat.store_keys_in_input && dynamic_use_sse==1)

			// s = saved_key_len[index];  // NOTE, we now have to get the length from the buffer, we do NOT store it into a saved_key_len buffer.
			ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
			/* word 14 holds the MD5 bit count; >>3 converts back to bytes */
			s = keybuffer[14*SIMD_COEF_32] >> 3;
			/* de-interleave the key bytes out of the SIMD buffer */
			for(i=0;i<s;i++)
				out[i] = input_buf[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
			out[i] = 0;
			return (char*)out;
		}
#endif
#if MD5_X2
		if (index & 1)
			cp = input_buf_X86[index>>MD5_X2].x2.B2;
		else
#endif
			cp = input_buf_X86[index>>MD5_X2].x1.B;
		/* x86 flat buffer: copy saved_key_len bytes and NUL terminate */
		for(i=0;i<saved_key_len[index];++i)
			out[i] = cp[i];
		out[i] = 0;
		return (char*)out;
	}
	else
	{
		/* key-array mode: the plaintext is kept verbatim; just terminate it */
		saved_key[index][saved_key_len[index]] = '\0';
		return saved_key[index];
	}
}
/*********************************************************************************
 * Looks for ANY key that was cracked.
 *
 * Only the first 32-bit word of the binary is compared here; cmp_one()
 * later performs the full 128-bit check on any apparent match.
 * Returns 1 as soon as one candidate's leading word matches, else 0.
 *********************************************************************************/
static int cmp_all(void *binary, int count)
{
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int j;
	if (dynamic_use_sse&1) {
		unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
		for (i = 0; i < cnt; ++i)
		{
			for (j = 0; j < SIMD_COEF_32; ++j)
				if( *((ARCH_WORD_32 *)binary) == crypt_key[i].w[j])
					return 1;
		}
		return 0;
	}
#endif
	/* cast avoids a signed/unsigned comparison; count is never negative */
	for (i = 0; i < (unsigned int)count; i++) {
#if MD5_X2
		if (i&1) {
			/* odd indexes live in the second half of the paired X2 buffer */
			if (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[i>>MD5_X2].x2.w2[0])
				return 1;
		}
		else
#endif
		if (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[i>>MD5_X2].x1.w[0])
			return 1;
	}
	return 0;
}
#if ARCH_LITTLE_ENDIAN
#define MASK_4x6 0x00ffffff
#else
#define MASK_4x6 0xffffff00
#endif
/*
 * cmp_all variant for 64x4x6 (24-bit-per-word) hashes: same first-word
 * scan as cmp_all(), but the computed crypt word is masked down to the
 * 24 significant bits (endian-dependent MASK_4x6) before comparing.
 */
static int cmp_all_64_4x6(void *binary, int count)
{
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int j;
	if (dynamic_use_sse==1) {
		unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
		for (i = 0; i < cnt; ++i)
		{
			for (j = 0; j < SIMD_COEF_32; ++j)
				if( *((ARCH_WORD_32 *)binary) == (crypt_key[i].w[j] & MASK_4x6))
					return 1;
		}
		return 0;
	}
#endif
	/* cast avoids a signed/unsigned comparison; count is never negative */
	for (i = 0; i < (unsigned int)count; i++) {
#if MD5_X2
		if (i&1) {
			if (((ARCH_WORD_32 *)binary)[0] == (crypt_key_X86[i>>MD5_X2].x2.w2[0]&MASK_4x6))
				return 1;
		}
		else
#endif
		if (((ARCH_WORD_32 *)binary)[0] == (crypt_key_X86[i>>MD5_X2].x1.w[0]&MASK_4x6))
			return 1;
	}
	return 0;
}
/*********************************************************************************
 * Final verification hook required by the format interface.  This format
 * family already performs a full, exact binary comparison inside cmp_one(),
 * so by the time john calls cmp_exact() there is nothing left to check;
 * we simply confirm the match.
 *********************************************************************************/
static int cmp_exact(char *binary, int index)
{
	/* exact compare was done in cmp_one(); always a confirmed hit here */
	return 1;
}
/*********************************************************************************
 * There was 'something' that was possibly hit.  Now john will ask us to check
 * each one of the data items, for an 'exact' match.
 *
 * Compares all four 32-bit words (128 bits) of the stored binary against the
 * computed crypt result for this candidate index.  Returns 1 on full match.
 *********************************************************************************/
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		/* SIMD layout: words of lane (index % SIMD_COEF_32) are interleaved
		   with a stride of SIMD_COEF_32 within block idx */
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		if( (((ARCH_WORD_32 *)binary)[0] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
			(((ARCH_WORD_32 *)binary)[1] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
			(((ARCH_WORD_32 *)binary)[2] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
			(((ARCH_WORD_32 *)binary)[3] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]))
			return 1;
		return 0;
	}
#endif
#if MD5_X2
	if (index & 1) {
		/* odd indexes use the second half of the paired X2 buffer */
		if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x2.w2[0] ) &&
			 (((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x2.w2[1] ) &&
			 (((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x2.w2[2] ) &&
			 (((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x2.w2[3] ) )
			return 1;
		return 0;
	}
#endif
	if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x1.w[0] ) &&
		 (((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x1.w[1] ) &&
		 (((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x1.w[2] ) &&
		 (((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x1.w[3] ) )
		return 1;
	return 0;
}
/*
 * cmp_one variant for 64x4x6 (24-bit-per-word) hashes: identical structure
 * to cmp_one(), but each computed crypt word is masked with MASK_4x6 so
 * only the 24 significant bits are compared against the stored binary.
 */
static int cmp_one_64_4x6(void *binary, int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		if( (((ARCH_WORD_32 *)binary)[0] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
			(((ARCH_WORD_32 *)binary)[1] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
			(((ARCH_WORD_32 *)binary)[2] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
			(((ARCH_WORD_32 *)binary)[3] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)))
			return 1;
		return 0;
	}
#endif
#if MD5_X2
	if (index & 1) {
		if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x2.w2[0] & MASK_4x6)) &&
			 (((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x2.w2[1] & MASK_4x6)) &&
			 (((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x2.w2[2] & MASK_4x6)) &&
			 (((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x2.w2[3] & MASK_4x6)) )
			return 1;
		return 0;
	}
#endif
	if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x1.w[0] & MASK_4x6)) &&
		 (((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x1.w[1] & MASK_4x6)) &&
		 (((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x1.w[2] & MASK_4x6)) &&
		 (((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x1.w[3] & MASK_4x6)) )
		return 1;
	return 0;
}
/*********************************************************************************
 *********************************************************************************
 * This is the real 'engine'.  It simply calls functions one
 * at a time from the array of functions.
 *
 * Flow: (1) publish batch size and reset output/convert modes, (2) pick the
 * SIMD/x86 operating mode and hex-case tables, (3) if keys changed since the
 * last batch, optionally pre-hash them into an input buffer (the big CASE
 * tables dispatch per hash primitive), then (4) run the subformat's compiled
 * function script over the whole batch, OpenMP-partitioned when enabled.
 *********************************************************************************
 *********************************************************************************/
static int crypt_all(int *pcount, struct db_salt *salt)
{
	// set m_count. This is our GLOBAL value, used by ALL of the script functions to know how
	// many keys are loaded, and how much work we do.
	m_count = *pcount;
	__nonMP_eLargeOut(eBase16);
	__nonMP_nLargeOff(0);

#ifdef SIMD_COEF_32
	// If this format is MMX built, but is supposed to start in X86 (but be switchable), then we
	// set that value here.
	if (curdat.store_keys_in_input==2)
		dynamic_use_sse = 3;
	else if (curdat.md5_startup_in_x86)
		dynamic_use_sse = 2;
	else if (dynamic_use_sse==2)
		dynamic_use_sse = 1;
#endif

	__nonMP_md5_unicode_convert(0);

	/* select upper- or lower-case hex conversion tables for this subformat */
	if (curdat.dynamic_base16_upcase) {
		dynamic_itoa16 = itoa16u;
		itoa16_w2 = itoa16_w2_u;
	}
	else {
		dynamic_itoa16 = itoa16;
		itoa16_w2 = itoa16_w2_l;
	}

	// There may have to be some 'prelim' work done with the keys.  This is so that if we 'know' that keys were
	// loaded into the keys[] array, but that we should do something like md5 and base-16 put them into an
	// input slot, then we do that FIRST, prior to calling the script functions.  Thus for a format such as
	// md5(md5($p).$s) we could md5 the pass, and base-16 put it into a input buffer.  Then when john sets salt
	// and calls crypt all, the crypt script would simply set the input len to 32, append the salt and call a
	// single crypt.  That eliminates almost 1/2 of the calls to md5_crypt() for the format show in this example.
	if (keys_dirty)
	{
		if (curdat.store_keys_normal_but_precompute_hash_to_output2)
		{
			keys_dirty = 0;
			if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED2)
				__nonMP_DynamicFunc__clean_input2_full();
			else
				__nonMP_DynamicFunc__clean_input2();
			if (curdat.store_keys_in_input_unicode_convert)
				__nonMP_md5_unicode_convert(1);
			__nonMP_DynamicFunc__append_keys2();
			__nonMP_md5_unicode_convert(0);

			//if (curdat.using_flat_buffers_sse2_ok) {
			if (curdat.dynamic_use_sse == 0) {
				if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) {
					/* hash input2 and OVERWRITE input1 with the base-16 result */
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(); break
#endif
					switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type)
					{
						CASE(MD5);
						CASE(MD4);
						CASE(SHA1);
						CASE(SHA224);
						CASE(SHA256);
						CASE(SHA384);
						CASE(SHA512);
						CASE(GOST);
						CASE(WHIRLPOOL);
						CASE(Tiger);
						CASE(RIPEMD128);
						CASE(RIPEMD160);
						CASE(RIPEMD256);
						CASE(RIPEMD320);
						CASE(HAVAL128_3);
						CASE(HAVAL128_4);
						CASE(HAVAL128_5);
						CASE(HAVAL160_3);
						CASE(HAVAL160_4);
						CASE(HAVAL160_5);
						CASE(HAVAL192_3);
						CASE(HAVAL192_4);
						CASE(HAVAL192_5);
						CASE(HAVAL224_3);
						CASE(HAVAL224_4);
						CASE(HAVAL224_5);
						CASE(HAVAL256_3);
						CASE(HAVAL256_4);
						CASE(HAVAL256_5);
						CASE(MD2);
						CASE(PANAMA);
						CASE(SKEIN224);
						CASE(SKEIN256);
						CASE(SKEIN384);
						CASE(SKEIN512);
						CASE(SHA3_224);
						CASE(SHA3_256);
						CASE(SHA3_384);
						CASE(SHA3_512);
						CASE(KECCAK_256);
						CASE(KECCAK_512);
						// LARGE_HASH_EDIT_POINT
					}
				} else if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) {
					/* same, but APPEND the base-16 hash at a fixed offset in input1 */
					unsigned int i;
					for (i = 0; i < m_count; ++i)
						total_len_X86[i] = curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX;
#undef CASE
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(); break
#endif
					switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type) {
						CASE(MD5);
						CASE(MD4);
						CASE(SHA1);
						CASE(SHA224);
						CASE(SHA256);
						CASE(SHA384);
						CASE(SHA512);
						CASE(GOST);
						CASE(WHIRLPOOL);
						CASE(Tiger);
						CASE(RIPEMD128);
						CASE(RIPEMD160);
						CASE(RIPEMD256);
						CASE(RIPEMD320);
						CASE(HAVAL128_3);
						CASE(HAVAL128_4);
						CASE(HAVAL128_5);
						CASE(HAVAL160_3);
						CASE(HAVAL160_4);
						CASE(HAVAL160_5);
						CASE(HAVAL192_3);
						CASE(HAVAL192_4);
						CASE(HAVAL192_5);
						CASE(HAVAL224_3);
						CASE(HAVAL224_4);
						CASE(HAVAL224_5);
						CASE(HAVAL256_3);
						CASE(HAVAL256_4);
						CASE(HAVAL256_5);
						CASE(MD2);
						CASE(PANAMA);
						CASE(SKEIN224);
						CASE(SKEIN256);
						CASE(SKEIN384);
						CASE(SKEIN512);
						CASE(SHA3_224);
						CASE(SHA3_256);
						CASE(SHA3_384);
						CASE(SHA3_512);
						CASE(KECCAK_256);
						CASE(KECCAK_512);
						// LARGE_HASH_EDIT_POINT
					}
				} else {
					// calls 'old' code (ossl, sorry :(  We should FIND and remove any format
					// written this way, if it is
					__possMP_DynamicFunc__crypt2_md5();
				}
			} else {
				__possMP_DynamicFunc__crypt2_md5();
				if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1)
				{
					if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==2)
						__nonMP_DynamicFunc__SSEtoX86_switch_output2();
					__nonMP_DynamicFunc__clean_input();
					__nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16();
				}
			}
		}
	}

	// Ok, now we 'run' the script. We simply call 1 function right after the other.
	// ALL functions are void f(void).  They use the globals:
	//   input_buf1[] input_buf2[] (requires thread safety)
	//   total_len1[] total_len2[]   (requires thread safety)
	//   crypt1[] crypt2[]           (requires thread safety)
	//   md5_unicode_convert         (requires thread safety, had to change to array)
	//   saved_key[]                 (const?)
	//   saved_key_len[]             (const)
	//   cursalt, cursalt2           (const)
	//   saltlen, saltlen2           (const)
	//   m_count                     (const)
	//   nConsts                     (const)
	//   Consts[], ConstsLen[]       (const)

	// Since this array is in a structure, we assign a simple pointer to it
	// before walking.  Trivial improvement, but every cycle counts :)
	{
#ifdef _OPENMP
	if ((curdat.pFmtMain->params.flags & FMT_OMP) == FMT_OMP) {
		int j;
		/* partition [0, m_count) into per-thread chunks, rounded up to the
		   subformat's granularity so SIMD blocks are never split */
		unsigned int inc = (m_count+m_ompt-1) / m_ompt;
		//printf ("maxkeys=%d m_count=%d inc1=%d granularity=%d inc2=%d\n", curdat.pFmtMain->params.max_keys_per_crypt, m_count, inc, curdat.omp_granularity, ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity);
		inc = ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity;
#pragma omp parallel for shared(curdat, inc, m_count)
		for (j = 0; j < m_count; j += inc) {
			unsigned int i;
			unsigned int top=j+inc;
			/* The last block may 'appear' to have more keys than we have in the
			   entire buffer space.  This is due to the granularity.  If so,
			   reduce that last one to stop at end of our buffers.  NOT doing
			   this is causes a huge buffer overflow.  */
			if (top > curdat.pFmtMain->params.max_keys_per_crypt)
				top = curdat.pFmtMain->params.max_keys_per_crypt;

			// we now run a full script in this thread, using only a subset of
			// the data, from [j,top)  The next thread will run from [top,top+inc)
			// each thread will take the next inc values, until we get to m_count
			for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
				(*(curdat.dynamic_FUNCTIONS[i]))(j,top,omp_get_thread_num());
		}
	} else {
		unsigned int i;
		// same code (almost), but without the threads.
		for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
			(*(curdat.dynamic_FUNCTIONS[i]))(0,m_count,0);
	}
#else
	unsigned int i;
	for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) {
		(*(curdat.dynamic_FUNCTIONS[i]))();
#if 0
		// Dump state (for debugging help)
		if (i==0) printf("\npassword=%.*s\n", saved_key_len[0], saved_key[0]);
		printf ("\nState after function: %s\n", dynamic_Find_Function_Name(curdat.dynamic_FUNCTIONS[i]));
		// dump input 1
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("input_buf[0]", input_buf[0].c, 64, 0);
		dump_stuff_mmx_msg("input_buf[1]", input_buf[0].c, 64, 1);
		dump_stuff_mmx_msg("input_buf[2]", input_buf[0].c, 64, 2);
		dump_stuff_mmx_msg("input_buf[3]", input_buf[0].c, 64, 3);
#endif
		printf ("input_buf86[0] : %*.*s\n", total_len_X86[0],total_len_X86[0],input_buf_X86[0].x1.b);
		printf ("input_buf86[1] : %*.*s\n", total_len_X86[1],total_len_X86[1],input_buf_X86[1].x1.b);
		printf ("input_buf86[2] : %*.*s\n", total_len_X86[2],total_len_X86[2],input_buf_X86[2].x1.b);
		printf ("input_buf86[3] : %*.*s\n", total_len_X86[3],total_len_X86[3],input_buf_X86[3].x1.b);
		// dump crypt 1
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("crypt_key[0]", crypt_key[0].c, 16, 0);
		dump_stuff_mmx_msg("crypt_key[1]", crypt_key[0].c, 16, 1);
		dump_stuff_mmx_msg("crypt_key[2]", crypt_key[0].c, 16, 2);
		dump_stuff_mmx_msg("crypt_key[3]", crypt_key[0].c, 16, 3);
#endif
		dump_stuff_be_msg("crypt_key_X86[0]", crypt_key_X86[0].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[1]", crypt_key_X86[1].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[2]", crypt_key_X86[2].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[3]", crypt_key_X86[3].x1.b, 16);
		// dump input 2
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("input_buf2[0]", input_buf2[0].c, 64, 0);
		dump_stuff_mmx_msg("input_buf2[1]", input_buf2[0].c, 64, 1);
		dump_stuff_mmx_msg("input_buf2[2]", input_buf2[0].c, 64, 2);
		dump_stuff_mmx_msg("input_buf2[3]", input_buf2[0].c, 64, 3);
#endif
		printf ("input2_buf86[0] : %*.*s\n", total_len2_X86[0],total_len2_X86[0],input_buf2_X86[0].x1.b);
		printf ("input2_buf86[1] : %*.*s\n", total_len2_X86[1],total_len2_X86[1],input_buf2_X86[1].x1.b);
		printf ("input2_buf86[2] : %*.*s\n", total_len2_X86[2],total_len2_X86[2],input_buf2_X86[2].x1.b);
		printf ("input2_buf86[3] : %*.*s\n", total_len2_X86[3],total_len2_X86[3],input_buf2_X86[3].x1.b);
		// dump crypt 2
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("crypt_key2[0]", crypt_key2[0].c, 16, 0);
		dump_stuff_mmx_msg("crypt_key2[1]", crypt_key2[0].c, 16, 1);
		dump_stuff_mmx_msg("crypt_key2[2]", crypt_key2[0].c, 16, 2);
		dump_stuff_mmx_msg("crypt_key2[3]", crypt_key2[0].c, 16, 3);
#endif
		dump_stuff_be_msg("crypt_key2_X86[0]", crypt_key2_X86[0].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[1]", crypt_key2_X86[1].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[2]", crypt_key2_X86[2].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[3]", crypt_key2_X86[3].x1.b, 16);
#endif
	}
#endif
	}
	return m_count;
}
/*********************************************************************************
* 'normal' hashing functions
*********************************************************************************/
extern char *MD5_DumpHexStr(void *p);
#if !ARCH_LITTLE_ENDIAN
// the lower 8 bits is zero on the binary (but filled in on the hash). We need to dump the low 8
/* Big-endian variants of the bucket-hash helpers for 64x4 hashes: both the
   stored binary and the computed crypt word are shifted right 8 bits so the
   always-zero low byte of the binary never perturbs the bucket index. */
static int binary_hash_0_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_0; }
static int binary_hash_1_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_1; }
static int binary_hash_2_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_2; }
static int binary_hash_3_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_3; }
static int binary_hash_4_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_4; }
static int binary_hash_5_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_5; }
static int get_hash_0_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_0;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_0;}
static int get_hash_1_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_1;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_1;}
static int get_hash_2_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_2;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_2;}
static int get_hash_3_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_3;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_3;}
static int get_hash_4_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_4;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_4;}
static int get_hash_5_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_5;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_5;}
#endif
/*
 * get_hash_0()..get_hash_6() - return the low PH_MASK_n bits of the first
 * 32-bit word of candidate 'index's computed hash, used by john's cracker
 * core to bucket candidates.  Each function is identical except for the
 * mask width; all three storage layouts (SIMD interleaved, paired MD5_X2,
 * flat x86) are handled.
 */
static int get_hash_0(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_0;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_0;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_0;
}

static int get_hash_1(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_1;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_1;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_1;
}

static int get_hash_2(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_2;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_2;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_2;
}

static int get_hash_3(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_3;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_3;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_3;
}

static int get_hash_4(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_4;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_4;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_4;
}

static int get_hash_5(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_5;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_5;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_5;
}

static int get_hash_6(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_6;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_6;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_6;
}
/************************************************************************
 * We now fully handle all hashing of salts, here in the format.  We
 * return a pointer ot an allocated salt record.  Thus, we search all
 * of the salt records, looking for the same salt.  If we find it, we
 * want to return THAT pointer, and not allocate a new pointer.
 * This works great, but forces us to do salt comparision here.
 ***********************************************************************/
#define DYNA_SALT_HASH_BITS SALT_HASH_LOG
#define DYNA_SALT_HASH_SIZE (1<<DYNA_SALT_HASH_BITS)
#define DYNA_SALT_HASH_MOD (DYNA_SALT_HASH_SIZE-1)

/* one stored salt: singly-linked chain node within a hash bucket */
typedef struct dyna_salt_list_entry {
	struct dyna_salt_list_entry *next;
	unsigned len;          /* number of bytes in 'salt' */
	unsigned char *salt;   /* points into the shared salt data pool */
} dyna_salt_list_entry;
/* head/tail list per bucket; tail enables O(1) append in AddSaltHash */
typedef struct {
	dyna_salt_list_entry *head, *tail;
	int count;
} dyna_salt_list_main;
typedef struct {
	dyna_salt_list_main List;
} SaltHashTab_t;
/* bucket table, lazily allocated on first FindSaltHash() call */
static SaltHashTab_t        *SaltHashTab=NULL;
/* pool of list-entry nodes, carved out 25000 at a time */
static dyna_salt_list_entry *pSaltHashData=NULL, *pSaltHashDataNext=NULL;
static int                   dyna_salt_list_count=0;
/* pool of raw salt bytes, carved out 0x60000 bytes at a time */
static unsigned char        *pSaltDataBuf=NULL, *pNextSaltDataBuf=NULL;
static int                   nSaltDataBuf=0;
/*
 * AddSaltHash() - store a copy of 'salt' (len bytes) and append it to hash
 * bucket 'idx'.  Returns the pointer to the pooled copy.  Node and data
 * pools are replenished in large slabs via the tiny-allocator, so nothing
 * here is ever freed individually.
 */
static unsigned char *AddSaltHash(unsigned char *salt, unsigned int len, unsigned int idx)
{
	unsigned char *pRet;
	/* replenish the node pool when exhausted (25000 entries per slab) */
	if (dyna_salt_list_count == 0) {
		pSaltHashDataNext = pSaltHashData = mem_calloc_tiny(sizeof(dyna_salt_list_entry) * 25000, MEM_ALIGN_WORD);
		dyna_salt_list_count = 25000;
	}
	/* replenish the data pool when this salt will not fit; any remaining
	   tail of the old slab is simply abandoned (pool allocator, no leak).
	   NOTE(review): presumably len is always < 0x60000 — a larger salt
	   would overrun the fresh slab; confirm against callers. */
	if (nSaltDataBuf < len) {
		pSaltDataBuf = pNextSaltDataBuf = mem_alloc_tiny(0x60000, MEM_ALIGN_NONE);
		nSaltDataBuf = 0x60000;
	}
	pRet = pNextSaltDataBuf;
	pSaltHashDataNext->salt = pNextSaltDataBuf;
	memcpy(pSaltHashDataNext->salt, salt, len);
	pSaltHashDataNext->len = len;
	pNextSaltDataBuf += len;
	nSaltDataBuf -= len;

	/* append the new node at the tail of bucket idx's chain */
	if (SaltHashTab[idx].List.count == 0)
		SaltHashTab[idx].List.tail = SaltHashTab[idx].List.head = pSaltHashDataNext;
	else {
		SaltHashTab[idx].List.tail->next = pSaltHashDataNext;
		SaltHashTab[idx].List.tail = pSaltHashDataNext;
	}
	++SaltHashTab[idx].List.count;
	++pSaltHashDataNext;
	--dyna_salt_list_count;
	return pRet;
}
/*
 * Look this salt up in the dedupe table (bucket selected by the salt's
 * CRC-32).  If an identical salt is already stored, return the existing
 * copy; otherwise store a fresh copy and return that.
 */
static unsigned char *FindSaltHash(unsigned char *salt, unsigned int len, CRC32_t crc)
{
	unsigned int idx = crc & DYNA_SALT_HASH_MOD;
	dyna_salt_list_entry *cur;

	/* Lazily create the (zeroed) table of bucket chains on first use. */
	if (!SaltHashTab)
		SaltHashTab = mem_calloc_tiny(sizeof(SaltHashTab_t) * DYNA_SALT_HASH_SIZE, MEM_ALIGN_WORD);

	/* Walk this bucket's chain looking for an EQUAL salt.  An empty
	   bucket has a NULL head (table is calloc'd), so the loop simply
	   does not run. */
	for (cur = SaltHashTab[idx].List.head; cur; cur = cur->next) {
		if (cur->len == len && !memcmp((char*)cur->salt, (char*)salt, len))
			return cur->salt; /* found: reuse, do not allocate again */
	}
	/* Not stored yet: add a new copy. */
	return AddSaltHash(salt, len, idx);
}
/*
 * CRC-32 the salt bytes, then find-or-add the salt in the dedupe table
 * and return the pointer to the (single) stored copy.
 */
static unsigned char *HashSalt(unsigned char *salt, unsigned int len)
{
	CRC32_t crc = 0xffffffff, i;

	for (i = 0; i < len; ++i)
		crc = jtr_crc32(crc, salt[i]);
	/* final inversion completes the standard CRC-32 */
	return FindSaltHash(salt, len, ~crc);
}
/*
 * If the buffer begins with the literal "HEX$" marker, decode the hex
 * digits that follow into raw bytes in place and return the new (binary)
 * length.  Otherwise the buffer is untouched and the original length is
 * returned.  An odd number of hex digits drops the final nibble (the
 * length is floored by the >>1).
 */
static int ConvertFromHex(unsigned char *p, int len)
{
	unsigned char *cp;
	unsigned int i, x;
	/* The len < 4 guard also keeps memcmp() from inspecting bytes past
	   the end of a buffer too short to carry the 4-byte signature. */
	if (!p || len < 4 || memcmp(p, "HEX$", 4))
		return len;
	// Ok, do a convert, and return 'new' len.
	len -= 4;
	len >>= 1;
	cp = p;
	x = len;
	for (i = 4; x; --x, i += 2) {
		*cp++ = atoi16[ARCH_INDEX(p[i])]*16 + atoi16[ARCH_INDEX(p[i+1])];
	}
	*cp = 0;	/* keep the decoded result NUL terminated */
	return len;
}
/*
 * Convert a salt from external form (salt_data$$2salt2$$Uuser$$Fn...; all
 * pieces optional and in any order, except salt_data must come first) into
 * our internal form, written into Buffer:
 *   2 base-8 ASCII digits  -- length of the primary salt
 *   4 base-8 ASCII digits  -- bitmap of extra pieces present
 *                             (bit 0 = salt2, bit 1 = userid, bits 2..11 = F0..F9)
 *   primary salt bytes
 *   one pascal string (length byte + data) per extra piece, in bit order.
 * Returns the total number of bytes written to Buffer.  extern_salt is
 * modified in place (the $$X markers are cut off with NUL bytes).
 */
static unsigned int salt_external_to_internal_convert(unsigned char *extern_salt, unsigned char *Buffer)
{
	unsigned char *salt2=0, *userid=0, *Flds[10];
	int i, nsalt2=0, nuserid=0, nFlds[10]={0,0,0,0,0,0,0,0,0,0};
	unsigned int len = strlen((char*)extern_salt), bit;
	unsigned int bit_array=0;
	unsigned int the_real_len = 6;  // 2 bytes base-8 length, and 4 bytes base-8 bitmap.
	// work from back of string to front, looking for the $$X signatures.
	for (i = len-3; i >= 0; --i) {
		if (extern_salt[i] == '$' && extern_salt[i+1] == '$') {
			// a 'likely' extra salt value.
			switch(extern_salt[i+2]) {
			case '2':
				if (curdat.b2Salts) {
					salt2 = &extern_salt[i+3];
					nsalt2 = strlen((char*)salt2);
					nsalt2 = ConvertFromHex(salt2, nsalt2);
					extern_salt[i] = 0;
					bit_array |= 1;
					the_real_len += (nsalt2+1);
				}
				break;
			case 'U':
				if (curdat.nUserName) {
					userid = &extern_salt[i+3];
					nuserid = strlen((char*)userid);
					nuserid = ConvertFromHex(userid, nuserid);
					extern_salt[i] = 0;
					bit_array |= 2;
					the_real_len += (nuserid+1);
				}
				break;
			case 'F': {
				if (extern_salt[i+3] >= '0' && extern_salt[i+3] <= '9') {
					if (curdat.FldMask && (curdat.FldMask & (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) == (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) {
						Flds[extern_salt[i+3]-'0'] = &extern_salt[i+4];
						nFlds[extern_salt[i+3]-'0'] = strlen((char*)(Flds[extern_salt[i+3]-'0']));
						nFlds[extern_salt[i+3]-'0'] = ConvertFromHex(Flds[extern_salt[i+3]-'0'], nFlds[extern_salt[i+3]-'0']);
						extern_salt[i] = 0;
						bit_array |= (1<<(2+extern_salt[i+3]-'0'));
						the_real_len += (nFlds[extern_salt[i+3]-'0']+1);
					}
					break;
				}
			}
			}
		}
	}
	// We have now ripped the data apart. Now put it into Buffer, in proper ORDER
	// Length of salt (salt1)  These 2 are stored as base-8 numbers.
	len = strlen((char*)extern_salt);
	len = ConvertFromHex(extern_salt, len);
	the_real_len += len;
	*Buffer++ = (len>>3) + '0';
	*Buffer++ = (len&7) + '0';
	// bit array
	*Buffer++ = (bit_array>>9) + '0';
	*Buffer++ = ((bit_array>>6)&7) + '0';
	*Buffer++ = ((bit_array>>3)&7) + '0';
	*Buffer++ = (bit_array&7) + '0';
	memcpy((char*)Buffer, (char*)extern_salt, len);
	Buffer += len;
	if (!bit_array)
		return the_real_len;
	if (nsalt2) {
		*Buffer++ = nsalt2;
		memcpy((char*)Buffer, (char*)salt2, nsalt2);
		Buffer += nsalt2;
		bit_array &= ~1;
		if (!bit_array)
			return the_real_len;
	}
	if (nuserid) {
		*Buffer++ = nuserid;
		memcpy((char*)Buffer, (char*)userid, nuserid);
		if (curdat.nUserName==2) {
			/* mode 2: force the stored user name to upper case */
			Buffer[nuserid] = 0;
			strupr((char*)Buffer);
		} else if (curdat.nUserName==3) {
			/* BUGFIX: this branch previously tested ==2 again and was dead.
			   Mode 3 forces the stored user name to lower case. */
			Buffer[nuserid] = 0;
			strlwr((char*)Buffer);
		}
		Buffer += nuserid;
		bit_array &= ~2;
		if (!bit_array)
			return the_real_len;
	}
	/* fields F0..F9, in ascending bit order */
	bit = 4;
	for (i = 0; i < 10; ++i, bit<<=1) {
		if (nFlds[i]) {
			*Buffer++ = nFlds[i];
			memcpy((char*)Buffer, (char*)(Flds[i]), nFlds[i]);
			Buffer += nFlds[i];
			bit_array &= ~bit;
			if (!bit_array)
				return the_real_len;
		}
	}
	return the_real_len;
}
/*********************************************************************************
 * This salt function has been TOTALLY re-written. Now, we do these things:
 *  1. convert from external format ($salt$$Uuser$$2HEX$salt2_in_hex, etc, into
 *     our internal format. Our internal format is 2 base-8 numbers (2 digit and 4
 *     digit), followed by the 'raw' salt bytes, followed by pascal strings of any
 *     other special salt values (salt2, user, fields 0 to 9). The first 2 digit
 *     base 8 number is the length of the binary bytes of the 'real' salt. The
 *     2nd base-8 4 digit number, is a bit mask of what 'extra' salt types are
 *     contained.
 *  2. We allocate and 'own' the salt buffers here, so that:
 *  3. We detect duplicate salts. NOTE, we have normalized the salts, so 2 salts that
 *     appear different (external format), appear exactly the same on internal format.
 *     Thus, we dupe remove them here.
 *  4. We allocate storage for the salts. The ONLY thing we return to john, is
 *     a 4 (or 8 byte in 64 bit builds) pointer to the salt. Thus, when we find
 *     a dupe, we do not have to allocate ANY memory, and simply return the pointer
 *     to the original salt (which is the same as the one we are working on now).
 *
 * this is much more complex, however, it allows us to use much less memory, to
 * have the set_salt function operate VERY quickly (all processing is done here).
 * It also allows john load time to happen FASTER (yes faster) than it was happening
 * due to smaller memory footprint, and john's external salt collision to have
 * less work to do. The memory footprint was also reduced, because now we store
 * JUST the required memory, and a pointer. Before, often we stored a LOT of memory
 * for many format types. For a few types, we do use more memory with this method
 * than before, but for most the memory usage is way down.
 *********************************************************************************/
/*
 * Build this ciphertext's salt.  Returns a small static blob that holds a
 * POINTER to the owned, deduplicated internal-format salt (see HashSalt).
 * Steps: (re)select the right subformat if needed, locate the salt text in
 * the ciphertext, optionally replace/augment it with a hash of itself
 * (dynamic_salt_as_hex), normalize to internal format, then dedupe/store.
 */
static void *get_salt(char *ciphertext)
{
	char Salt[SALT_SIZE+1], saltIntBuf[SALT_SIZE+1];
	int off, possible_neg_one=0;
	unsigned char *saltp;
	unsigned int the_real_len;
	static union x {
		unsigned char salt_p[sizeof(unsigned char*)];
		ARCH_WORD p[1];
	} union_x;

	/* unsalted subformat: hand back a zeroed pointer blob */
	if ( (curdat.pSetup->flags&MGF_SALTED) == 0) {
		memset(union_x.salt_p, 0, sizeof(union_x.salt_p));
		return union_x.salt_p;
	}
	memset(Salt, 0, SALT_SIZE+1);
	// Ok, see if the wrong dynamic type is loaded (such as the 'last' dynamic type).
	if (!strncmp(ciphertext, "$dynamic_", 9)) {
		char *cp1 = &ciphertext[9];
		char *cp2 = &curdat.dynamic_WHICH_TYPE_SIG[9];
		while (*cp2 && *cp2 == *cp1) {
			++cp1; ++cp2;
		}
		if (*cp2) {
			/* signature mismatch: extract "$dynamic_N" from the hash and
			   switch curdat to that subformat's private data */
			char subformat[17];
			struct fmt_main *pFmtLocal;
			int nFmtNum;
			memcpy(subformat, ciphertext, 16);
			subformat[16] = 0;
			cp2 = &subformat[9];
			while (*cp2 && *cp2 != '$')
				++cp2;
			*cp2 = 0;
			nFmtNum = -1;
			sscanf(subformat, "$dynamic_%d", &nFmtNum);
			if (nFmtNum==-1)
				return union_x.salt_p;
			pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
			memcpy(&curdat, pFmtLocal->private.data, sizeof(private_subformat_data));
		}
	}
	/* nothing salt-like to extract for this subformat */
	if (curdat.dynamic_FIXED_SALT_SIZE==0 && !curdat.nUserName && !curdat.FldMask)
		return union_x.salt_p;
	if (!strncmp(ciphertext, "$dynamic_", 9))
		off=curdat.dynamic_SALT_OFFSET;
	else
		off=curdat.dynamic_SALT_OFFSET-strlen(curdat.dynamic_WHICH_TYPE_SIG);
	/* if the byte at the salt offset starts a handled $$X marker, the real
	   salt is empty and we back up one byte so the marker is kept intact */
	if (ciphertext[off] == '$') {
		if (ciphertext[off+1]=='U' && curdat.nUserName)
			possible_neg_one = -1;
		else if (ciphertext[off+1]=='2' && curdat.b2Salts)
			possible_neg_one = -1;
		else if (ciphertext[off+1]=='F' && ciphertext[off+2]>='0' && ciphertext[off+2]<='9' && curdat.FldMask) {
			if ((curdat.FldMask & (MGF_FLDx_BIT<<(ciphertext[off+2]-'0'))) == (MGF_FLDx_BIT<<(ciphertext[off+2]-'0')))
				possible_neg_one = -1;
		}
	}
	strnzcpy(Salt, &ciphertext[off + possible_neg_one], SALT_SIZE);

	if (curdat.dynamic_salt_as_hex)
	{
		/* the format uses hash(salt), in hex, as the effective salt; this
		   runs once per salt at load time, never during cracking */
		unsigned char Buf[128];
		unsigned int slen=strlen(Salt);
		switch (curdat.dynamic_salt_as_hex_format_type) {
		// TODO: Come up with some way to put these into a CASE(HASH) #define
#define SPH_CASE(H,F,S) case MGF__##H: {sph_##F##_context c;sph_##F##_init(&c);sph_##F(&c,(const unsigned char*)Salt,slen);sph_##F##_close(&c,Buf); \
	memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
#define OSSL_CASE(H,C,S) case MGF__##H: {C##_CTX c;H##_Init(&c);H##_Update(&c,Salt,slen);H##_Final(Buf,&c); \
	memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
#define KECCAK_CASE(H,S) case MGF__##H: {KECCAK_CTX c;H##_Init(&c);KECCAK_Update(&c,(BitSequence*)Salt,slen);KECCAK_Final(Buf,&c); \
	memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
		case MGF__MD5:
		{
			// Do not 'worry' about SSE/MMX, Only do 'generic' md5. This is ONLY done
			// at the start of the run. We will NEVER see this run, once john starts.
			MD5_CTX ctx;
			int i;
			char *cpo;
			MD5_Init(&ctx);
			if (curdat.dynamic_salt_as_hex & 0x100)
			{
				/* 0x100 flag: hash the salt expanded to 2 bytes per char
				   (each source byte followed by a zero byte) */
				char *s2 = mem_alloc(slen*2+1);
				for (i = 0; i < slen; ++i)
				{
					s2[i<<1] = Salt[i];
					s2[(i<<1)+1] = 0;
				}
				MD5_Update(&ctx, s2, slen*2);
				MEM_FREE(s2);
			}
			else
				MD5_Update(&ctx, Salt, slen);
			MD5_Final(Buf, &ctx);
			if ( (curdat.dynamic_salt_as_hex&3) == 2) {
				/* mode 2: append the hex hash as a $$2 secondary salt
				   instead of replacing the salt */
				strcat(Salt, "$$2");
				cpo = &Salt[slen+3];
			}
			else {
				cpo = Salt;
				memset(Salt, 0, SALT_SIZE+1);
			}
			base64_convert(Buf, e_b64_raw, 16, cpo, e_b64_hex, SALT_SIZE, 0, 0);
			break;
		}
		OSSL_CASE(MD4,MD4,16)
		OSSL_CASE(SHA1,SHA,20)
		OSSL_CASE(SHA224,SHA256,28)
		OSSL_CASE(SHA256,SHA256,32)
		OSSL_CASE(SHA384,SHA512,48)
		OSSL_CASE(SHA512,SHA512,64)
		OSSL_CASE(WHIRLPOOL,WHIRLPOOL,64)
		case MGF__GOST:
		{
			gost_ctx ctx;
			john_gost_init(&ctx);
			john_gost_update(&ctx, (const unsigned char*)Salt, slen);
			john_gost_final(&ctx, (unsigned char*)Buf);
			memset(Salt, 0, SALT_SIZE+1);
			base64_convert(Buf, e_b64_raw, 32, Salt, e_b64_hex, SALT_SIZE, 0, 0);
			break;
		}
		SPH_CASE(Tiger,tiger,24)
		SPH_CASE(RIPEMD128,ripemd128,16)
		SPH_CASE(RIPEMD160,ripemd160,20)
		SPH_CASE(RIPEMD256,ripemd256,32)
		SPH_CASE(RIPEMD320,ripemd320,40)
		SPH_CASE(HAVAL128_3,haval128_3,16)
		SPH_CASE(HAVAL128_4,haval128_4,16)
		SPH_CASE(HAVAL128_5,haval128_5,16)
		SPH_CASE(HAVAL160_3,haval160_3,20)
		SPH_CASE(HAVAL160_4,haval160_4,20)
		SPH_CASE(HAVAL160_5,haval160_5,20)
		SPH_CASE(HAVAL192_3,haval192_3,24)
		SPH_CASE(HAVAL192_4,haval192_4,24)
		SPH_CASE(HAVAL192_5,haval192_5,24)
		SPH_CASE(HAVAL224_3,haval224_3,28)
		SPH_CASE(HAVAL224_4,haval224_4,28)
		SPH_CASE(HAVAL224_5,haval224_5,28)
		SPH_CASE(HAVAL256_3,haval256_3,32)
		SPH_CASE(HAVAL256_4,haval256_4,32)
		SPH_CASE(HAVAL256_5,haval256_5,32)
		SPH_CASE(MD2,md2,16)
		SPH_CASE(PANAMA,panama,32)
		SPH_CASE(SKEIN224,skein224,28)
		SPH_CASE(SKEIN256,skein256,32)
		SPH_CASE(SKEIN384,skein384,48)
		SPH_CASE(SKEIN512,skein512,64)
		KECCAK_CASE(SHA3_224,28)
		KECCAK_CASE(SHA3_256,32)
		KECCAK_CASE(SHA3_384,48)
		KECCAK_CASE(SHA3_512,64)
		KECCAK_CASE(KECCAK_256,32)
		KECCAK_CASE(KECCAK_512,64)
		// LARGE_HASH_EDIT_POINT
		default:
		{
			error_msg("Invalid dynamic flags seen. Data type not yet defined\n");
		}
		}
	}
	the_real_len = salt_external_to_internal_convert((unsigned char*)Salt, (unsigned char*)saltIntBuf);
	// Now convert this into a stored salt, or find the 'already' stored same salt.
	saltp = HashSalt((unsigned char*)saltIntBuf, the_real_len);
	memcpy(union_x.salt_p, &saltp, sizeof(saltp));
	return union_x.salt_p;
}
/*********************************************************************************
 * Our salt is now returned only as a pointer, so we simply hash that pointer value.
 *********************************************************************************/
static int salt_hash(void *salt)
{
	unsigned long H;

	/* Unsalted subformats (and a NULL salt) all land in bucket 0. */
	if (!salt || (curdat.pSetup->flags&MGF_SALTED) == 0)
		return 0;
	/* The salt is a pointer, and word aligned.  Mix the value with its
	   own upper bits (H ^ (H>>9)) so that pointers coming out of a
	   fixed-size allocator still spread across the buckets, then mask
	   down to the table size. */
	H = *((unsigned long*)salt);
	return (H ^ (H >> 9)) & (SALT_HASH_SIZE - 1);
}
/*
 * Total data length of an internal-format salt: the primary salt length
 * (2 base-8 ASCII digits) plus, if the 4-digit base-8 bitmap is non-zero,
 * the lengths of every extra pascal-string piece it flags.
 */
static unsigned dynamic_this_salt_length(const void *v) {
	const unsigned char *p = (unsigned char*)v;
	unsigned total, mask;

	/* first two bytes: primary length in base-8 ASCII */
	total = (unsigned)(p[0] - '0') * 8 + (p[1] - '0');
	p += 2;
	/* "0000" bitmap means no extra pieces -- done */
#if ARCH_ALLOWS_UNALIGNED
	if (*((ARCH_WORD_32*)p) == 0x30303030)
#else
	if (!memcmp(p, "0000", 4))
#endif
		return total;
	/* next four bytes: bitmap of extra pieces, base-8 ASCII */
	mask = (unsigned)(p[0] - '0');
	mask = mask * 8 + (p[1] - '0');
	mask = mask * 8 + (p[2] - '0');
	mask = mask * 8 + (p[3] - '0');
	/* skip header + primary salt, then walk the pascal strings */
	p += 4 + total;
	while (mask) {
		if (mask & 1) {
			total += *p;     /* add this piece's data length */
			p += *p + 1;     /* advance past length byte + data */
		}
		mask >>= 1;
	}
	return total;
}
/*
* dyna compare is required, to get all the shortest
* salt strings first, then the next longer, then the
* next, and finally the longest. Without this change
* there are many dyna formats which will miss finding
* hashes, because old dirty salt information gets left
* over, blowing the next runs. There are many formats
* which try to not clear buffers if they do not need
* to, BUT this only works if salts are taken shortest
* to longest. This sort builds the list of salts that way
*/
static int salt_compare(const void *x, const void *y)
{
	/* Each element is a POINTER to the internal salt string; the first
	   2 bytes of that string are the primary length in base-8 ASCII, so
	   comparing them orders salts shortest-first (required so stale
	   buffer tails from a longer previous salt cannot pollute a run). */
	const char *A = *((const char**)x);
	const char *B = *((const char**)y);
	int lenA, lenB, common, diff;

	if (A[0] != B[0]) return A[0] < B[0] ? -1 : 1;
	if (A[1] != B[1]) return A[1] < B[1] ? -1 : 1;
	/* equal primary length: make the order fully deterministic (needed
	   for inter-salt restore) by comparing the complete salt data */
	lenA = dynamic_this_salt_length(A);
	lenB = dynamic_this_salt_length(B);
	common = (lenB < lenA) ? lenB : lenA;
	diff = memcmp(&A[6], &B[6], common);
	if (diff) return diff;
	if (lenA == lenB) return 0;
	return (lenA > lenB) ? 1 : -1;
}
/* Fingerprint this salt's full internal data (skipping the 6 header
   bytes) into s->salt_md5 with MD5. */
void dynamic_salt_md5(struct db_salt *s) {
	MD5_CTX ctx;
	const char *data = *((const char**)s->salt);
	int data_len = dynamic_this_salt_length(data);

	MD5_Init(&ctx);
	MD5_Update(&ctx, data + 6, data_len);
	MD5_Final((unsigned char*)(s->salt_md5), &ctx);
}
/*********************************************************************************
* Gets the binary value from a base-16 hash.
*********************************************************************************/
static void *get_binary(char *_ciphertext)
{
	static char *realcipher;
	char *hex = _ciphertext;
	unsigned int i;

	/* one-time buffer, sized for the largest supported digest */
	if (!realcipher)
		realcipher = mem_alloc_tiny(BINARY_SIZE_SHA, MEM_ALIGN_WORD);
	/* skip past a "$dynamic_N$" tag, if present */
	if (!strncmp(hex, "$dynamic_", 9)) {
		hex += 9;
		while (*hex++ != '$')
			;
	}
	/* decode BINARY_SIZE bytes of base-16 */
	for (i = 0; i < BINARY_SIZE; i++)
		realcipher[i] = atoi16[ARCH_INDEX(hex[i*2])]*16 +
			atoi16[ARCH_INDEX(hex[i*2+1])];
	return (void *)realcipher;
}
// NOTE NOTE NOTE, we have currently ONLY implemented a non-salted function!!!
static char *source(char *source, void *binary)
{
	static char Buf[256];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	unsigned int i;

	/* type signature first, then the 16 binary bytes rendered as hex */
	out += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < 16; ++i, ++in) {
		*out++ = itoa16[*in >> 4];
		*out++ = itoa16[*in & 0xF];
	}
	*out = 0;
	return Buf;
}
/* As source(), but for a 20-byte binary. */
static char *source_20_hex(char *source, void *binary)
{
	static char Buf[256];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	unsigned int i;

	out += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < 20; ++i, ++in) {
		*out++ = itoa16[*in >> 4];
		*out++ = itoa16[*in & 0xF];
	}
	*out = 0;
	return Buf;
}
/* As source(), but for a 28-byte binary. */
static char *source_28_hex(char *source, void *binary)
{
	static char Buf[256];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	unsigned int i;

	out += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < 28; ++i, ++in) {
		*out++ = itoa16[*in >> 4];
		*out++ = itoa16[*in & 0xF];
	}
	*out = 0;
	return Buf;
}
/* As source(), but for a 32-byte binary. */
static char *source_32_hex(char *source, void *binary)
{
	static char Buf[256];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	unsigned int i;

	out += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < 32; ++i, ++in) {
		*out++ = itoa16[*in >> 4];
		*out++ = itoa16[*in & 0xF];
	}
	*out = 0;
	return Buf;
}
/* As source(), but for a 40-byte binary. */
static char *source_40_hex(char *source, void *binary)
{
	static char Buf[256];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	unsigned int i;

	out += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < 40; ++i, ++in) {
		*out++ = itoa16[*in >> 4];
		*out++ = itoa16[*in & 0xF];
	}
	*out = 0;
	return Buf;
}
/* As source(), but for a 48-byte binary. */
static char *source_48_hex(char *source, void *binary)
{
	static char Buf[256];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	unsigned int i;

	out += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < 48; ++i, ++in) {
		*out++ = itoa16[*in >> 4];
		*out++ = itoa16[*in & 0xF];
	}
	*out = 0;
	return Buf;
}
/* As source(), but for a 64-byte binary. */
static char *source_64_hex(char *source, void *binary)
{
	static char Buf[256];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	unsigned int i;

	out += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < 64; ++i, ++in) {
		*out++ = itoa16[*in >> 4];
		*out++ = itoa16[*in & 0xF];
	}
	*out = 0;
	return Buf;
}
/*********************************************************************************
* Gets the binary value from a base-64 hash
*********************************************************************************/
/* Gets the binary value from a MIME base-64 hash. */
static void * binary_b64m(char *ciphertext)
{
	static unsigned char *b;
	char *pos = ciphertext;
	unsigned int n;

	if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
	/* skip past a "$dynamic_N$" tag, if present */
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	/* decode everything that is valid MIME base-64 */
	n = base64_valid_length(pos, e_b64_mime, 0, 0);
	base64_convert(pos, e_b64_mime, n, b, e_b64_raw, 64+3, 0, 0);
	return b;
}
/* Gets the binary value from a crypt base-64 hash (byte-swapped order). */
static void * binary_b64(char *ciphertext)
{
	static unsigned char *b;
	char *pos = ciphertext;
	unsigned int n;

	if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
	/* skip past a "$dynamic_N$" tag, if present */
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	/* NOTE(review): the length is validated as e_b64_crypt but decoded as
	   e_b64_cryptBS; presumably the two share an alphabet so the valid
	   length is identical -- confirm the asymmetry is intended. */
	n = base64_valid_length(pos, e_b64_crypt, 0, 0);
	base64_convert(pos, e_b64_cryptBS, n, b, e_b64_raw, 64+3, 0, 0);
	return b;
}
/* Gets the binary value from a crypt base-64 hash (natural byte order). */
static void * binary_b64b(char *ciphertext)
{
	static unsigned char *b;
	char *pos = ciphertext;
	unsigned int n;

	if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
	/* skip past a "$dynamic_N$" tag, if present */
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	n = base64_valid_length(pos, e_b64_crypt, 0, 0);
	base64_convert(pos, e_b64_crypt, n, b, e_b64_raw, 64+3, 0, 0);
	return b;
}
/*
 * Decode the next 4 base-64 chars (crypt alphabet via atoi64[]) into a
 * 24-bit value and scatter its 3 bytes to output positions b1/b2/b3.
 * Advances 'pos'; uses/clobbers the caller's 'value' local.
 */
#define TO_BINARY(b1, b2, b3) \
	value = \
		(MD5_word)atoi64[ARCH_INDEX(pos[0])] | \
		((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6) | \
		((MD5_word)atoi64[ARCH_INDEX(pos[2])] << 12) | \
		((MD5_word)atoi64[ARCH_INDEX(pos[3])] << 18); \
	pos += 4; \
	b[b1] = value >> 16; \
	b[b2] = value >> 8; \
	b[b3] = value;
/*
 * Gets the 16 binary bytes from a crypt(3)-style base-64 hash.  The 22
 * input chars fill bytes in the interleaved order (0,6,12), (1,7,13), ...
 * with the final 2 chars supplying b[11]; the result is then word-swapped.
 */
static void * binary_b64a(char *ciphertext)
{
	static unsigned char *b;
	char *pos;
	MD5_word value;
	if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD);
	pos = ciphertext;
	/* skip past a "$dynamic_N$" tag, if present */
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	TO_BINARY(0, 6, 12);
	TO_BINARY(1, 7, 13);
	TO_BINARY(2, 8, 14);
	TO_BINARY(3, 9, 15);
	TO_BINARY(4, 10, 5);
	b[11] =
		(MD5_word)atoi64[ARCH_INDEX(pos[0])] |
		((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6);
	MD5_swap((MD5_word*)b,(MD5_word*)b, 4);
	return b;
}
/*********************************************************************************
* Gets the binary value from a base-64 hash (such as cisco PIX)
*********************************************************************************/
static void * binary_b64_4x6(char *ciphertext)
{
	static ARCH_WORD_32 *b;
	char *pos = ciphertext;
	unsigned int i;

	if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD);
	/* skip past a "$dynamic_N$" tag, if present */
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	/* four groups of four 6-bit chars -> four 24-bit words */
	for (i = 0; i < 4; i++) {
		b[i] = atoi64[ARCH_INDEX(pos[i*4 + 0])] +
			(atoi64[ARCH_INDEX(pos[i*4 + 1])] << 6) +
			(atoi64[ARCH_INDEX(pos[i*4 + 2])] << 12) +
			(atoi64[ARCH_INDEX(pos[i*4 + 3])] << 18);
	}
	MD5_swap(b, b, 4);
	return (void *)b;
}
/*********************************************************************************
* Here is the main mdg_generic fmt_main. NOTE in its default settings, it is
* ready to handle base-16 hashes.
*********************************************************************************/
/* The master fmt_main.  In these default settings it handles base-16
   hashes; NOTE(review): subformats presumably patch members (sizes, the
   source_NN_hex functions, etc.) at init time -- confirm in init(). */
static struct fmt_main fmt_Dynamic =
{
	{
		FORMAT_LABEL,
		FORMAT_NAME,
#ifdef SIMD_COEF_32
		ALGORITHM_NAME,
#else
		ALGORITHM_NAME_X86,
#endif
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
#ifdef SIMD_COEF_32
		PLAINTEXT_LENGTH,
#else
		PLAINTEXT_LENGTH_X86,
#endif
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
#ifdef SIMD_COEF_32
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#else
		MIN_KEYS_PER_CRYPT_X86,
		MAX_KEYS_PER_CRYPT_X86,
#endif
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		{ NULL },
		dynamic_tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		/* sorts salts shortest-first and deterministically (see comment
		   above salt_compare) */
		salt_compare,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/**************************************************************
 **************************************************************
 **************************************************************
 **************************************************************
 * These are the md5 'primitive' functions that are used by
 * the built-in expressions, and by the expression generator.
 * They load passwords, salts, user ids, do crypts, convert
 * crypts into base-16, etc. They are pretty encompassing,
 * and have been found to be able to do most anything with
 * a standard 'base-16' md5 hash, salted or unsalted, that
 * fits a 'simple' php style expression.
 **************************************************************
 **************************************************************
 **************************************************************
 *************************************************************/
/* Build the 256-entry byte -> two-hex-chars lookup tables, upper case
   (itoa16_w2_u) and lower case (itoa16_w2_l). */
static void Dynamic_Load_itoa16_w2()
{
	char hex[3];
	unsigned int v;

	for (v = 0; v < 256; ++v) {
		sprintf(hex, "%X%X", v >> 4, v & 0xF);
		memcpy(&(itoa16_w2_u[v]), hex, 2);
		sprintf(hex, "%x%x", v >> 4, v & 0xF);
		memcpy(&(itoa16_w2_l[v]), hex, 2);
	}
}
#ifdef SIMD_COEF_32
/**************************************************************
**************************************************************
* Here are some 'helpers' to our helpers, when it comes to
* loading data into the mmx/sse buffers. We have several
* of these common helper functions, and use them in 'most'
* of the helper primitives, instead of having the same
* code being inlined in each of them.
**************************************************************
*************************************************************/
/*
 * Append the 16-byte crypt result CRY (for SIMD lane idx_mod) to the
 * interleaved SIMD input buffer as 32 hex characters, then write the 0x80
 * padding byte.  itoa16_w2[] packs one byte's two hex chars into a 16-bit
 * word.  Fully unrolled; 'inc' skips the other lanes' interleaved 16-bit
 * slots, and the CRY bumps re-align the source within the lane layout.
 */
static void __SSE_append_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #3
	// 5955K (core2, $dynamic_2$)
	// 1565K (core2, $dynamic_1006$)
	// 3381K (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
	unsigned short *IPBw = (unsigned short*)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;

	/* padding byte right after the 32 hex chars */
	*IPBw = 0x80;
#undef inc
}
/*
 * Same as __SSE_append_output_base16_to_input(), but does NOT write the
 * trailing 0x80 padding byte (for callers that overwrite an existing,
 * already-padded buffer of the same length).
 */
static void __SSE_overwrite_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #3
	// 5955K (core2, $dynamic_2$)
	// 1565K (core2, $dynamic_1006$)
	// 3381K (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
	unsigned short *IPBw = (unsigned short *)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
#undef inc
}
/*
 * Append the 16-byte result CRY as 32 hex chars starting at input offset
 * 'ip', where ip lands in the MIDDLE of a 32-bit word (we start half-off
 * and end in the middle of the last dword).  The first hex pair goes into
 * the upper half of the current dword; each following dword gets one pair
 * low and one pair high.  Ends by OR-ing the 0x80 padding byte into the
 * upper half of the final dword.
 */
static void __SSE_append_output_base16_to_input_semi_aligned_2(unsigned int ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #1
	// 9586k/4740k (core2, $dynamic_9$)
	// 5113k/4382k (core2,$dynamic_10$)
	//  (ath64, $dynamic_9$)
	//  (ath64, $dynamic_10$)
# define inc SIMD_COEF_32
# define incCRY ((SIMD_COEF_32 - 1) * 4)
	// Ok, here we are 1/2 off. We are starting in the 'middle' of a DWORD (and end
	// in the middle of the last one).

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*SIMD_COEF_32;
	CRY += (idx_mod<<2);

	// first byte handled here.
	*IPBdw &= 0xFFFF;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);

	// Add the 0x80 at the proper location (offset 0x21)
	*IPBdw |= 0x800000;
#undef inc
#undef incCRY
}
/*
 * Append the 16-byte result CRY as 32 hex chars starting at input offset
 * 'ip', where ip is 32-bit aligned: each dword receives two hex pairs
 * (low pair from *CRY, high pair from *(CRY+1)).  The 0x80 padding byte
 * is written into the dword that follows the 32 hex chars.
 */
static void __SSE_append_output_base16_to_input_semi_aligned_0(unsigned int ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #2
	// 6083k (core2, $dynamic_2$)
	// 1590K (core2, $dynamic_1006$)
	// 3537K (ath64, $dynamic_2$)
	// 890.3K (ath64, $dynamic_1006$)
#undef inc
#define inc SIMD_COEF_32
#define incCRY (4*SIMD_COEF_32-2)

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*SIMD_COEF_32;
	CRY += (idx_mod<<2);

	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	// CRY += (inc*3)+2;
	CRY += incCRY;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	// CRY += (inc*3)+2;
	CRY += incCRY;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	// CRY += (inc*3)+2;
	CRY += incCRY;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);

	// Add the 0x80 at the proper location (offset 0x21)
	IPBdw += inc;
	*IPBdw = 0x80;
#undef inc
#undef incCRY
}
/*
 * Append 'len' bytes of cp to lane idx_mod of the interleaved SIMD input
 * buffer IPB, expanding each byte to 2 bytes (byte, 0) as it goes, starting
 * at lane offset bf_ptr.  If bUpdate0x80, writes the 0x80 padding byte after
 * the appended data.  On little-endian builds a dword fast path packs two
 * source bytes per 32-bit store once the output is 4-byte aligned.
 */
static void __SSE_append_string_to_input_unicode(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
	unsigned char *cpO;
#if ARCH_LITTLE_ENDIAN
	// if big-endian, we gain nothing from this function (since we would have to byte swap)
	if (len>1&&!(bf_ptr&1))
	{
		unsigned int w32_cnt;
		/* half-aligned start: emit one expanded byte to reach dword alignment */
		if(bf_ptr&2) {
			cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
			bf_ptr += 2;
			*cpO = *cp++;
			cpO[1] = 0;
			--len;
		}
		w32_cnt = len>>1;
		if (w32_cnt)
		{
			ARCH_WORD_32 *wpO;
			wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
			len -= (w32_cnt<<1);
			bf_ptr += (w32_cnt<<2);
			do
			{
				/* pack 2 source bytes as (cp[0], 0, cp[1], 0) in one dword */
				ARCH_WORD_32 x = 0;
				x = cp[1];
				x <<= 16;
				x += cp[0];
				*wpO = x;
				cp += 2;
				wpO += SIMD_COEF_32;
			}
			while (--w32_cnt);
		}
	}
#endif
	/* byte-at-a-time tail (and the whole job on big-endian); every 4th
	   output byte hops over the other lanes' interleaved dwords */
	cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
	while (len--)
	{
		*cpO++ = *cp++;
		if ( ((++bf_ptr)&3) == 0)
			cpO += ((SIMD_COEF_32-1)*4);
		*cpO++ = 0;
		if ( ((++bf_ptr)&3) == 0)
			cpO += ((SIMD_COEF_32-1)*4);
	}
	if (bUpdate0x80)
		*cpO = 0x80;
}
/*
 * Append 'len' bytes of cp to lane idx_mod of the interleaved SIMD input
 * buffer IPB, starting at lane offset bf_ptr.  If bUpdate0x80, writes the
 * 0x80 padding byte after the appended data.  On little-endian builds,
 * bytes are copied singly until the output is dword aligned, then whole
 * 32-bit words are stored (striding over the other lanes), with a byte
 * tail at the end.
 */
static void __SSE_append_string_to_input(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
	unsigned char *cpO;
	// if our insertion point is on an 'even' DWORD, then we use DWORD * copying, as long as we can
	// This provides quite a nice speedup.
#if ARCH_LITTLE_ENDIAN
	// if big-endian, we gain nothing from this function (since we would have to byte swap)
	/* unaligned head: copy bytes until bf_ptr reaches a dword boundary
	   (the break drops us into the dword loop below) */
	if (len>3&&(bf_ptr&3)) {
		cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
		while (len--)
		{
			*cpO++ = *cp++;
			if ( ((++bf_ptr)&3) == 0) {
				if (!len) {
					if (bUpdate0x80)
						*cpO = 0x80;
					return;
				}
				break;
			}
		}
	}
	if (len>3&&!(bf_ptr&3))
	{
		unsigned int w32_cnt = len>>2;
		if (w32_cnt)
		{
			ARCH_WORD_32 *wpO;
			wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
			len -= (w32_cnt<<2);
			bf_ptr += (w32_cnt<<2);
			do
			{
				*wpO = *((ARCH_WORD_32*)cp);
				cp += 4;
				wpO += SIMD_COEF_32;
			}
			while (--w32_cnt);
		}
		if (!len) {
			if (bUpdate0x80)
				IPB[GETPOS(bf_ptr, idx_mod)] = 0x80;
			return;
		}
	}
#endif
	/* byte-at-a-time tail (and the whole job on big-endian); every 4th
	   output byte hops over the other lanes' interleaved dwords */
	cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
	while (len--)
	{
		*cpO++ = *cp++;
		if ( ((++bf_ptr)&3) == 0)
			cpO += ((SIMD_COEF_32-1)*4);
	}
	if (bUpdate0x80)
		*cpO = 0x80;
}
#endif // #ifdef SIMD_COEF_32 from way above.
/*
 * Append 'len' bytes of Str to every in-flight candidate's input buffer
 * (keys first..last under OpenMP, otherwise 0..m_count), honoring the
 * current unicode conversion mode from md5_unicode_convert_get():
 * 0 = raw bytes, 1 = UTF-16LE, 2 = UTF-16BE.  Uses the interleaved SIMD
 * buffers when dynamic_use_sse==1, otherwise the flat X86 buffers.
 */
static inline void __append_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len)
{
	unsigned int j;
	unsigned int til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		if (!utf16) {
			for (; j < til; ++j) {
				unsigned int idx = j/SIMD_COEF_32;
				unsigned int idx_mod = j&(SIMD_COEF_32-1);
				unsigned int bf_ptr = total_len[idx][idx_mod];
				total_len[idx][idx_mod] += len;
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
			}
		} else {
			/* UTF-16BE, or a source encoding that needs real conversion:
			   convert once, then append the converted bytes to each key */
			if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
				UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16);
				if (outlen < 0)
					outlen = strlen16(utf16Str) * sizeof(UTF16);
				for (; j < til; ++j) {
					unsigned int idx = j/SIMD_COEF_32;
					unsigned int idx_mod = j&(SIMD_COEF_32-1);
					unsigned int bf_ptr = total_len[idx][idx_mod];
					total_len[idx][idx_mod] += outlen;
					// note we use the 'non' unicode variant, since we have already computed the unicode, and length properly
					__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				}
			} else {
				/* plain LE expansion: interleave zero bytes on the fly */
				for (; j < til; ++j) {
					unsigned int idx = j/SIMD_COEF_32;
					unsigned int idx_mod = j&(SIMD_COEF_32-1);
					unsigned int bf_ptr = total_len[idx][idx_mod];
					total_len[idx][idx_mod] += len << 1;
					__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
				}
			}
		}
		return;
	}
#endif
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
			int outlen;
			if (utf16 == 1)
				outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
			else
				outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
			if (outlen < 0)
				outlen = strlen16(utf16Str) * sizeof(UTF16);
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp;
				unsigned char *cpi = (unsigned char*)utf16Str;
				/* silently skip keys where the append would overflow */
				if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (z = 0; z < outlen; ++z) {
						*cp++ = *cpi++;
					}
					total_len_X86[j] += outlen;
				}
			}
		} else {
			/* plain LE expansion: write (byte, 0) pairs directly */
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp;
				unsigned char *cpi = Str;
				if (total_len_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (z = 0; z < len; ++z) {
						*cp++ = *cpi++;
						*cp++ = 0;
					}
					total_len_X86[j] += (len<<1);
				}
			}
		}
	} else {
		/* raw byte append into the flat buffers */
		for (; j < til; ++j) {
#if MD5_X2
			if (j&1)
				memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), Str, len);
			else
#endif
			memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), Str, len);
			total_len_X86[j] += len;
		}
	}
}
/*
 * Appends 'len' bytes of 'Str' to input buffer #2 of every candidate in the
 * current work slice ([first,last) under OpenMP, else [0,m_count)), honoring
 * the per-thread unicode conversion mode from md5_unicode_convert_get():
 *   0 = raw byte append, 1 = UTF-16LE, 2 = UTF-16BE.
 * Mirrors __append_string(), but targets input_buf2/total_len2.
 */
static inline void __append2_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len)
{
	unsigned int j;
	unsigned int til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		if (!utf16) {
			for (; j < til; ++j) {
				unsigned int idx = j/SIMD_COEF_32;
				unsigned int idx_mod = j&(SIMD_COEF_32-1);
				unsigned int bf_ptr = total_len2[idx][idx_mod];
				total_len2[idx][idx_mod] += len;
				__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
			}
		} else {
			/* Fix: also take the conversion path for UTF-16BE (utf16 == 2).
			 * Previously a BE request with an ASCII/ISO-8859-1 target encoding
			 * fell through to the LE fast path below, producing little-endian
			 * output.  This now matches __append_string() and the non-SSE
			 * code further down in this function. */
			if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
				UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16);
				if (outlen < 0)
					/* conversion stopped early; use the length actually produced */
					outlen = strlen16(utf16Str) * sizeof(UTF16);
				for (; j < til; ++j) {
					unsigned int idx = j/SIMD_COEF_32;
					unsigned int idx_mod = j&(SIMD_COEF_32-1);
					unsigned int bf_ptr = total_len2[idx][idx_mod];
					total_len2[idx][idx_mod] += outlen;
					// note we use the 'non' unicode variant of __SSE_append_string_to_input(), since it's already unicode, and length properly
					__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				}
			} else {
				/* plain ASCII/ISO-8859-1 -> UTF-16LE: inline zero-expansion */
				for (; j < til; ++j) {
					unsigned int idx = j/SIMD_COEF_32;
					unsigned int idx_mod = j&(SIMD_COEF_32-1);
					unsigned int bf_ptr = total_len2[idx][idx_mod];
					total_len2[idx][idx_mod] += len << 1;
					__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
				}
			}
		}
		return;
	}
#endif
	/* flat (x86) buffer code path */
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
			int outlen;
			if (utf16 == 1)
				outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
			else
				outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
			if (outlen < 0)
				outlen = strlen16(utf16Str) * sizeof(UTF16);
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp;
				unsigned char *cpi = (unsigned char*)utf16Str;
				/* only copy if it will NOT overflow the flat input buffer */
				if (total_len2_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
					else
#endif
					cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
					for (z = 0; z < outlen; ++z) {
						*cp++ = *cpi++;
					}
					total_len2_X86[j] += outlen;
				}
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp;
				unsigned char *cpi = Str;
				if (total_len2_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
					else
#endif
					cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
					for (z = 0; z < len; ++z) {
						*cp++ = *cpi++;
						*cp++ = 0;	/* interleave NUL high bytes (UTF-16LE) */
					}
					total_len2_X86[j] += (len<<1);
				}
			}
		}
	} else {
		/* no unicode conversion: straight memcpy append */
		for (; j < til; ++j) {
#if MD5_X2
			if (j&1)
				memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), Str, len);
			else
#endif
			memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), Str, len);
			total_len2_X86[j] += len;
		}
	}
}
/* Switch this thread's append primitives into UTF-16BE conversion mode (mode 2). */
void DynamicFunc__setmode_unicodeBE(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
	md5_unicode_convert_set(2,tid);
}
/* Switch this thread's append primitives into UTF-16LE conversion mode (mode 1). */
void DynamicFunc__setmode_unicode(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
	md5_unicode_convert_set(1,tid);
}
/* Switch this thread's append primitives back to raw (non-unicode) mode (mode 0). */
void DynamicFunc__setmode_normal (DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
	md5_unicode_convert_set(0,tid);
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Clears the input variable, and input 'lengths'
 *************************************************************/
void DynamicFunc__clean_input(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	/* single-threaded build delegates to the non-MP variant */
	__nonMP_DynamicFunc__clean_input();
#else
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		/* wipe whole SIMD blocks covering this thread's [first,last) slice */
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y) {
			memset(input_buf[x].c, 0, sizeof(input_buf[0]));
			memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
			++x;
		}
		return;
	}
#endif
	for (i = first; i < last; ++i) {
		/* only wipe the bytes that were actually used (plus padding, per COMPUTE_EX_LEN) */
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
	}
#endif
}
/* Same as DynamicFunc__clean_input(), but clears input buffer #2 and its lengths. */
void DynamicFunc__clean_input2(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2();
#else
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		/* wipe whole SIMD blocks covering this thread's [first,last) slice */
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y) {
			memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
			memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
			++x;
		}
		return;
	}
#endif
	for (i = first; i < last; ++i) {
		/* only wipe the bytes that were actually used (plus padding, per COMPUTE_EX_LEN) */
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
	}
#endif
}
/* Clears input #1 buffers and lengths in BOTH the SIMD and the flat (x86)
 * layouts, regardless of dynamic_use_sse (a "full" clean). */
void DynamicFunc__clean_input_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input_full();
#else
	unsigned int i;
#ifdef SIMD_COEF_32
	/* note: no dynamic_use_sse gate here — SIMD buffers are always wiped */
	unsigned int x = first / SIMD_COEF_32;
	unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
	while (x < y) {
		memset(input_buf[x].c, 0, sizeof(input_buf[0]));
		memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
		++x;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
	}
#endif
}
/* Clears input #2 buffers and lengths in BOTH the SIMD and the flat (x86)
 * layouts, regardless of dynamic_use_sse (a "full" clean). */
void DynamicFunc__clean_input2_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2_full();
#else
	unsigned int i;
#ifdef SIMD_COEF_32
	/* note: no dynamic_use_sse gate here — SIMD buffers are always wiped */
	unsigned int x = first / SIMD_COEF_32;
	unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
	while (x < y) {
		memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
		memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
		++x;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
	}
#endif
}
/* "Quick" clean of input #1: only resets the lengths to zero; buffer bytes are
 * left dirty (callers that overwrite from offset 0 don't need them wiped).
 * On big-endian flat builds a small tail is zeroed so stale padding can't leak. */
void DynamicFunc__clean_input_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input_kwik();
#else
#ifdef SIMD_COEF_32
	unsigned int i;
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y)
			memset(total_len[x++], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
		return;
	}
#else
	unsigned int i;
#endif
	for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
		/* +5 covers the old terminator/padding bytes past the used length —
		 * presumably length byte + 0x80 marker area; confirm against the hash code */
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, total_len_X86[i]+5);
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, total_len_X86[i]+5);
#endif
		total_len_X86[i] = 0;
	}
#endif
}
/* "Quick" clean of input #2: only resets the lengths to zero; buffer bytes are
 * left dirty. Mirror of DynamicFunc__clean_input_kwik() for the second buffer. */
void DynamicFunc__clean_input2_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2_kwik();
#else
#ifdef SIMD_COEF_32
	unsigned int i;
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y)
			memset(total_len2[x++], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
		return;
	}
#else
	unsigned int i;
#endif
	for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
		/* +5 covers the old terminator/padding bytes past the used length */
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, total_len2_X86[i]+5);
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, total_len2_X86[i]+5);
#endif
		total_len2_X86[i] = 0;
	}
#endif
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends all keys to the end of the input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_keys(DYNA_OMP_PARAMS)
{
	unsigned int j;
	unsigned int til;
	/* per-thread unicode mode: 0 = raw, 1 = UTF-16LE, 2 = UTF-16BE */
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len[idx][idx_mod];
			if (utf16) {
				if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
					UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
					int outlen;
					int maxlen=27;
					if (curdat.pSetup->MaxInputLen < maxlen)
						maxlen = curdat.pSetup->MaxInputLen;
					if (utf16 == 1)
						outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					else
						outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					if (outlen <= 0) {
						/* conversion truncated: shrink the stored key length to what actually fit */
						saved_key_len[j] = -outlen / sizeof(UTF16);
						if (outlen < 0)
							outlen = strlen16(utf16Str) * sizeof(UTF16);
					}
					total_len[idx][idx_mod] += outlen;
					__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				} else {
					/* plain ASCII/ISO-8859-1 -> UTF-16LE: inline zero-expansion, no conversion buffer */
					total_len[idx][idx_mod] += (saved_key_len[j] << 1);
					__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
				}
			} else {
				total_len[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	/* flat (x86) buffer code path */
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi;
				UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				if (outlen <= 0) {
					/* conversion truncated: shrink the stored key length to what actually fit */
					saved_key_len[j] = -outlen / sizeof(UTF16);
					if (outlen < 0)
						outlen = strlen16(utf16Str) * sizeof(UTF16);
				}
				// only copy data if it will NOT trash the buffer
				if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE)
				{
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
						*cp++ = *cpi++;
					total_len_X86[j] += outlen;
				}
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
				if (total_len_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (z = 0; z < saved_key_len[j]; ++z) {
						*cp++ = *cpi++;
						*cp++ = 0;	/* interleave NUL high bytes (UTF-16LE) */
					}
					total_len_X86[j] += (saved_key_len[j]<<1);
				}
			}
		}
	} else {
		/* no unicode conversion: straight memcpy append of each key */
		for (; j < til; ++j) {
#if MD5_X2
			if (j&1)
				memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
			else
#endif
			memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
			total_len_X86[j] += saved_key_len[j];
		}
	}
}
// DynamicFunc__append_keys_pad16
//   append the array of keys to the array input1[], padding with nulls to 16 bytes, if input shorter.
//   Needed for net-md5 and net-sha1 formats.
void DynamicFunc__append_keys_pad16(DYNA_OMP_PARAMS)
{
	unsigned int j;
	unsigned int til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len[idx][idx_mod];
			saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
			if (saved_key_len[j] < 16) {
				/* strncpy NUL-pads to its limit, so buf carries the zero padding;
				 * exactly 16 bytes are then appended */
				char buf[24];
				strncpy(buf, saved_key[j], 18);
				total_len[idx][idx_mod] += 16;
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,16,bf_ptr,1);
			} else {
				/* key already >= 16 bytes: append it unpadded at full length */
				total_len[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
		/* strncpy NUL-pads short keys out to the copy limit; length advances by 16 */
#if MD5_X2
		if (j&1)
			strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 17);
		else
#endif
		strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 17);
		total_len_X86[j] += 16;
	}
}
/* Like DynamicFunc__append_keys_pad16(), but pads short keys with nulls to 20 bytes. */
void DynamicFunc__append_keys_pad20(DYNA_OMP_PARAMS)
{
	unsigned int j;
	unsigned int til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len[idx][idx_mod];
			saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
			if (saved_key_len[j] < 20) {
				/* strncpy NUL-pads to its limit; exactly 20 bytes are then appended */
				char buf[28];
				strncpy(buf, saved_key[j], 22);
				total_len[idx][idx_mod] += 20;
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,20,bf_ptr,1);
			} else {
				/* key already >= 20 bytes: append it unpadded at full length */
				total_len[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
		/* strncpy NUL-pads short keys out to the copy limit; length advances by 20 */
#if MD5_X2
		if (j&1)
			strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 21);
		else
#endif
		strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 21);
		total_len_X86[j] += 20;
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends all keys to the end of the 2nd input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_keys2(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
	/* per-thread unicode mode: 0 = raw, 1 = UTF-16LE, 2 = UTF-16BE */
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len2[idx][idx_mod];
			if (utf16) {
				if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
					UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
					int outlen;
					int maxlen=27;
					if (curdat.pSetup->MaxInputLen < maxlen)
						maxlen = curdat.pSetup->MaxInputLen;
					if (utf16 == 1)
						outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					else
						outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					if (outlen <= 0) {
						/* conversion truncated: shrink the stored key length to what actually fit */
						saved_key_len[j] = -outlen / sizeof(UTF16);
						if (outlen < 0)
							outlen = strlen16(utf16Str) * sizeof(UTF16);
					}
					total_len2[idx][idx_mod] += outlen;
					__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				} else {
					/* plain ASCII/ISO-8859-1 -> UTF-16LE: inline zero-expansion */
					total_len2[idx][idx_mod] += (saved_key_len[j] << 1);
					__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
				}
			} else {
				total_len2[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	/* flat (x86) buffer code path */
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi;
				UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				if (outlen <= 0) {
					/* conversion truncated: shrink the stored key length to what actually fit */
					saved_key_len[j] = -outlen / sizeof(UTF16);
					if (outlen < 0)
						outlen = strlen16(utf16Str) * sizeof(UTF16);
				}
				// only copy data if it will NOT trash the buffer
				/* Fix: the guard previously tested total_len_X86[j] (buffer #1's
				 * length) while this branch reads and advances buffer #2; it now
				 * checks total_len2_X86[j], matching the (len<<1) guard below. */
				if (total_len2_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
					else
#endif
					cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
					for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
						*cp++ = *cpi++;
					total_len2_X86[j] += outlen;
				}
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
				if (total_len2_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
					else
#endif
					cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
					for (z = 0; z < saved_key_len[j]; ++z) {
						*cp++ = *cpi++;
						*cp++ = 0;	/* interleave NUL high bytes (UTF-16LE) */
					}
					total_len2_X86[j] += (saved_key_len[j]<<1);
				}
			}
		}
	} else {
		/* no unicode conversion: straight memcpy append of each key */
		for (; j < til; ++j) {
#if MD5_X2
			if (j&1)
				memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
			else
#endif
			memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
			total_len2_X86[j] += saved_key_len[j];
		}
	}
}
/* Forces the length of input #1 to 16 for every candidate: shorter inputs are
 * NUL-padded up to 16, longer ones simply have their length truncated to 16. */
void DynamicFunc__set_input_len_16(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			// If length is < 16, then remove existing end of buffer marker, and then set
			// one at offset 16
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len[j][k];
				if (this_item_len < 16)
					input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 16;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
		/* pad short inputs with NUL bytes out to offset 16 */
#if MD5_X2
		if (j&1) {
			while (total_len_X86[j] < 16)
				input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len_X86[j] < 16)
			input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
		total_len_X86[j] = 16;
	}
}
/* Forces the length of input #2 to 16 for every candidate: shorter inputs are
 * NUL-padded up to 16, longer ones simply have their length truncated to 16. */
void DynamicFunc__set_input2_len_16(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			// If length is < 16, then remove existing end of buffer marker, and then set
			// one at offset 16
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len2[j][k];
				if (this_item_len < 16)
					input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf2[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 16;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
		/* pad short inputs with NUL bytes out to offset 16 */
#if MD5_X2
		if (j&1) {
			while (total_len2_X86[j] < 16)
				input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len2_X86[j] < 16)
			input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
		total_len2_X86[j] = 16;
	}
}
/* Forces the length of input #1 to 20 for every candidate: shorter inputs are
 * NUL-padded up to 20, longer ones simply have their length truncated to 20. */
void DynamicFunc__set_input_len_20(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			// If length is < 20, then remove existing end of buffer marker, and then set
			// one at offset 20
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len[j][k];
				if (this_item_len < 20)
					input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 20;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		/* pad short inputs with NUL bytes out to offset 20 */
#if MD5_X2
		if (j&1) {
			while (total_len_X86[j] < 20)
				input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len_X86[j] < 20)
			input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
		total_len_X86[j] = 20;
	}
}
/* Forces the length of input #2 to 20 for every candidate: shorter inputs are
 * NUL-padded up to 20, longer ones simply have their length truncated to 20. */
void DynamicFunc__set_input2_len_20(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			// If length is < 20, then remove existing end of buffer marker, and then set
			// one at offset 20
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len2[j][k];
				if (this_item_len < 20)
					input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf2[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 20;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		/* pad short inputs with NUL bytes out to offset 20 */
#if MD5_X2
		if (j&1) {
			while (total_len2_X86[j] < 20)
				input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len2_X86[j] < 20)
			input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
		total_len2_X86[j] = 20;
	}
}
/* Sets the flat-buffer length of input #1 to 32 for every candidate in this slice. */
void DynamicFunc__set_input_len_32(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
	while (i < stop)
		total_len_X86[i++] = 32;
}
/* Sets the length of input #1 to 32 and clears the bytes just past the new end:
 * in SIMD mode a 0x80 end-of-buffer marker is placed at offset 32; on
 * big-endian flat builds the 24 bytes after offset 32 are zero-wiped. */
void DynamicFunc__set_input_len_32_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 32;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		total_len_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			//MD5_swap(input_buf_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
			memset(&(input_buf_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
		}
		else
#endif
		{
			//MD5_swap(input_buf_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
			memset(&(input_buf_X86[j>>MD5_X2].x1.B[32]), 0, 24);
		}
#endif
	}
}
/* Sets the flat-buffer length of input #2 to 32 for every candidate in this slice. */
void DynamicFunc__set_input2_len_32(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
	while (i < stop)
		total_len2_X86[i++] = 32;
}
/* Sets the length of input #2 to 32 and clears the bytes just past the new end:
 * in SIMD mode a 0x80 end-of-buffer marker is placed at offset 32; on
 * big-endian flat builds the 24 bytes after offset 32 are zero-wiped. */
void DynamicFunc__set_input2_len_32_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf2[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 32;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		total_len2_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			//MD5_swap(input_buf2_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
			memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
		}
		else
#endif
		{
			//MD5_swap(input_buf2_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
			memset(&(input_buf2_X86[j>>MD5_X2].x1.B[32]), 0, 24);
		}
#endif
	}
}
/* Sets the flat-buffer length of input #1 to 40 for every candidate in this slice. */
void DynamicFunc__set_input_len_40(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
	while (i < stop)
		total_len_X86[i++] = 40;
}
/* Sets the flat-buffer length of input #2 to 40 for every candidate in this slice. */
void DynamicFunc__set_input2_len_40(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
	while (i < stop)
		total_len2_X86[i++] = 40;
}
/* Sets the length of input #2 to 40 and clears the bytes just past the new end:
 * in SIMD mode a 0x80 end-of-buffer marker is placed at offset 40; on
 * big-endian flat builds the 16 bytes after offset 40 are zero-wiped. */
void DynamicFunc__set_input2_len_40_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf2[j].c[GETPOS(40, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 40;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		total_len2_X86[j] = 40;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[40]), 0, 16);
		}
		else
#endif
		{
			memset(&(input_buf2_X86[j>>MD5_X2].x1.B[40]), 0, 16);
		}
#endif
	}
}
/* Sets the flat-buffer length of input #1 to 64. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_64(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_64 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 64;
}
/* Sets the flat-buffer length of input #2 to 64. Not implemented for SIMD mode. */
void DynamicFunc__set_input2_len_64(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_64 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len2_X86[i++] = 64;
}
/* Sets the flat-buffer length of input #1 to 100 for every candidate, after
 * zero-wiping any residual string bytes from the current end of the buffer up
 * to the first NUL. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_100(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_100 in SSE2/MMX mode\n");
#endif
	for (; i < stop; ++i) {
		unsigned char *p;
#if MD5_X2
		if (i&1)
			p = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]);
		else
#endif
		p = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]);
		/* clear leftover bytes past the old end, stopping at the first NUL */
		for (; *p; ++p)
			*p = 0;
		total_len_X86[i] = 100;
	}
}
/* Sets the flat-buffer length of input #1 to 24. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_24(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_24 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 24;
}
/* Sets the flat-buffer length of input #1 to 28. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_28(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_28 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 28;
}
/* Sets the flat-buffer length of input #1 to 48. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_48(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_48 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 48;
}
/* Sets the flat-buffer length of input #1 to 56. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_56(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_56 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 56;
}
/* Sets the flat-buffer length of input #1 to 80. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_80(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_80 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 80;
}
/* Sets the flat-buffer length of input #1 to 96. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_96(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_96 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 96;
}
/* Sets the flat-buffer length of input #1 to 112. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_112(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_112 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 112;
}
/* Sets the flat-buffer length of input #1 to 128. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_128(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_128 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 128;
}
/* Sets the flat-buffer length of input #1 to 160. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_160(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_160 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 160;
}
/* Sets the flat-buffer length of input #1 to 192. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_192(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_192 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 192;
}
/* Sets the flat-buffer length of input #1 to 256. Not implemented for SIMD mode. */
void DynamicFunc__set_input_len_256(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_256 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len_X86[i++] = 256;
}
/* Sets the flat-buffer length of input #2 to 24. Not implemented for SIMD mode. */
void DynamicFunc__set_input2_len_24(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_24 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len2_X86[i++] = 24;
}
/* Sets the flat-buffer length of input #2 to 28. Not implemented for SIMD mode. */
void DynamicFunc__set_input2_len_28(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_28 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len2_X86[i++] = 28;
}
/* Sets the flat-buffer length of input #2 to 48. Not implemented for SIMD mode. */
void DynamicFunc__set_input2_len_48(DYNA_OMP_PARAMS)
{
	unsigned int i, stop;
#ifdef _OPENMP
	i = first;
	stop = last;
#else
	i = 0;
	stop = m_count;
#endif
#ifdef SIMD_COEF_32
	/* this primitive has no SIMD implementation; reject scripts that try */
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_48 in SSE2/MMX mode\n");
#endif
	while (i < stop)
		total_len2_X86[i++] = 48;
}
void DynamicFunc__set_input2_len_56(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 56 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_56 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 56;
}
void DynamicFunc__set_input2_len_80(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 80 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_80 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 80;
}
void DynamicFunc__set_input2_len_96(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 96 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_96 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 96;
}
void DynamicFunc__set_input2_len_112(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 112 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_112 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 112;
}
void DynamicFunc__set_input2_len_128(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 128 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_128 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 128;
}
void DynamicFunc__set_input2_len_160(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 160 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_160 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 160;
}
void DynamicFunc__set_input2_len_192(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 192 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_192 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 192;
}
void DynamicFunc__set_input2_len_256(DYNA_OMP_PARAMS)
{
 /* Force the stored length of input buffer 2 to 256 bytes for every
    candidate in this thread's slice (flat buffers only). */
 unsigned int idx, stop;
#ifdef _OPENMP
 idx = first;
 stop = last;
#else
 idx = 0;
 stop = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse == 1)
  error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_256 in SSE2/MMX mode\n");
#endif
 while (idx < stop)
  total_len2_X86[idx++] = 256;
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the input variables, and
* updates lengths
*************************************************************/
/* Thin wrapper: appends cursalt (saltlen bytes) to input buffer 1 for this
   thread's slice.  All real work (SSE vs flat layout) is in __append_string. */
void DynamicFunc__append_salt(DYNA_OMP_PARAMS)
{
 __append_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the 2nd input variables, and
* updates lengths
*************************************************************/
/* Thin wrapper: appends cursalt (saltlen bytes) to input buffer 2 for this
   thread's slice.  All real work is in __append2_string. */
void DynamicFunc__append_salt2(DYNA_OMP_PARAMS)
{
 __append2_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
/* Appends the current contents of input buffer 2 onto the end of input
   buffer 1 (for each candidate in this thread's slice) and bumps the
   input-1 lengths by the input-2 lengths. */
void DynamicFunc__append_input_from_input2(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 til = last;
 i = first;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int j, k;
  /* SIMD layout is interleaved, so copy byte-by-byte per lane via GETPOS. */
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; ++i)
  {
   for (j = 0; j < SIMD_COEF_32; ++j)
   {
    unsigned int start_len = total_len[i][j];
    unsigned int len1 = total_len2[i][j];
    for (k = 0; k < len1; ++k)
     input_buf[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
    /* re-place the 0x80 padding byte after the now-longer data */
    input_buf[i].c[GETPOS((len1+start_len), j)] = 0x80;
    total_len[i][j] += len1;
   }
  }
  return;
 }
#endif
 for (; i < til; ++i)
 {
#if MD5_X2
  if (i&1)
   memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
  else
#endif
   memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
  total_len_X86[i] += total_len2_X86[i];
 }
}
/* Mirror of DynamicFunc__append_input_from_input2: appends input buffer 1
   onto the end of input buffer 2 and bumps the input-2 lengths. */
void DynamicFunc__append_input2_from_input(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 til = last;
 i = first;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int j, k;
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; ++i)
  {
   for (j = 0; j < SIMD_COEF_32; ++j)
   {
    unsigned int start_len = total_len2[i][j];
    unsigned int len1 = total_len[i][j];
    for (k = 0; k < len1; ++k)
     input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
    /* re-place the 0x80 padding byte after the appended data */
    input_buf2[i].c[GETPOS((len1+start_len), j)] = 0x80;
    total_len2[i][j] += len1;
   }
  }
  return;
 }
#endif
 for (; i < til; ++i)
 {
#if MD5_X2
  if (i&1)
   memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
  else
#endif
   memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
  total_len2_X86[i] += total_len_X86[i];
 }
}
/* Doubles input buffer 1 in place: appends a copy of its current contents
   to itself (e.g. for $p$p constructs) and doubles the stored length.
   The memcpy source [0,len) and destination [len,2*len) do not overlap. */
void DynamicFunc__append_input_from_input(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 til = last;
 i = first;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int j, k;
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; ++i)
  {
   for (j = 0; j < SIMD_COEF_32; ++j)
   {
    unsigned int start_len = total_len[i][j];
    for (k = 0; k < start_len; ++k)
     input_buf[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
    input_buf[i].c[GETPOS((start_len+start_len), j)] = 0x80;
    total_len[i][j] += start_len;
   }
  }
  return;
 }
#endif
 for (; i < til; ++i)
 {
#if MD5_X2
  if (i&1)
   memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
  else
#endif
   memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
  total_len_X86[i] <<= 1;
 }
}
/* Doubles input buffer 2 in place: appends a copy of its current contents
   to itself and doubles the stored length (mirror of
   DynamicFunc__append_input_from_input). */
void DynamicFunc__append_input2_from_input2(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 til = last;
 i = first;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int j, k;
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; ++i)
  {
   for (j = 0; j < SIMD_COEF_32; ++j)
   {
    unsigned int start_len = total_len2[i][j];
    for (k = 0; k < start_len; ++k)
     input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
    input_buf2[i].c[GETPOS((start_len+start_len), j)] = 0x80;
    total_len2[i][j] += start_len;
   }
  }
  return;
 }
#endif
 for (; i < til; ++i)
 {
#if MD5_X2
  if (i&1)
   memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
  else
#endif
   memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
  total_len2_X86[i] <<= 1;
 }
}
#ifdef SIMD_PARA_MD5
/* Writes each lane's bit length (byte length << 3) into 32-bit word 14 of
   its interleaved SIMD MD5 input block — the MD5 length field of the padded
   message.  side 0 uses input_buf/total_len, side 1 input_buf2/total_len2;
   i is the index of the first of SIMD_PARA_MD5 consecutive SIMD blocks. */
static void SSE_Intrinsics_LoadLens_md5(int side, int i)
{
 ARCH_WORD_32 *p;
 unsigned int j, k;
 for (j = 0; j < SIMD_PARA_MD5; j++)
 {
  if (side == 0)
  {
   p = input_buf[i+j].w;
   for (k = 0; k < SIMD_COEF_32; k++)
    p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
  }
  else
  {
   p = input_buf2[i+j].w;
   for (k = 0; k < SIMD_COEF_32; k++)
    p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
  }
 }
}
#endif
#ifdef SIMD_PARA_MD4
/* MD4 twin of SSE_Intrinsics_LoadLens_md5: stores each lane's bit length
   into word 14 of its interleaved SIMD input block.  side selects buffer
   set 1 (0) or 2 (non-zero). */
static void SSE_Intrinsics_LoadLens_md4(int side, int i)
{
 ARCH_WORD_32 *p;
 unsigned int j, k;
 for (j = 0; j < SIMD_PARA_MD4; j++)
 {
  if (side == 0)
  {
   p = input_buf[i+j].w;
   for (k = 0; k < SIMD_COEF_32; k++)
    p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
  }
  else
  {
   p = input_buf2[i+j].w;
   for (k = 0; k < SIMD_COEF_32; k++)
    p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
  }
 }
}
#endif
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the first input field. The data is
* still in the binary encrypted format, in the crypt_key.
* we do not yet convert to base-16. This is so we can output
* as base-16, or later, if we add base-64, we can output to
* that format instead.
*************************************************************/
/* MD5-hashes input buffer 1 into crypt_key (binary result, not hex yet)
   for this thread's slice of candidates. */
void DynamicFunc__crypt_md5(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 til = last;
 i = first;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  if (curdat.store_keys_in_input) {
   /* lengths/padding already valid in the SIMD blocks; hash directly */
   for (; i < til; i += SIMD_PARA_MD5) {
    SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   }
  } else {
   /* lengths changed by earlier primitives; rewrite the length words first */
   for (; i < til; i += SIMD_PARA_MD5) {
    SSE_Intrinsics_LoadLens_md5(0, i);
    SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   }
  }
  return;
 }
#endif
 for (; i < til; ++i) {
#if MD5_X2
  /* X2 mode hashes candidates two at a time; note the extra i++ here,
     and a zero length for the dangling half of an odd final pair. */
  unsigned int len[2];
  len[0] = total_len_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len_X86[i];
#else
  unsigned int len = total_len_X86[i];
#endif
  DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
 }
}
/* MD4-hashes input buffer 1 into crypt_key (binary result) for this
   thread's slice of candidates.  Structure mirrors DynamicFunc__crypt_md5. */
void DynamicFunc__crypt_md4(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 til = last;
 i = first;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  if (curdat.store_keys_in_input) {
   for (; i < til; i += SIMD_PARA_MD4) {
    SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   }
  } else {
   for (; i < til; i += SIMD_PARA_MD4) {
    SSE_Intrinsics_LoadLens_md4(0, i);
    SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   }
  }
  return;
 }
#endif
 for (; i < til; ++i) {
  // MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
  // to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len_X86[i];
#else
  unsigned int len = total_len_X86[i];
#endif
  DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
 }
}
/* Special-cased "PO" crypt: builds salt 'Y' key 0xf7 salt directly in
   input buffer 1 and MD5s it, fusing the comment-listed primitive chain
   below into one pass.  The input buffer slot is reused for every
   candidate; only the key portion is rewritten per iteration. */
void DynamicFunc__POCrypt(DYNA_OMP_PARAMS)
{
 unsigned int i, j;
 unsigned int til, len;
 unsigned char *pBuf;
#if MD5_X2
 unsigned char *pBuf2;
 unsigned int lens[2];
#endif
#ifdef _OPENMP
 til = last;
 i = first;
#else
 i = 0;
 til = m_count;
#endif
 //DynamicFunc__clean_input_kwik();
 //DynamicFunc__append_salt,
 //DynamicFunc__append_input1_from_CONST1,
 //DynamicFunc__append_keys,
 //DynamicFunc__append_input1_from_CONST2,
 //DynamicFunc__append_salt,
 //DynamicFunc__crypt_md5,
 pBuf = input_buf_X86[i>>MD5_X2].x1.B;
#if MD5_X2
 pBuf2 = input_buf_X86[i>>MD5_X2].x2.B2;
 memset(pBuf2, 0, sizeof(input_buf_X86[i>>MD5_X2].x2.B2));
 memcpy(pBuf2, cursalt, 32);
 pBuf2[32] = 'Y';
#endif
 memset(pBuf, 0, sizeof(input_buf_X86[i>>MD5_X2].x1.b));
 memcpy(pBuf, cursalt, 32);
 pBuf[32] = 'Y';
 for (j = i; j < til; ++j) {
  len = saved_key_len[j];
  memcpy(&pBuf[33], saved_key[j], len);
  pBuf[33+len] = 0xf7;
  memcpy(&pBuf[34+len], cursalt, 32);
#if MD5_X2
  lens[0] = len+66; // len from the 'first'
  ++j;
  if (j < m_count) {
   /* second half of the X2 pair gets its own key in the x2 buffer */
   len = saved_key_len[j];
   memcpy(&pBuf2[33], saved_key[j], len);
   pBuf2[33+len] = 0xf7;
   memcpy(&pBuf2[34+len], cursalt, 32);
   lens[1] = len+66;
  } else {
   lens[1] = 0;
  }
  DoMD5(input_buf_X86[i>>MD5_X2], lens, crypt_key_X86[j>>MD5_X2]);
#else
  DoMD5(input_buf_X86[i>>MD5_X2], (len+66), crypt_key_X86[j]);
#endif
 }
}
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the 2nd input field into crypt_keys2.
*************************************************************/
/* MD5-hashes input buffer 2 into crypt_key2 (binary result) for this
   thread's slice of candidates. */
void DynamicFunc__crypt2_md5(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; i += SIMD_PARA_MD5) {
   SSE_Intrinsics_LoadLens_md5(1, i);
   SIMDmd5body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
  }
  return;
 }
#endif
 for (; i < til; ++i) {
#if MD5_X2
  /* X2 pairs two candidates per call; zero length for a dangling half */
  unsigned int len[2];
  len[0] = total_len2_X86[i++];
  if (i < m_count)
   len[1] = total_len2_X86[i];
  else
   len[1] = 0;
#else
  unsigned int len = total_len2_X86[i];
#endif
  DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
 }
}
/* MD4-hashes input buffer 2 into crypt_key2 (binary result) for this
   thread's slice of candidates. */
void DynamicFunc__crypt2_md4(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; i += SIMD_PARA_MD4) {
   SSE_Intrinsics_LoadLens_md4(1, i);
   SIMDmd4body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
  }
  return;
 }
#endif
 for (; i < til; ++i) {
  // MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
  // to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len2_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len2_X86[i];
#else
  unsigned int len = total_len2_X86[i];
#endif
  DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
 }
}
/**************************************************************
* DYNAMIC primitive helper function
 * Encrypts the data in the 1st input field into crypt_keys2.
*************************************************************/
/* MD5-hashes input buffer 1 but stores the binary result into crypt_key2
   (the 'second' output), leaving crypt_key untouched. */
void DynamicFunc__crypt_md5_in1_to_out2(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  if (curdat.store_keys_in_input) {
   for (; i < til; i += SIMD_PARA_MD5) {
    SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
   }
  } else {
   for (; i < til; i += SIMD_PARA_MD5) {
    SSE_Intrinsics_LoadLens_md5(0, i);
    SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
   }
  }
  return;
 }
#endif
 for (; i < til; ++i) {
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len_X86[i];
#else
  unsigned int len = total_len_X86[i];
#endif
  DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
 }
}
/* MD4-hashes input buffer 1 but stores the binary result into crypt_key2. */
void DynamicFunc__crypt_md4_in1_to_out2(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  if (curdat.store_keys_in_input) {
   for (; i < til; i += SIMD_PARA_MD4) {
    SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
   }
  } else {
   for (; i < til; i += SIMD_PARA_MD4) {
    SSE_Intrinsics_LoadLens_md4(0, i);
    SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
   }
  }
  return;
 }
#endif
 for (; i < til; ++i) {
  // MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
  // to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len_X86[i];
#else
  unsigned int len = total_len_X86[i];
#endif
  DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
 }
}
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the 2nd input field into crypt_keys.
*************************************************************/
/* MD5-hashes input buffer 2 and stores the binary result into crypt_key
   (the 'first' output). */
void DynamicFunc__crypt_md5_in2_to_out1(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; i += SIMD_PARA_MD5)
  {
   SSE_Intrinsics_LoadLens_md5(1, i);
   SIMDmd5body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   //dump_stuff_mmx_msg("DynamicFunc__crypt_md5_in2_to_out1", input_buf2[i].c,64,m_count-1);
  }
  return;
 }
#endif
 for (; i < til; ++i) {
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len2_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len2_X86[i];
#else
  unsigned int len = total_len2_X86[i];
#endif
  DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
 }
}
/* MD4-hashes input buffer 2 and stores the binary result into crypt_key. */
void DynamicFunc__crypt_md4_in2_to_out1(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; i += SIMD_PARA_MD4)
  {
   SSE_Intrinsics_LoadLens_md4(1, i);
   SIMDmd4body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
  }
  return;
 }
#endif
 for (; i < til; ++i) {
  // MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
  // to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len2_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len2_X86[i];
#else
  unsigned int len = total_len2_X86[i];
#endif
  DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
 }
}
/* MD5-hashes input buffer 1 and writes the 16-byte binary digest BACK into
   input buffer 1 (cleared first in SIMD mode), setting all lengths to 16.
   Used for chained constructs like md5(md5($p)). */
void DynamicFunc__crypt_md5_to_input_raw(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; i += SIMD_PARA_MD5)
  {
   unsigned int j, k;
   SSE_Intrinsics_LoadLens_md5(0, i);
   // NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
   // each, and we are doing SIMD_PARA_MD5 blocks at a time, we can NOT directly
   // write to the input buff, but have to use the crypt_key buffer, and then
   // memcpy when done.
   SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   for (j = 0; j < SIMD_PARA_MD5; ++j)
   {
    memset(input_buf[i+j].c, 0, sizeof(input_buf[0]));
    memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
    for (k = 0; k < SIMD_COEF_32; k++)
     total_len[i+j][k] = 16;
   }
  }
  return;
 }
#endif
 for (; i < til; ++i) {
#if MD5_X2
  unsigned int len[2];
  /* capture the old length, then immediately reset it to 16 (0x10) */
  len[0] = total_len_X86[i];
  total_len_X86[i++] = 0x10;
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len_X86[i];
#else
  unsigned int len = total_len_X86[i];
#endif
  DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
  total_len_X86[i] = 0x10;
 }
}
/* Like DynamicFunc__crypt_md5_to_input_raw but does NOT touch the stored
   lengths afterwards (caller keeps them); in SIMD mode the length words
   ARE loaded into the blocks before hashing. */
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; i += SIMD_PARA_MD5)
  {
   unsigned int j;
   SSE_Intrinsics_LoadLens_md5(0, i);
   // NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
   // each, and we are doing SIMD_PARA_MD5 blocks at a time, we can NOT directly
   // write to the input buff, but have to use the crypt_key buffer, and then
   // memcpy when done.
   SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   for (j = 0; j < SIMD_PARA_MD5; ++j)
    memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
  }
  return;
 }
#endif
 for (; i < til; ++i) {
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len_X86[i];
#else
  unsigned int len = total_len_X86[i];
#endif
  DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
 }
}
/* Like the _but_setlen_in_SSE variant, but in SIMD mode the length words
   are assumed already correct (not reloaded); the flat path uses DoMD5o,
   which hashes without re-writing the stored length. */
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
  i /= SIMD_COEF_32;
  for (; i < til; i += SIMD_PARA_MD5)
  {
   unsigned int j;
   // NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
   // each, and we are doing SIMD_PARA_MD5 blocks at a time, we can NOT directly
   // write to the input buff, but have to use the crypt_key buffer, and then
   // memcpy when done.
   SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
   for (j = 0; j < SIMD_PARA_MD5; ++j)
    memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
  }
  return;
 }
#endif
 for (; i < til; ++i) {
#if MD5_X2
  unsigned int len[2];
  len[0] = total_len_X86[i++];
  if (i == m_count)
   len[1] = 0;
  else
   len[1] = total_len_X86[i];
#else
  unsigned int len = total_len_X86[i];
#endif
  // we call DoMD5o so as to 'not' change the length (it was already set)
  DoMD5o(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
 }
}
/* Overwrites the START of input buffer 1 with the salt (no length update —
   'no_size_fix').  Handles three encodings: raw bytes, UTF-16 converted
   from the current codepage, and direct byte->UTF-16LE widening when the
   target encoding is plain ASCII/ISO-8859-1. */
void DynamicFunc__overwrite_salt_to_input1_no_size_fix(DYNA_OMP_PARAMS)
{
 unsigned int j, til;
 /* 0 = no unicode, 1 = UTF-16LE, 2 = UTF-16BE (per md5_unicode_convert_get) */
 int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
 j = first;
 til = last;
#else
 j = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  if (utf16) {
   if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
    UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
    int outlen;
    if (utf16 == 1)
     outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
    else
     outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
    if (outlen < 0)
     outlen = strlen16(utf16Str) * sizeof(UTF16);
    for (; j < til; ++j) {
     __SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
    }
   } else {
    /* ASCII/latin-1 target: widen bytes to UTF-16 on the fly */
    for (; j < til; ++j)
     __SSE_append_string_to_input_unicode(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
   }
   return;
  }
  for (; j < til; ++j)
   __SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
  return;
 }
#endif
 if (utf16) {
  if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
   UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
   int outlen;
   if (utf16 == 1)
    outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
   else
    outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
   if (outlen < 0)
    outlen = strlen16(utf16Str) * sizeof(UTF16);
   for (; j < til; ++j) {
    unsigned int z;
    unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
    if (j&1)
     cp = input_buf_X86[j>>MD5_X2].x2.B2;
    else
#endif
    cp = input_buf_X86[j>>MD5_X2].x1.B;
    for (z = 0; z < outlen; ++z)
     *cp++ = *cpi++;
   }
  } else {
   /* cheap widening: emit each salt byte followed by a zero byte */
   for (; j < til; ++j) {
    unsigned int z;
    unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
    if (j&1)
     cp = input_buf_X86[j>>MD5_X2].x2.B2;
    else
#endif
    cp = input_buf_X86[j>>MD5_X2].x1.B;
    for (z = 0; z < saltlen; ++z) {
     *cp++ = *cpi++;
     *cp++ = 0;
    }
   }
  }
  return;
 }
 for (; j < til; ++j) {
#if MD5_X2
  if (j&1)
   memcpy(input_buf_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
  else
#endif
  memcpy(input_buf_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
 }
}
/* Mirror of DynamicFunc__overwrite_salt_to_input1_no_size_fix, but writes
   the salt over the start of input buffer 2 (lengths left untouched). */
void DynamicFunc__overwrite_salt_to_input2_no_size_fix(DYNA_OMP_PARAMS)
{
 unsigned int j, til;
 int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
 j = first;
 til = last;
#else
 j = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  if (utf16) {
   if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
    UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
    int outlen;
    if (utf16 == 1)
     outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
    else
     outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
    if (outlen < 0)
     outlen = strlen16(utf16Str) * sizeof(UTF16);
    for (; j < til; ++j) {
     __SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
    }
   } else {
    for (; j < til; ++j)
     __SSE_append_string_to_input_unicode(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
   }
   return;
  }
  for (; j < til; ++j)
   __SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
  return;
 }
#endif
 if (utf16) {
  if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
   UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
   int outlen;
   if (utf16 == 1)
    outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
   else
    outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
   if (outlen < 0)
    outlen = strlen16(utf16Str) * sizeof(UTF16);
   for (; j < til; ++j) {
    unsigned int z;
    unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
    if (j&1)
     cp = input_buf2_X86[j>>MD5_X2].x2.B2;
    else
#endif
    cp = input_buf2_X86[j>>MD5_X2].x1.B;
    for (z = 0; z < outlen; ++z)
     *cp++ = *cpi++;
   }
  } else {
   /* cheap widening: emit each salt byte followed by a zero byte */
   for (; j < til; ++j) {
    unsigned int z;
    unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
    if (j&1)
     cp = input_buf2_X86[j>>MD5_X2].x2.B2;
    else
#endif
    cp = input_buf2_X86[j>>MD5_X2].x1.B;
    for (z = 0; z < saltlen; ++z) {
     *cp++ = *cpi++;
     *cp++ = 0;
    }
   }
  }
  return;
 }
 for (; j < til; ++j) {
#if MD5_X2
  if (j&1)
   memcpy(input_buf2_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
  else
#endif
  memcpy(input_buf2_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
 }
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input1 from the output2 data using base-16
*************************************************************/
/* Writes the 32-char lowercase-hex form of crypt_key2 (16 binary bytes)
   over the START of input buffer 1; lengths are NOT updated
   ('no_size_fix' — caller is expected to have set them already). */
void DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
 unsigned int j, til;
#ifdef _OPENMP
 j = first;
 til = last;
#else
 j = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int idx;
  for (; j < til; ++j)
  {
   idx = ( ((unsigned int)j)/SIMD_COEF_32);
   __SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, j&(SIMD_COEF_32-1));
  }
  return;
 }
#endif
 for (; j < til; ++j)
 {
  unsigned char *cpo, *cpi;
  unsigned int i;
  /* MD5_word *w; */
#if MD5_X2
  if (j&1)
  {cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
  else
#endif
  {cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
  /* hex-encode the 16 digest bytes, two output chars per input byte */
  for (i = 0; i < 16; ++i, ++cpi)
  {
   *cpo++ = dynamic_itoa16[*cpi>>4];
   *cpo++ = dynamic_itoa16[*cpi&0xF];
  }
  //MD5_swap(w,w,4);
 }
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input1 from the output1 data using base-16
*************************************************************/
/* Writes the 32-char hex form of crypt_key (16 binary bytes) over the
   START of input buffer 1; lengths are NOT updated. */
void DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
 unsigned int j, til;
#ifdef _OPENMP
 j = first;
 til = last;
#else
 j = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int idx;
  for (; j < til; ++j)
  {
   idx = ( ((unsigned int)j)/SIMD_COEF_32);
   __SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
  }
  return;
 }
#endif
 for (; j < til; ++j)
 {
  unsigned char *cpo, *cpi;
  unsigned int i;
  /* MD5_word *w; */
#if MD5_X2
  if (j&1)
  {cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
  else
#endif
  {cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
  /* hex-encode the 16 digest bytes, two output chars per input byte */
  for (i = 0; i < 16; ++i, ++cpi)
  {
   *cpo++ = dynamic_itoa16[*cpi>>4];
   *cpo++ = dynamic_itoa16[*cpi&0xF];
  }
  //MD5_swap(w,w,4);
 }
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys (the encrypted
* 'first' key variable), and use a base-16 text formatting, and
* append this to the first input buffer (adjusting the lengths)
*************************************************************/
/* Appends the 32-char hex form of crypt_key to input buffer 1 and adds 32
   to each stored length.  The SIMD path dispatches on the current offset's
   alignment: fresh buffer, odd offset (byte-at-a-time), 4-aligned, or
   2-aligned, each with a tuned helper. */
void DynamicFunc__append_from_last_output_as_base16(DYNA_OMP_PARAMS)
{
 unsigned int j, til;
#ifdef _OPENMP
 j = first;
 til = last;
#else
 j = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int idx;
  for (; j < til; ++j)
  {
   unsigned int ip;
   idx = ( ((unsigned int)j)/SIMD_COEF_32);
   // This is the 'actual' work.
   ip = total_len[idx][j & (SIMD_COEF_32 - 1)];
   total_len[idx][j & (SIMD_COEF_32 - 1)] += 32;
   if (!ip)
    __SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
   else if (ip&1)
   {
    // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
    unsigned int k;
    for (k = 0; k < 16; ++k)
    {
     unsigned char v = crypt_key[idx].c[GETPOS(k, j&(SIMD_COEF_32-1))];
     input_buf[idx].c[GETPOS(ip+(k<<1), j&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
     input_buf[idx].c[GETPOS(ip+(k<<1)+1, j&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
    }
    input_buf[idx].c[GETPOS(ip+32, j&(SIMD_COEF_32-1))] = 0x80;
   }
   else if ((ip&3)==0)
    __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
   else
    __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
  }
  return;
 }
#endif
 for (; j < til; ++j)
 {
  unsigned char *cp, *cpi;
  unsigned int i;
#if MD5_X2
  if (j&1)
  {cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x2.B2; }
  else
#endif
  {cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x1.B; }
  for (i = 0; i < 16; ++i)
  {
#if ARCH_ALLOWS_UNALIGNED
   /* write both hex digits at once from the precomputed 2-char table */
   *((unsigned short*)cp) = itoa16_w2[*cpi++];
   cp += 2;
#else
   unsigned char b = *cpi++;
   *cp++ = dynamic_itoa16[b>>4];
   *cp++ = dynamic_itoa16[b&0xF];
#endif
  }
  *cp = 0;
  total_len_X86[j] += 32;
 }
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys2 (the encrypted
* 'second' key variable), and base-16 appends to the 2nd input
*************************************************************/
/* Appends the 32-char hex form of crypt_key2 to input buffer 2 and adds 32
   to each stored length (mirror of
   DynamicFunc__append_from_last_output_as_base16). */
void DynamicFunc__append_from_last_output2_as_base16(DYNA_OMP_PARAMS)
{
 unsigned int i, til;
#ifdef _OPENMP
 i = first;
 til = last;
#else
 i = 0;
 til = m_count;
#endif
#ifdef SIMD_COEF_32
 if (dynamic_use_sse==1) {
  unsigned int idx;
  for (; i < til; ++i)
  {
   unsigned int ip, j;
   idx = ( ((unsigned int)i)/SIMD_COEF_32);
   // This is the 'actual' work.
   ip = total_len2[idx][i&(SIMD_COEF_32-1)];
   total_len2[idx][i&(SIMD_COEF_32-1)] += 32;
   if (!ip)
    __SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
   else if (ip&1)
   {
    // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
    for (j = 0; j < 16; ++j)
    {
     unsigned char v = crypt_key2[idx].c[GETPOS(j, i&(SIMD_COEF_32-1))];
     input_buf2[idx].c[GETPOS(ip+(j<<1), i&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
     input_buf2[idx].c[GETPOS(ip+(j<<1)+1, i&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
    }
    input_buf2[idx].c[GETPOS(ip+32, i&(SIMD_COEF_32-1))] = 0x80;
   }
   else if ((ip&3)==0)
    __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
   else
    __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
  }
  return;
 }
#endif
 for (; i < til; ++i)
 {
  unsigned int j;
  unsigned char *cp, *cpi;
#if MD5_X2
  if (i&1)
  {cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
  else
#endif
  {cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
  for (j = 0; j < 16; ++j)
  {
#if ARCH_ALLOWS_UNALIGNED
   /* write both hex digits at once from the precomputed 2-char table */
   *((unsigned short*)cp) = itoa16_w2[*cpi++];
   cp += 2;
#else
   unsigned char b = *cpi++;
   *cp++ = dynamic_itoa16[b>>4];
   *cp++ = dynamic_itoa16[b&0xF];
#endif
  }
  *cp = 0;
  total_len2_X86[i] += 32;
 }
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input2 from the output1 data using base-16
* an optimization, if the same thing is done over and over
* again, such as md5(md5(md5(md5($p)))) There, we would only
* call the copy and set length once, then simply call copy.
*************************************************************/
// Overwrites the start of input buffer 2 with the base-16 (hex) form of the
// last crypt result held in crypt_key (output 1).  "no_size_fix" means the
// stored lengths (total_len2*) are NOT updated here; per the header comment
// above, callers set the length once and then repeat this overwrite cheaply.
void DynamicFunc__overwrite_from_last_output_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int i, til,j;
#ifdef _OPENMP
	// OMP build: process only this thread's [first, last) slice of candidates.
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; i < til; ++i)
		{
			// idx = SIMD block; i&(SIMD_COEF_32-1) = lane within that block.
			idx = ( ((unsigned int)i)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, i&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	// Flat (x86) fallback: hex-encode 16 binary bytes into 32 hex digits.
	j = i;
	for (; j < til; ++j)
	{
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		// MD5_X2 packs two candidates per buffer; odd indexes use the .x2 half.
		if (j&1)
			{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
			{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		// No trailing NUL is written and no length is touched (see "no_size_fix").
		for (i = 0; i < 16; ++i, ++cpi)
		{
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}
// Overwrites the start of input buffer 2 with the base-16 form of the last
// crypt result held in crypt_key2 (output 2).  Lengths are not updated
// ("no_size_fix"), mirroring the output1 variant above.
void DynamicFunc__overwrite_from_last_output2_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int i, til,j;
#ifdef _OPENMP
	// OMP build: this thread handles candidates [first, last).
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; i < til; ++i)
		{
			idx = ( ((unsigned int)i)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	// Flat (x86) fallback: 16 binary bytes -> 32 hex digits, no NUL, no length fix.
	j = i;
	for (; j < til; ++j)
	{
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf2_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
			{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf2_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi)
		{
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input2 from the output2 data using base-16
*************************************************************/
// NOTE(review): this body is identical to
// DynamicFunc__overwrite_from_last_output2_to_input2_as_base16_no_size_fix
// above (same sources, same destination) — kept as a distinct primitive name
// for script compatibility, presumably; confirm before consolidating.
void DynamicFunc__overwrite_from_last_output2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int i, til,j;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; i < til; ++i)
		{
			idx = ( ((unsigned int)i)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	// Flat (x86) path: hex-encode crypt_key2 over the start of input_buf2.
	j=i;
	for (; j < til; ++j)
	{
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
			{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi)
		{
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys1 (the encrypted
* 'first' key variable), and base-16 appends to the 2nd input
*************************************************************/
// Appends the base-16 form of crypt_key (output 1) to input buffer 2, and
// advances the stored length by 32 (16 binary bytes -> 32 hex digits).
void DynamicFunc__append_from_last_output_to_input2_as_base16(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index)
		{
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len2[idx][index&(SIMD_COEF_32-1)];
			total_len2[idx][index&(SIMD_COEF_32-1)] += 32;
			// Dispatch on the current write offset's alignment:
			if (!ip)
				// empty buffer: fully aligned fast path
				__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
			else if (ip&1)
			{
				// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
				for (i = 0; i < 16; ++i)
				{
					unsigned char v = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
					input_buf2[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
					input_buf2[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
				}
				// re-write the 0x80 padding marker past the appended data
				input_buf2[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else if ((ip&3)==0)
				// dword-aligned offset
				__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
			else
				// 2-byte-aligned offset
				__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	// Flat (x86) fallback: append 32 hex digits, NUL-terminate, bump length.
	for (; i < til; ++i)
	{
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cpi = crypt_key_X86[i>>MD5_X2].x2.B2; cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); }
		else
#endif
			{cpi = crypt_key_X86[i>>MD5_X2].x1.B; cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]);}
		for (j = 0; j < 16; ++j)
		{
#if ARCH_ALLOWS_UNALIGNED
			// write both hex digits at once via the precomputed 2-char table
			*((unsigned short*)cp) = itoa16_w2[*cpi++];
			cp += 2;
#else
			unsigned char b = *cpi++;
			*cp++ = dynamic_itoa16[b>>4];
			*cp++ = dynamic_itoa16[b&0xF];
#endif
		}
		*cp = 0;
		total_len2_X86[i] += 32;
	}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys2 (the encrypted
* 'second' key variable), and base-16 appends to the 1st input
*************************************************************/
// Appends the base-16 form of crypt_key2 (output 2) to input buffer 1, and
// advances the stored length by 32.  Mirror image of the output1->input2
// variant above.
void DynamicFunc__append_from_last_output2_to_input1_as_base16(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index)
		{
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len[idx][index&(SIMD_COEF_32-1)];
			total_len[idx][index&(SIMD_COEF_32-1)] += 32;
			// Dispatch on the write offset's alignment (see input2 variant).
			if (!ip)
				__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
			else if (ip&1)
			{
				// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
				for (i = 0; i < 16; ++i)
				{
					unsigned char v = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
					input_buf[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
					input_buf[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
				}
				input_buf[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else if ((ip&3)==0)
				__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
			else
				__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	// Flat (x86) fallback.
	for (; i < til; ++i)
	{
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
		else
#endif
			{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
		{
#if ARCH_ALLOWS_UNALIGNED
			*((unsigned short*)cp) = itoa16_w2[*cpi++];
			cp += 2;
#else
			unsigned char b = *cpi++;
			*cp++ = dynamic_itoa16[b>>4];
			*cp++ = dynamic_itoa16[b&0xF];
#endif
		}
		*cp = 0;
		total_len_X86[i] += 32;
	}
}
// Appends the raw 16 binary bytes of crypt_key2 (output 2) to input buffer 1
// and advances its length by 16.  NOTE(review): despite the name not saying
// so, the destination is input *1* (input_buf / total_len) — the
// "append2_..." variant below is the one that targets input 2.
void DynamicFunc__append_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index)
		{
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len[idx][index&(SIMD_COEF_32-1)];
			if (!ip)
			{
				// Empty buffer: copy 4 interleaved 32-bit words lane-wise.
				ARCH_WORD_32 *po = input_buf[idx].w;
				ARCH_WORD_32 *pi = crypt_key2[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++)
				{
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else
			{
				// Unaligned append: byte-by-byte through GETPOS.
				for (i = 0; i < 16; ++i)
					input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	// Flat (x86) fallback: memcpy-style raw append plus NUL terminator.
	for (; i < til; ++i)
	{
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
		else
#endif
			{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len_X86[i] += 16;
	}
}
// Appends the raw 16 binary bytes of crypt_key2 (output 2) to input buffer 2
// and advances its length by 16.
void DynamicFunc__append2_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index)
		{
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len2[idx][index&(SIMD_COEF_32-1)];
			if (!ip)
			{
				// Empty buffer: word-wise interleaved copy (fast path).
				ARCH_WORD_32 *po = input_buf2[idx].w;
				ARCH_WORD_32 *pi = crypt_key2[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++)
				{
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else
			{
				// Unaligned append: byte-by-byte through GETPOS.
				for (i = 0; i < 16; ++i)
					input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	// Flat (x86) fallback.
	for (; i < til; ++i)
	{
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
		else
#endif
			{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len2_X86[i] += 16;
	}
}
// Appends the raw 16 binary bytes of crypt_key (output 1) to input buffer 1
// and advances its length by 16.
void DynamicFunc__append_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index, idx;
		for (index = i; index < til; ++index)
		{
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len[idx][index&(SIMD_COEF_32-1)];
			if (!ip)
			{
				// Empty buffer: word-wise interleaved copy (fast path).
				ARCH_WORD_32 *po = input_buf[idx].w;
				ARCH_WORD_32 *pi = crypt_key[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++)
				{
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else
			{
				// Unaligned append: byte-by-byte through GETPOS.
				for (i = 0; i < 16; ++i)
					input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	// Flat (x86) fallback.
	for (; i < til; ++i)
	{
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
		else
#endif
			{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len_X86[i] += 16;
	}
}
// Appends the raw 16 binary bytes of crypt_key (output 1) to input buffer 2
// and advances its length by 16.
void DynamicFunc__append2_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index, idx;
		for (index = i; index < til; ++index)
		{
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len2[idx][index&(SIMD_COEF_32-1)];
			if (!ip)
			{
				// Empty buffer: word-wise interleaved copy (fast path).
				ARCH_WORD_32 *po = input_buf2[idx].w;
				ARCH_WORD_32 *pi = crypt_key[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++)
				{
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else
			{
				// Unaligned append: byte-by-byte through GETPOS.
				for (i = 0; i < 16; ++i)
					input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	// Flat (x86) fallback.
	for (; i < til; ++i)
	{
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
		else
#endif
			{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len2_X86[i] += 16;
	}
}
/**************************************************************
* DYNAMIC primitive helper function
* Append salt #2 into input 1
*************************************************************/
// Thin wrappers: each forwards to __append_string (input 1) or
// __append2_string (input 2) with a fixed data source.
void DynamicFunc__append_2nd_salt(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append salt #2 into input 2
*************************************************************/
void DynamicFunc__append_2nd_salt2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append UserID into input 1
*************************************************************/
void DynamicFunc__append_userid(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append UserID into input 2
*************************************************************/
void DynamicFunc__append_userid2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
// Thin wrappers appending format constant N (curdat.Consts[N-1]) into
// input 1 (__append_string) or input 2 (__append2_string).
void DynamicFunc__append_input1_from_CONST1(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input1_from_CONST2(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input1_from_CONST3(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input1_from_CONST4(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input1_from_CONST5(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input1_from_CONST6(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input1_from_CONST7(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input1_from_CONST8(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
void DynamicFunc__append_input2_from_CONST1(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input2_from_CONST2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input2_from_CONST3(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input2_from_CONST4(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input2_from_CONST5(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input2_from_CONST6(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input2_from_CONST7(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input2_from_CONST8(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
// Thin wrappers appending ciphertext field N (flds[N]) into input 1
// (__append_string) or input 2 (__append2_string).
void DynamicFunc__append_fld0(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append_fld1(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append_fld2(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append_fld3(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append_fld4(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append_fld5(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append_fld6(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append_fld7(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append_fld8(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append_fld9(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
void DynamicFunc__append2_fld0(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append2_fld1(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append2_fld2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append2_fld3(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append2_fld4(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append2_fld5(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append2_fld6(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append2_fld7(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append2_fld8(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append2_fld9(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
// Switches the format from SSE ('interleaved') to x86 ('flat') mode and
// de-interleaves input buffer 1 from the SIMD layout into the per-candidate
// flat buffers (input_buf_X86), copying the per-lane lengths as well and
// NUL-terminating each flat buffer at its true length.
void DynamicFunc__SSEtoX86_switch_input1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx, max;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 2;
	for (j = 0; j < m_count; j += SIMD_COEF_32)
	{
		ARCH_WORD_32 *cpi;
		ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = input_buf_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = input_buf_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = input_buf_X86[j+i].x1.w;
#endif
		idx = j / SIMD_COEF_32;
		cpi = input_buf[idx].w;
		// Copy out each lane's length, tracking the longest so we know how
		// many 32-bit words must be de-interleaved below.
		max = total_len_X86[j] = (total_len[idx][0]);
		for (i = 1; i < SIMD_COEF_32; i++)
			if (max < (total_len_X86[j+i] = total_len[idx][i])) // bugfix: lane index is i, was j (out of range for j >= SIMD_COEF_32)
				max = total_len_X86[j+i];
		max = (max+3)>>2; // bytes -> 32-bit words, rounded up
		for (k = 0; k < max; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpo[i]++ = *cpi++;
		}
		// NUL-terminate each flat buffer at its real length (this also
		// removes any 0x80 SIMD padding byte that was copied across).
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			input_buf_X86[(j>>1)+(i>>1)].x1.b[total_len_X86[j+i]] = 0;
			input_buf_X86[(j>>1)+(i>>1)].x2.b2[total_len_X86[j+i+1]] = 0;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			input_buf_X86[j+i].x1.b[total_len_X86[j+i]] = 0;
#endif
	}
#endif
}
// Switches the format from SSE to x86 ('flat') mode and de-interleaves input
// buffer 2 into the flat per-candidate buffers (input_buf2_X86), copying the
// per-lane lengths and NUL-terminating each flat buffer.
void DynamicFunc__SSEtoX86_switch_input2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx, max;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 2;
	for (j = 0; j < m_count; j += SIMD_COEF_32)
	{
		ARCH_WORD_32 *cpi;
		ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = input_buf2_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = input_buf2_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = input_buf2_X86[j+i].x1.w;
#endif
		idx = j / SIMD_COEF_32;
		cpi = input_buf2[idx].w;
		// Copy out each lane's length, tracking the longest.
		max = total_len2_X86[j] = (total_len2[idx][0]);
		for (i = 1; i < SIMD_COEF_32; i++)
			if (max < (total_len2_X86[j+i] = total_len2[idx][i]))
				max = total_len2_X86[j+i];
		max = (max+3)>>2; // bytes -> 32-bit words, rounded up
		for (k = 0; k < max; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpo[i]++ = *cpi++;
		}
		// get rid of the 0x80
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			input_buf2_X86[(j>>1)+(i>>1)].x1.b[total_len2_X86[j+i]] = 0;    // bugfix: was total_len_X86 (input-1 lengths)
			input_buf2_X86[(j>>1)+(i>>1)].x2.b2[total_len2_X86[j+i+1]] = 0; // bugfix: was total_len_X86
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			input_buf2_X86[j+i].x1.b[total_len2_X86[j+i]] = 0;
#endif
	}
#endif
}
// Switches to x86 mode and de-interleaves crypt result 1 (crypt_key) into the
// flat crypt_key_X86 buffers.  Copies a fixed 4 words (16 bytes) per lane.
void DynamicFunc__SSEtoX86_switch_output1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 2;
	for (j = 0; j < m_count; j += SIMD_COEF_32)
	{
		ARCH_WORD_32 *cpi;
		ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if MD5_X2
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = crypt_key_X86[j+i].x1.w;
#endif
		idx = j/SIMD_COEF_32;
		cpi = (void*)crypt_key[idx].c;
		// 4 interleaved words per lane: SIMD -> flat.
		for (k = 0; k < 4; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpo[i]++ = *cpi++;
		}
	}
#endif
}
// Switches to x86 mode and de-interleaves crypt result 2 (crypt_key2) into
// the flat crypt_key2_X86 buffers (4 words per lane).
void DynamicFunc__SSEtoX86_switch_output2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 2;
	for (j = 0; j < m_count; j += SIMD_COEF_32)
	{
		ARCH_WORD_32 *cpi;
		ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = crypt_key2_X86[j+i].x1.w;
#endif
		idx = j / SIMD_COEF_32;
		cpi = crypt_key2[idx].w;
		// 4 interleaved words per lane: SIMD -> flat.
		for (k = 0; k < 4; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpo[i]++ = *cpi++;
		}
	}
#endif
}
// Switches back to SSE mode and re-interleaves flat input buffer 1 into the
// SIMD layout.  The SIMD buffers are cleaned first, so the += on total_len
// effectively sets each lane's length from total_len_X86.
void DynamicFunc__X86toSSE_switch_input1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int j, idx, idx_mod;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	__nonMP_DynamicFunc__clean_input();
	for (j = 0; j < m_count; ++j) {
		idx = j/SIMD_COEF_32;
		idx_mod = j&(SIMD_COEF_32-1);
		total_len[idx][idx_mod] += total_len_X86[j];
#if (MD5_X2)
		if (j & 1)
			__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>1].x2.B2,total_len_X86[j],0,1);
		else
#endif
			__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>MD5_X2].x1.B,total_len_X86[j],0,1);
	}
#endif
}
// Switches back to SSE mode and re-interleaves flat input buffer 2 into the
// SIMD layout (mirror of the input1 variant above).
void DynamicFunc__X86toSSE_switch_input2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int j, idx, idx_mod;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	__nonMP_DynamicFunc__clean_input2();
	for (j = 0; j < m_count; ++j) {
		idx = j/SIMD_COEF_32;
		idx_mod = j&(SIMD_COEF_32-1);
		total_len2[idx][idx_mod] += total_len2_X86[j];
#if (MD5_X2)
		if (j & 1)
			__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>1].x2.B2,total_len2_X86[j],0,1);
		else
#endif
			__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>MD5_X2].x1.B,total_len2_X86[j],0,1);
	}
#endif
}
// Switches back to SSE mode and re-interleaves the flat crypt result 1
// (crypt_key_X86) into the SIMD crypt_key buffers.  Note the copy direction
// is reversed vs. the SSEtoX86 variant: *cpi++ = *cpo[i]++.
void DynamicFunc__X86toSSE_switch_output1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	for (j = 0; j < m_count; j += SIMD_COEF_32)
	{
		ARCH_WORD_32 *cpi;
		ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = crypt_key_X86[j+i].x1.w;
#endif
		idx = j / SIMD_COEF_32;
		cpi = (void*)crypt_key[idx].c;
		for (k = 0; k < 4; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpi++ = *cpo[i]++;
		}
	}
#endif
}
// Switches back to SSE mode and re-interleaves the flat crypt result 2
// (crypt_key2_X86) into the SIMD crypt_key2 buffers.
void DynamicFunc__X86toSSE_switch_output2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	for (j = 0; j < m_count; j += SIMD_COEF_32)
	{
		ARCH_WORD_32 *cpi;
		ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = crypt_key2_X86[j+i].x1.w;
#endif
		idx = j / SIMD_COEF_32;
		cpi = crypt_key2[idx].w;
		// flat -> SIMD interleave, 4 words per lane
		for (k = 0; k < 4; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpi++ = *cpo[i]++;
		}
	}
#endif
}
// This function, simply 'switches' back to SSE It does NOT copy any data from X86 to SSE
void DynamicFunc__ToSSE(DYNA_OMP_PARAMS)
{
	/* Mode switch only — no buffer data is copied.  A value of zero means
	   SSE is unavailable for this format, so it must stay untouched. */
	if (dynamic_use_sse != 0)
		dynamic_use_sse = 1;
}
// This function, simply 'switches' to X86 It does NOT copy any data from SSE to X86
void DynamicFunc__ToX86(DYNA_OMP_PARAMS)
{
	/* Mode switch only — no buffer data is copied.  A value of zero means
	   SSE is unavailable, so there is no mode to switch away from. */
	if (dynamic_use_sse != 0)
		dynamic_use_sse = 2;
}
// Selects the lower-case hex conversion tables used by all subsequent
// base-16 primitives (dynamic_itoa16: nibble table; itoa16_w2: 2-char table).
void DynamicFunc__base16_convert_locase(DYNA_OMP_PARAMS)
{
	dynamic_itoa16 = itoa16;
	itoa16_w2=itoa16_w2_l;
}
// Selects the upper-case hex conversion tables.
void DynamicFunc__base16_convert_upcase(DYNA_OMP_PARAMS)
{
	dynamic_itoa16 = itoa16u;
	itoa16_w2=itoa16_w2_u;
}
/**************************************************************
* DEPRECATED functions. These are the older pseudo functions
* which we now have flags for. We keep them, so that we can
* add the proper flags, even if the user is running an older
* script.  They are intentionally no-ops; ConvertFuncs() below
* strips them out of the compiled function list.
*************************************************************/
void DynamicFunc__InitialLoadKeysToInput(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32(DYNA_OMP_PARAMS) {}
/**************************************************************
**************************************************************
**************************************************************
**************************************************************
* DYNAMIC primitive helper function
* This is the END of the primitives.
**************************************************************
**************************************************************
**************************************************************
*************************************************************/
// Expands/filters one script primitive into the list of functions actually
// compiled into the format.  Returns a static array and sets *count to the
// number of entries: 0 for primitives that should be ignored (deprecated
// no-ops; SSE/x86 switchers in non-SIMD builds), otherwise 1 (pass-through).
// NOTE: the returned array is static — not reentrant.
static DYNAMIC_primitive_funcp *ConvertFuncs(DYNAMIC_primitive_funcp p, unsigned int *count)
{
	static DYNAMIC_primitive_funcp fncs[20];
	*count = 0;
	if (p==DynamicFunc__InitialLoadKeysToInput ||
		p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
		p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
		p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
		return fncs; // ignore these
#ifndef SIMD_COEF_32
	if (p==DynamicFunc__SSEtoX86_switch_input1  || p==DynamicFunc__SSEtoX86_switch_input2 ||
		p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
		p==DynamicFunc__X86toSSE_switch_input1  || p==DynamicFunc__X86toSSE_switch_input2 ||
		p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
		p==DynamicFunc__ToSSE                   || p==DynamicFunc__ToX86)
		return fncs; // we ignore these functions 100% in x86 mode.
#endif
//	if (p==DynamicFunc__append_input2_from_CONST1) {
//		fncs[0] = DynamicFunc__set_input2;
//		fncs[1] = DynamicFunc__set_CONST1;
//		fncs[2] = DynamicFunc__append_CONST;
//		*count = 3;
//	}
	/* LOOK INTO THIS!!!!! This may not be valid, now that SHA1 is handled 100% outside of the SSE2 code.
	   But I am not sure just WTF this is supposed to do anyway, since not LE should be using CTX only??? */
#if !ARCH_LITTLE_ENDIAN
	if (/*p==DynamicFunc__SHA1_crypt_input1_append_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_append_input2 ||
		/*p==DynamicFunc__SHA1_crypt_input2_append_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_append_input1 ||
		/*p==DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input1 ||
		/*p==DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input2 ||
		/*p==DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input2 ||
		/*p==DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__SHA1_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__SHA1_crypt_input2_to_output1_FINAL)
		curdat.force_md5_ctx = 0;
#endif
	// Default: primitive passes through unchanged.
	*count = 1;
	fncs[0] = p;
	return fncs;
}
#ifdef _OPENMP
// Returns 1 if primitive p is unsafe under OpenMP (it mutates process-global
// state such as the SSE/x86 mode flag or the shared hex tables), in which
// case the whole format must run single-threaded.
static int isBadOMPFunc(DYNAMIC_primitive_funcp p)
{
	// If ANY of these functions are seen, we can NOT use OMP for this single format.
#if SIMD_COEF_32
	if (p==DynamicFunc__SSEtoX86_switch_input1  || p==DynamicFunc__SSEtoX86_switch_input2 ||
		p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
		p==DynamicFunc__X86toSSE_switch_input1  || p==DynamicFunc__X86toSSE_switch_input2 ||
		p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
		p==DynamicFunc__ToSSE                   || p==DynamicFunc__ToX86)
		return 1;
#endif
	if (p==DynamicFunc__base16_convert_locase || p==DynamicFunc__base16_convert_upcase)
		return 1;
	return 0;
}
#endif
// Expands to the 8 'big hash' primitive comparisons for hash family H and
// returns 1 from the enclosing predicate when p matches any of them.
#define RETURN_TRUE_IF_BIG_FUNC(H) if(p==DynamicFunc__##H##_crypt_input1_append_input2 || \
		p==DynamicFunc__##H##_crypt_input2_append_input1 || \
		p==DynamicFunc__##H##_crypt_input1_overwrite_input1 || \
		p==DynamicFunc__##H##_crypt_input2_overwrite_input2 || \
		p==DynamicFunc__##H##_crypt_input1_overwrite_input2 || \
		p==DynamicFunc__##H##_crypt_input2_overwrite_input1 || \
		p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL || \
		p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL) \
		return 1
// Returns 1 if p is any MD4 primitive (flat or legacy mmx_coef variants).
static int isMD4Func(DYNAMIC_primitive_funcp p)
{
	// handle flats
	RETURN_TRUE_IF_BIG_FUNC(MD4);
	// handle older mmx_coef variants
	if (p==DynamicFunc__crypt_md4 || p==DynamicFunc__crypt_md4_in1_to_out2 ||
		p==DynamicFunc__crypt2_md4 || p==DynamicFunc__crypt_md4_in2_to_out1)
		return 1;
	return 0;
}
#ifdef _OPENMP
// Only used in OMP code, to compute LCM granularity. So we #ifdef it out to avoid compiler warnings.
#ifdef SIMD_COEF_32
// otherwise unused
// Returns 1 if p is any MD5 primitive (flat or legacy mmx_coef variants).
static int isMD5Func(DYNAMIC_primitive_funcp p)
{
	// handle flats
	RETURN_TRUE_IF_BIG_FUNC(MD5);
	// handle older mmx_coef variants
	if (p==DynamicFunc__crypt_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
		p==DynamicFunc__crypt_md5_in1_to_out2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
		p==DynamicFunc__crypt_md5_to_input_raw || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen ||
		p==DynamicFunc__crypt_md5_in2_to_out1 || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE ||
		p==DynamicFunc__crypt2_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
		return 1;
	return 0;
}
#endif
#endif
// Per-hash-family predicates: each returns 1 when p is one of that family's
// eight 'big hash' primitives (see RETURN_TRUE_IF_BIG_FUNC above).
static int isSHA1Func(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(SHA1);
	return 0;
}
static int isSHA2_256Func(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(SHA224); RETURN_TRUE_IF_BIG_FUNC(SHA256);
	return 0;
}
static int isSHA2_512Func(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(SHA384); RETURN_TRUE_IF_BIG_FUNC(SHA512);
	return 0;
}
static int isGOSTFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(GOST);
	return 0;
}
static int isTigerFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(Tiger);
	return 0;
}
static int isWHIRLFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(WHIRLPOOL);
	return 0;
}
static int isRIPEMDFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(RIPEMD128); RETURN_TRUE_IF_BIG_FUNC(RIPEMD160);
	RETURN_TRUE_IF_BIG_FUNC(RIPEMD256); RETURN_TRUE_IF_BIG_FUNC(RIPEMD320);
	return 0;
}
static int isHAVALFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(HAVAL128_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL160_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL192_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL224_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL256_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_5);
	return 0;
}
static int isMD2Func(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(MD2);
	return 0;
}
static int isPANAMAFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(PANAMA);
	return 0;
}
static int isSKEINFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(SKEIN224); RETURN_TRUE_IF_BIG_FUNC(SKEIN256);
	RETURN_TRUE_IF_BIG_FUNC(SKEIN384); RETURN_TRUE_IF_BIG_FUNC(SKEIN512);
	return 0;
}
static int isKECCAKFunc(DYNAMIC_primitive_funcp p) {
	RETURN_TRUE_IF_BIG_FUNC(SHA3_224); RETURN_TRUE_IF_BIG_FUNC(SHA3_256); RETURN_TRUE_IF_BIG_FUNC(SHA3_384);
	RETURN_TRUE_IF_BIG_FUNC(SHA3_512); RETURN_TRUE_IF_BIG_FUNC(KECCAK_256); RETURN_TRUE_IF_BIG_FUNC(KECCAK_512);
	return 0;
}
// LARGE_HASH_EDIT_POINT (Add a new IsXXXFunc() type function)
/*
 * Returns 1 if 'p' is one of the *_FINAL large-hash primitives, i.e. a
 * function of the form DynamicFunc__<H>_crypt_inputX_to_output1_FINAL for
 * any supported large hash H.  dynamic_SETUP() uses this to enforce that
 * such a primitive can only appear as the LAST function of a script.
 * The local IF(H) macro expands to the two pointer comparisons (input1 and
 * input2 variants) for hash H.
 */
static int isLargeHashFinalFunc(DYNAMIC_primitive_funcp p)
{
#undef IF
#define IF(H) p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL||p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL
	if (IF(SHA1)||IF(SHA224)||IF(SHA256)||IF(SHA384)||IF(SHA512)||IF(GOST)||IF(WHIRLPOOL)||IF(Tiger)||IF(RIPEMD128)||
		IF(RIPEMD160)||IF(RIPEMD256)||IF(RIPEMD320)||
		IF(HAVAL128_3)||IF(HAVAL128_4)||IF(HAVAL128_5)||IF(HAVAL160_3)||IF(HAVAL160_4)||IF(HAVAL160_5)||
		IF(HAVAL192_3)||IF(HAVAL192_4)||IF(HAVAL192_5)||IF(HAVAL224_3)||IF(HAVAL224_4)||IF(HAVAL224_5)||
		IF(HAVAL256_3)||IF(HAVAL256_4)||IF(HAVAL256_5)||IF(MD2)||IF(PANAMA)||IF(SKEIN224)||IF(SKEIN256)||
		IF(SKEIN384)||IF(SKEIN512)||IF(SHA3_224)||IF(SHA3_256)||IF(SHA3_384)||IF(SHA3_512)||
		IF(KECCAK_256)||IF(KECCAK_512))
		// LARGE_HASH_EDIT_POINT
		return 1;
	return 0;
}
#ifdef _OPENMP
#ifdef SIMD_COEF_32
// Simple euclid algorithm for GCD
/*
 * Greatest common divisor of two non-negative ints, by Euclid's algorithm.
 * GCD(a, 0) == a, so a zero argument simply yields the other argument.
 */
static int GCD (int a, int b)
{
	int r;
	while (b != 0) {
		r = a % b;
		a = b;
		b = r;
	}
	return a;
}
// simple algorithm for LCM is (a*b)/GCD(a,b)
/*
 * Least common multiple: LCM(a,b) = (a / GCD(a,b)) * b.
 * Dividing 'a' by the GCD before multiplying keeps the intermediate value
 * small, reducing the chance of int overflow.
 */
static int LCM(int a, int b)
{
	return (a / GCD(a, b)) * b;
}
#endif
/*
 * Configure OpenMP work-splitting for this dynamic format.
 *
 * Sets curdat.omp_granularity so each OMP slice of the key array is a
 * multiple of every SIMD block size used by the script: for each primitive
 * in Setup->pFuncs[], the granularity is raised to the LCM of itself and
 * that hash family's SIMD_PARA_* x SIMD_COEF_* width.  Non-SIMD builds (or
 * scripts flagged MGF_NOTSSE2Safe) just use the fixed OMP_INC step.
 * Finally, OMP-unsafe primitives strip the FMT_OMP flags entirely, and
 * MGF_POOR_OMP marks formats whose OMP scaling is known to be poor.
 */
static void dyna_setupOMP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt)
{
	unsigned int i;
#ifndef SIMD_COEF_32
	curdat.omp_granularity=OMP_INC;
#else
	if ((curdat.pSetup->flags& MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe)
		curdat.omp_granularity=OMP_INC;
	else {
		// Start at 1 and fold in the SIMD width of every hash the script uses.
		curdat.omp_granularity = 1;
		for (i=0; Setup->pFuncs[i]; ++i) {
			if (isMD5Func(Setup->pFuncs[i]))
				curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD5*SIMD_COEF_32);
			else if (isMD4Func(Setup->pFuncs[i]))
				curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD4*SIMD_COEF_32);
			else if (isSHA1Func(Setup->pFuncs[i]))
				curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA1*SIMD_COEF_32);
			else if (isSHA2_256Func(Setup->pFuncs[i]))
// SHA-256 may be built with or without a PARA multiplier; pick whichever applies.
#if SIMD_COEF_32
#if SIMD_PARA_SHA256
				curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA256*SIMD_COEF_32);
#else
				curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_32);
#endif
#else
				curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
			else if (isSHA2_512Func(Setup->pFuncs[i]))
// SHA-512 uses the 64-bit SIMD lane count when available.
#if SIMD_COEF_64
#if SIMD_PARA_SHA512
				curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA512*SIMD_COEF_64);
#else
				curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_64);
#endif
#else
				curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
		}
	}
#endif
	// Any primitive known to be OMP-unsafe disables OMP for the whole format.
	for (i=0; Setup->pFuncs[i]; ++i) {
		if (isBadOMPFunc(Setup->pFuncs[i]))
			pFmt->params.flags &= (~(FMT_OMP|FMT_OMP_BAD));
	}
	if ((pFmt->params.flags&FMT_OMP)==FMT_OMP && (curdat.pSetup->startFlags&MGF_POOR_OMP)==MGF_POOR_OMP)
		pFmt->params.flags |= FMT_OMP_BAD;
}
#endif
/*
 * Build the runtime data (curdat) and the john fmt_main structure (pFmt)
 * for one dynamic format, from the DYNAMIC_Setup script description.
 *
 * Validates the script (flag combinations, function ordering, constant
 * counts), translates deprecated 1st-function loaders into startFlags,
 * copies the primitive function list into curdat.dynamic_FUNCTIONS, and
 * builds the self-test vector array from the preloads.
 *
 * Returns 1 on success, 0 (after printing a diagnostic) on any error.
 *
 * Fixes relative to the previous revision:
 *  - growing dynamic_FUNCTIONS used to overwrite the old pointer before
 *    copying, then copied the new array onto itself, losing every function
 *    pointer stored so far; we now copy from the saved old array.
 *  - three MGF_KEYS_INPUT error messages wrongly named set_input_len_32.
 *  - strchr() on szFORMAT_NAME is now checked for NULL before ++.
 */
int dynamic_SETUP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt)
{
	unsigned int i, j, cnt, cnt2, x;
	DYNAMIC_primitive_funcp *pFuncs;

	// A format may declare itself invalid when ':' is the field separator.
	if (Setup->flags & MGF_ColonNOTValid)
	{
		extern struct options_main options;
		if (options.loader.field_sep_char == ':')
		{
			return 0;
		}
	}

	// Deal with deprecated 1st functions. Convert them to proper 'flags'
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeysToInput)
		Setup->startFlags |= MGF_KEYS_INPUT;
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2)
		Setup->startFlags |= MGF_KEYS_CRYPT_IN2;
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1)
		Setup->startFlags |= MGF_KEYS_BASE16_IN1;
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
		Setup->startFlags |= MGF_KEYS_BASE16_IN1_Offset32;

	// Record which fixed binary-input size (in hex chars) this format uses.
	curdat.dynamic_40_byte_input = ((Setup->startFlags&MGF_INPUT_20_BYTE)==MGF_INPUT_20_BYTE) ? 1 : 0;
	curdat.dynamic_48_byte_input = ((Setup->startFlags&MGF_INPUT_24_BYTE)==MGF_INPUT_24_BYTE) ? 1 : 0;
	curdat.dynamic_64_byte_input = ((Setup->startFlags&MGF_INPUT_32_BYTE)==MGF_INPUT_32_BYTE) ? 1 : 0;
	curdat.dynamic_56_byte_input = ((Setup->startFlags&MGF_INPUT_28_BYTE)==MGF_INPUT_28_BYTE) ? 1 : 0;
	curdat.dynamic_80_byte_input = ((Setup->startFlags&MGF_INPUT_40_BYTE)==MGF_INPUT_40_BYTE) ? 1 : 0;
	curdat.dynamic_96_byte_input = ((Setup->startFlags&MGF_INPUT_48_BYTE)==MGF_INPUT_48_BYTE) ? 1 : 0;
	curdat.dynamic_128_byte_input= ((Setup->startFlags&MGF_INPUT_64_BYTE)==MGF_INPUT_64_BYTE) ? 1 : 0;

	curdat.FldMask = 0;
	curdat.b2Salts = ((Setup->flags&MGF_SALTED2)==MGF_SALTED2) ? 1 : 0;
	curdat.dynamic_base16_upcase = ((Setup->flags&MGF_BASE_16_OUTPUT_UPCASE)==MGF_BASE_16_OUTPUT_UPCASE) ? 1 : 0;
	// Accumulate which of the extra colon-separated fields (F0..F9) are used.
	curdat.FldMask |= ((Setup->flags&MGF_FLD0)==MGF_FLD0) ? MGF_FLD0 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD1)==MGF_FLD1) ? MGF_FLD1 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD2)==MGF_FLD2) ? MGF_FLD2 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD3)==MGF_FLD3) ? MGF_FLD3 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD4)==MGF_FLD4) ? MGF_FLD4 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD5)==MGF_FLD5) ? MGF_FLD5 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD6)==MGF_FLD6) ? MGF_FLD6 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD7)==MGF_FLD7) ? MGF_FLD7 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD8)==MGF_FLD8) ? MGF_FLD8 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD9)==MGF_FLD9) ? MGF_FLD9 : 0;

	curdat.dynamic_base64_inout = 0;
	curdat.dynamic_salt_as_hex = 0;
	curdat.dynamic_salt_as_hex_format_type = 0;
	curdat.force_md5_ctx = 0;
	curdat.nUserName = 0;
	curdat.nPassCase = 1;
	curdat.md5_startup_in_x86 = curdat.dynamic_use_sse = 0; // if 0, then never use SSE2
	curdat.init = 0;
	curdat.pSetup = Setup;

	// Wire up the generic dynamic-format methods.
	pFmt->methods.binary = get_binary;
	pFmt->methods.cmp_all=cmp_all;
	pFmt->methods.cmp_one=cmp_one;
	pFmt->methods.source=fmt_default_source;
	pFmt->methods.salt = get_salt;
	pFmt->methods.done = done;
	pFmt->methods.set_salt = set_salt;
	pFmt->methods.salt_hash = salt_hash;
	//pFmt->params.format_name = str_alloc_copy(Setup->szFORMAT_NAME);
	pFmt->params.format_name = "";
	pFmt->params.benchmark_length = 0;		// NOTE 0 'assumes' salted. If unsalted, we set back to -1
	pFmt->params.salt_size = 0;
	curdat.using_flat_buffers_sse2_ok = 0;	// used to distingish MGF_NOTSSE2Safe from MGF_FLAT_BUFFERS
	if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS)
		curdat.using_flat_buffers_sse2_ok = 1;
#ifdef SIMD_COEF_32
	curdat.dynamic_use_sse = 1;  // if 1, then we are in SSE2 mode (but can switch out)
	if ((Setup->flags & MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) {
		curdat.dynamic_use_sse = 0; // Do not use SSE code at all.
	} else if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS) {
		curdat.dynamic_use_sse = 0; // uses flat buffers but will use SSE code (large formats use the flat buffers, and the SSE2 code 'mixes' them).
		curdat.using_flat_buffers_sse2_ok = 1;
	} else if ((Setup->flags & MGF_StartInX86Mode) == MGF_StartInX86Mode) {
		curdat.dynamic_use_sse = 2;  // if 2, then we are in SSE2 mode, but currently using X86 (and can switch back to SSE2).
		curdat.md5_startup_in_x86 = 1;
	}
	if (curdat.dynamic_use_sse || curdat.using_flat_buffers_sse2_ok) {
		pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT;
		pFmt->params.algorithm_name = ALGORITHM_NAME;
	} else {
		pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
		pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
	}
#else
	pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
	pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
#endif
	pFmt->params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
	if (pFmt->params.min_keys_per_crypt > 64)
		pFmt->params.min_keys_per_crypt = 64;
	dynamic_use_sse = curdat.dynamic_use_sse;

	// Ok, set the new 'constants' data
	memset(curdat.Consts, 0, sizeof(curdat.Consts));
	memset(curdat.ConstsLen, 0, sizeof(curdat.ConstsLen));
	for (curdat.nConsts = 0; curdat.nConsts < 8; ++curdat.nConsts)
	{
		if (Setup->pConstants[curdat.nConsts].Const == NULL)
			break;
		//curdat.Consts[curdat.nConsts] = (unsigned char*)str_alloc_copy(Setup->pConstants[curdat.nConsts].Const);
		//curdat.ConstsLen[curdat.nConsts] = strlen(Setup->pConstants[curdat.nConsts].Const);
		// we really do not 'have' to null terminate, but do just to be on the 'safe' side.
		curdat.Consts[curdat.nConsts] = mem_alloc_tiny(Setup->pConstants[curdat.nConsts].len+1, MEM_ALIGN_NONE);
		memcpy(curdat.Consts[curdat.nConsts], Setup->pConstants[curdat.nConsts].Const, Setup->pConstants[curdat.nConsts].len);
		curdat.Consts[curdat.nConsts][Setup->pConstants[curdat.nConsts].len] = 0;
		curdat.ConstsLen[curdat.nConsts] = Setup->pConstants[curdat.nConsts].len;
	}

	// Select binary decoder for the various base64 input encodings.
	if ( (Setup->flags & MGF_INPBASE64) == MGF_INPBASE64)
	{
		curdat.dynamic_base64_inout = 1;
		pFmt->methods.binary = binary_b64;
	}
	if ( (Setup->flags & MGF_INPBASE64m) == MGF_INPBASE64m)
	{
		curdat.dynamic_base64_inout = 3;
		pFmt->methods.binary = binary_b64m;
	}
	if ( (Setup->flags & MGF_INPBASE64b) == MGF_INPBASE64b)
	{
		curdat.dynamic_base64_inout = 5;
		pFmt->methods.binary = binary_b64b;
	}
	if ( (Setup->flags & MGF_INPBASE64_4x6) == MGF_INPBASE64_4x6)
	{
		curdat.dynamic_base64_inout = 2;
		pFmt->methods.binary = binary_b64_4x6;
		pFmt->methods.cmp_all = cmp_all_64_4x6;
		pFmt->methods.cmp_one = cmp_one_64_4x6;
#if !ARCH_LITTLE_ENDIAN
		pFmt->methods.binary_hash[0] = binary_hash_0_64x4;
		pFmt->methods.binary_hash[1] = binary_hash_1_64x4;
		pFmt->methods.binary_hash[2] = binary_hash_2_64x4;
		pFmt->methods.binary_hash[3] = binary_hash_3_64x4;
		pFmt->methods.binary_hash[4] = binary_hash_4_64x4;
		pFmt->methods.binary_hash[5] = binary_hash_5_64x4;
		pFmt->methods.get_hash[0] = get_hash_0_64x4;
		pFmt->methods.get_hash[1] = get_hash_1_64x4;
		pFmt->methods.get_hash[2] = get_hash_2_64x4;
		pFmt->methods.get_hash[3] = get_hash_3_64x4;
		pFmt->methods.get_hash[4] = get_hash_4_64x4;
		pFmt->methods.get_hash[5] = get_hash_5_64x4;
#endif
		// Not enough bits in a single WORD to do the 7th one.
		pFmt->methods.binary_hash[6] = NULL;
		pFmt->methods.get_hash[6] = NULL;
	}
//	printf ("%.13s",Setup->szFORMAT_NAME);
	if ( (Setup->flags & (MGF_INPBASE64|MGF_INPBASE64_4x6|MGF_INPBASE64a|MGF_INPBASE64m|MGF_INPBASE64b)) == 0) {
		pFmt->params.flags |= FMT_SPLIT_UNIFIES_CASE;
//		printf (" Setting FMT_SPLIT_UNIFIES_CASE");
		if (pFmt->methods.split == split) {
			pFmt->methods.split = split_UC;
//			printf (" split set to split_UC()\n");
		}
	}
//	else printf (" split set to split()\n");
	if (Setup->flags & MGF_UTF8)
		pFmt->params.flags |= FMT_UTF8;
	if (Setup->flags & MGF_INPBASE64a) {
		curdat.dynamic_base64_inout = 1;
		pFmt->methods.binary = binary_b64a;
	}

	// Username handling: 1 = as-is, 2 = upcased, 3 = lowercased.
	if ( (Setup->flags & MGF_USERNAME) == MGF_USERNAME)
		curdat.nUserName = 1;
	if ( (Setup->flags & MGF_USERNAME_UPCASE) == MGF_USERNAME_UPCASE)
		curdat.nUserName = 2;
	if ( (Setup->flags & MGF_USERNAME_LOCASE) == MGF_USERNAME_LOCASE)
		curdat.nUserName = 3;

	// Ok, what 'flag' in the format struct, do we clear???
	if ( (Setup->flags & MGF_PASSWORD_UPCASE) == MGF_PASSWORD_UPCASE) {
		curdat.nPassCase = 2;
		pFmt->params.flags &= (~FMT_CASE);
	}
	if ( (Setup->flags & MGF_PASSWORD_LOCASE) == MGF_PASSWORD_LOCASE) {
		curdat.nPassCase = 3;
		pFmt->params.flags &= (~FMT_CASE);
	}

	if ( (Setup->flags & MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX) {
		curdat.dynamic_salt_as_hex = 1;
		// The top byte of the 64-bit flags carries the hash-type selector.
		curdat.dynamic_salt_as_hex_format_type = Setup->flags >> 56;
	}
	if ( (Setup->flags & MGF_SALT_AS_HEX_TO_SALT2) == MGF_SALT_AS_HEX_TO_SALT2) {
		curdat.dynamic_salt_as_hex = 2;
		if (curdat.b2Salts)
			return !fprintf(stderr, "Error invalid format %s: MGF_SALT_AS_HEX_TO_SALT2 and MGF_SALTED2 are not valid to use in same format\n", Setup->szFORMAT_NAME);
		curdat.b2Salts = 2;
	}
	if ( (Setup->flags & MGF_SALT_UNICODE_B4_CRYPT) == MGF_SALT_UNICODE_B4_CRYPT && curdat.dynamic_salt_as_hex)
		curdat.dynamic_salt_as_hex |= 0x100;

	if ( (Setup->flags & MGF_SALTED) == 0)
	{
		curdat.dynamic_FIXED_SALT_SIZE = 0;
		pFmt->params.benchmark_length = -1;
		pFmt->params.salt_size = 0;
	}
	else
	{
		pFmt->params.salt_size = sizeof(void *);
		if (Setup->SaltLen > 0)
			curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
		else
		{
			// says we have a salt, but NOT a fixed sized one that we 'know' about.
			// if the SaltLen is -1, then there is NO constraints. If the SaltLen
			// is -12 (or any other neg number other than -1), then there is no
			// fixed salt length, but the 'max' salt size is -SaltLen.  So, -12
			// means any salt from 1 to 12 is 'valid'.
			if (Setup->SaltLen > -2)
				curdat.dynamic_FIXED_SALT_SIZE = -1;
			else {
				curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
#if !defined (SIMD_COEF_32)
				// for non-sse, we limit ourselves to 110 bytes, not 55. So, we can add 55 to this value
				curdat.dynamic_FIXED_SALT_SIZE -= 55;
#endif
			}
		}
	}

	// Plaintext length: either given by the script, or derived from the
	// buffer model (110 bytes flat/x86, 55 bytes SIMD) minus the salt length.
	if (Setup->MaxInputLen)
		pFmt->params.plaintext_length = Setup->MaxInputLen;
	else {
		if ( ((Setup->flags&MGF_FLAT_BUFFERS)==MGF_FLAT_BUFFERS) || ((Setup->flags&MGF_NOTSSE2Safe)==MGF_NOTSSE2Safe)) {
			pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
			if (pFmt->params.plaintext_length < 32)
				pFmt->params.plaintext_length = 32;
		} else {
			pFmt->params.plaintext_length = 55 - abs(Setup->SaltLen);
			if (pFmt->params.plaintext_length < 1) {
				pFmt->params.plaintext_length = 1;
				fprintf(stderr, "\nError, for format %s, MMX build, is not valid due to TOO long of a SaltLength\n", Setup->szFORMAT_NAME);
			}
		}
	}
#ifndef SIMD_COEF_32
	if (Setup->MaxInputLenX86) {
		pFmt->params.plaintext_length = Setup->MaxInputLenX86;
	} else {
		if (Setup->SaltLenX86)
			pFmt->params.plaintext_length = 110 - abs(Setup->SaltLenX86);
		else
			pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
		if (pFmt->params.plaintext_length < 32)
			pFmt->params.plaintext_length = 32;
	}
#endif

	curdat.store_keys_in_input = !!(Setup->startFlags&MGF_KEYS_INPUT );
	curdat.input2_set_len32 = !!(Setup->startFlags&MGF_SET_INP2LEN32);

	// Pick the hex-source method that matches the declared binary size.
	if (Setup->startFlags&MGF_SOURCE) {
		if      (Setup->startFlags&MGF_INPUT_20_BYTE) pFmt->methods.source = source_20_hex;
		else if (Setup->startFlags&MGF_INPUT_28_BYTE) pFmt->methods.source = source_28_hex;
		else if (Setup->startFlags&MGF_INPUT_32_BYTE) pFmt->methods.source = source_32_hex;
		else if (Setup->startFlags&MGF_INPUT_40_BYTE) pFmt->methods.source = source_40_hex;
		else if (Setup->startFlags&MGF_INPUT_48_BYTE) pFmt->methods.source = source_48_hex;
		else if (Setup->startFlags&MGF_INPUT_64_BYTE) pFmt->methods.source = source_64_hex;
		else pFmt->methods.source = source;
	}

	if (!curdat.store_keys_in_input && Setup->startFlags&MGF_KEYS_INPUT_BE_SAFE)
		curdat.store_keys_in_input = 3;

	curdat.store_keys_in_input_unicode_convert = !!(Setup->startFlags&MGF_KEYS_UNICODE_B4_CRYPT);
	if (curdat.store_keys_in_input_unicode_convert && curdat.store_keys_in_input)
		return !fprintf(stderr, "Error invalid format %s: Using MGF_KEYS_INPUT and MGF_KEYS_UNICODE_B4_CRYPT in same format is NOT valid\n", Setup->szFORMAT_NAME);

	curdat.store_keys_normal_but_precompute_hash_to_output2 = !!(Setup->startFlags&MGF_KEYS_CRYPT_IN2);

	curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = !!(Setup->startFlags&MGF_KEYS_BASE16_IN1);
	if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1)
		curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;

	// For MGF_KEYS_BASE16_IN1_Offset_<hash>, record the base-16 length of
	// that hash (first match wins); used as the input1 write offset.
#define IF_CDOFF32(F,L) if (!curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) \
		curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = \
		(!!((Setup->startFlags&MGF_KEYS_BASE16_IN1_Offset_TYPE)==MGF_KEYS_BASE16_IN1_Offset_ ## F))*L

	curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = 0;
	IF_CDOFF32(MD5,32); IF_CDOFF32(MD4,32); IF_CDOFF32(SHA1,40); IF_CDOFF32(SHA224,56);
	IF_CDOFF32(SHA256,64); IF_CDOFF32(SHA384,96); IF_CDOFF32(SHA512,128); IF_CDOFF32(GOST,64);
	IF_CDOFF32(WHIRLPOOL,128); IF_CDOFF32(Tiger,48); IF_CDOFF32(RIPEMD128,32); IF_CDOFF32(RIPEMD160,40);
	IF_CDOFF32(RIPEMD256,64); IF_CDOFF32(RIPEMD320,80); IF_CDOFF32(MD2,32); IF_CDOFF32(PANAMA,64);
	IF_CDOFF32(HAVAL128_3,32); IF_CDOFF32(HAVAL160_3,40); IF_CDOFF32(HAVAL192_3,48); IF_CDOFF32(HAVAL224_3,56); IF_CDOFF32(HAVAL256_3,64);
	IF_CDOFF32(HAVAL128_4,32); IF_CDOFF32(HAVAL160_4,40); IF_CDOFF32(HAVAL192_4,48); IF_CDOFF32(HAVAL224_4,56); IF_CDOFF32(HAVAL256_4,64);
	IF_CDOFF32(HAVAL128_5,32); IF_CDOFF32(HAVAL160_5,40); IF_CDOFF32(HAVAL192_5,48); IF_CDOFF32(HAVAL224_5,56); IF_CDOFF32(HAVAL256_5,64);
	IF_CDOFF32(SKEIN224,56); IF_CDOFF32(SKEIN256,64); IF_CDOFF32(SKEIN384,96); IF_CDOFF32(SKEIN512,128);
	IF_CDOFF32(SHA3_224,56); IF_CDOFF32(SHA3_256,64); IF_CDOFF32(SHA3_384,96); IF_CDOFF32(SHA3_512,128);
	IF_CDOFF32(KECCAK_256,64); IF_CDOFF32(KECCAK_512,128);
	// LARGE_HASH_EDIT_POINT

	if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX)
	{
		curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;
	}
	curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type = Setup->startFlags>>56;

	if ((Setup->startFlags) == 0)
	{
		// Ok, if we do not have some 'special' loader function, we MUST first clean some
		// input.  If that is not done, there is NO WAY this is a valid format.  This is
		// NOT an intelligent check, but more like the dummy lights on newer automobiles.
		// You know it will not work, but do not know 'why', nor should you care.
		if (Setup->pFuncs[0] != DynamicFunc__clean_input &&
			Setup->pFuncs[0] != DynamicFunc__clean_input2 &&
			Setup->pFuncs[0] != DynamicFunc__clean_input_kwik &&
			Setup->pFuncs[0] != DynamicFunc__clean_input2_kwik &&
			Setup->pFuncs[0] != DynamicFunc__clean_input_full)
			return !fprintf(stderr, "Error invalid format %s: The first command MUST be a clean of input 1 or input 2 OR a special key 2 input loader function\n", Setup->szFORMAT_NAME);
	}
	if ( (Setup->flags&MGF_SALTED2)==MGF_SALTED2 && (Setup->flags&MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX)
	{
		// if the user wants salt_as_hex, then here can NOT be 2 salts.
		return !fprintf(stderr, "Error invalid format %s: If using MGF_SALT_AS_HEX flag, then you can NOT have a 2nd salt.\n", Setup->szFORMAT_NAME);
	}

	if (Setup->pFuncs && Setup->pFuncs[0])
	{
		unsigned int z;
		for (z = 0; Setup->pFuncs[z]; ++z)
			;
		z += 50;
		curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);

		j = 0;
#if !ARCH_LITTLE_ENDIAN
		// for bigendian, we do NOT store into keys, since we byte swap them.

		if (curdat.store_keys_in_input==1) {
			// this is only a minor speed hit, so simply fix by doing this.  There is an
			// extra memcpy, that is it.
			curdat.store_keys_in_input = 0;
			curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
			curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
		}

		// NOTE NOTE NOTE, FIXME.  These are 'hacks' which slow stuff way down.  We should look at
		// building preloads that CAN do this. Store key input to input 1, but then do not use
		// input 1.  Put a copy to input 2, then append, etc.   In that way, we cut the number of
		// MD5's down by at least 1.
		//
		// But for now, just get it working.  Get it working faster later.

		// NOTE, these are commented out now. I am not sure why they were there
		// I think the thought was for SIMD, BUT SIMD is not used on Sparc
		// I am leaving this code for now, BUT I think it should NOT be here.
		// I was getting failures on the 16 byte sph formats, for any
		// hash(hash($p).$s)  such as md2(md2($p).$s) However, the modifications
		// where curdat.store_keys_in_input==1 is absolutely needed, or we have
		// get_key() failures all over the place.

		// note, with Setup->pFuncs[0]==DynamicFunc__set_input_len_32, we only will handle type 6 and 7
		// for now we have this 'turned' off.  It is fixed for type 6, 7 and 14.  It is left on for the
		// john.ini stuff.  Thus, if someone builds the intel version type 6, it will work (but slower).
//		if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==1 && Setup->pFuncs[0]==DynamicFunc__set_input_len_32) {
//			curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = 0;
//			curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
//			curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
//			curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__crypt_md5;
//			curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
//			Setup->pFuncs[0] = DynamicFunc__append_from_last_output_as_base16;
//		}
#endif
		for (i=0; Setup->pFuncs[i]; ++i)
		{
			if (j > z-10)
			{
				// Grow the function array.  BUGFIX: the previous code
				// overwrote curdat.dynamic_FUNCTIONS with the new allocation
				// BEFORE copying, then copied the (uninitialized) new array
				// onto itself, losing every pointer stored so far.  Save the
				// old array first and copy the j filled entries from it.
				unsigned int k;
				DYNAMIC_primitive_funcp *pOldFuncs = curdat.dynamic_FUNCTIONS;
				z += 100;
				curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);
				for (k = 0; k < j; ++k)
					curdat.dynamic_FUNCTIONS[k] = pOldFuncs[k];
			}
			if (curdat.store_keys_in_input)
			{
				// With MGF_KEYS_INPUT, input1 holds the raw keys and must not
				// be touched; reject any primitive that would modify it.
				if (Setup->pFuncs[i] == DynamicFunc__append_keys)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__append_keys2)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys2 called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__clean_input)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but clean_input called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__append_salt)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_salt called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output2_to_input1_as_base16)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output2_to_input1_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output2_to_input1_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output_as_base16)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_2nd_salt called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__set_input_len_32)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME);
				// BUGFIX: the next three messages used to name set_input_len_32
				// for every function (copy/paste); name the real offender.
				if (Setup->pFuncs[i] == DynamicFunc__set_input_len_64)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_64 called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__overwrite_salt_to_input1_no_size_fix)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__overwrite_salt_to_input1_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
				if (Setup->pFuncs[i] == DynamicFunc__append_input_from_input2)
					return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__append_input_from_input2 called and that is invalid\n", Setup->szFORMAT_NAME);
			}

			// Ok if copy constants are set, make SURE we have that many constants.
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST1 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST1) && curdat.nConsts == 0)
				return !fprintf(stderr, "Error invalid format %s: Append Constant function called, but NO constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST2 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST2) && curdat.nConsts < 2)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #2 function called, but NO constants, or less than 2 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST3 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST3) && curdat.nConsts < 3)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #3 function called, but NO constants, or less than 3 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST4 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST4) && curdat.nConsts < 4)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #4 function called, but NO constants, or less than 4 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST5 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST5) && curdat.nConsts < 5)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #5 function called, but NO constants, or less than 5 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST6 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST6) && curdat.nConsts < 6)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #6 function called, but NO constants, or less than 6 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST7 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST7) && curdat.nConsts < 7)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #7 function called, but NO constants, or less than 7 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST8 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST8) && curdat.nConsts < 8)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #8 function called, but NO constants, or less than 8 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt || Setup->pFuncs[i] == DynamicFunc__append_2nd_salt2) && curdat.b2Salts == 0)
				return !fprintf(stderr, "Error invalid format %s: A call to one of the 'salt-2' functions, but this format does not have MFG_SALT2 flag set\n", Setup->szFORMAT_NAME);

			// Ok, if we have made it here, the function is 'currently' still valid.  Load this pointer into our array of pointers.
			pFuncs = ConvertFuncs(Setup->pFuncs[i], &cnt2);
#define IS_FUNC_NAME(H,N) if(is##H##Func(pFuncs[x])){ if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME)) pFmt->params.algorithm_name = ALGORITHM_NAME_##N; \
			else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86)) pFmt->params.algorithm_name = ALGORITHM_NAME_X86_##N; }
			for (x = 0; x < cnt2; ++x) {
				curdat.dynamic_FUNCTIONS[j++] = pFuncs[x];
				if (pFuncs[x] == DynamicFunc__setmode_unicode || pFuncs[x] == DynamicFunc__setmode_unicodeBE)
					pFmt->params.flags |= FMT_UNICODE;
				IS_FUNC_NAME(SHA1,S)
				if (isSHA2_256Func(pFuncs[x])) {
#ifdef SIMD_COEF_32
					if (curdat.using_flat_buffers_sse2_ok)
						pFmt->params.algorithm_name = ALGORITHM_NAME_S2_256;
					else
#endif
						pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_256;
				}
				if (isSHA2_512Func(pFuncs[x])) {
#ifdef SIMD_COEF_64
					if (curdat.using_flat_buffers_sse2_ok)
						pFmt->params.algorithm_name = ALGORITHM_NAME_S2_512;
					else
#endif
						pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_512;
				}
				IS_FUNC_NAME(MD4,4)
				IS_FUNC_NAME(WHIRL,WP2)
				IS_FUNC_NAME(GOST,GST2)
				IS_FUNC_NAME(Tiger,TGR)
				IS_FUNC_NAME(RIPEMD,RIPEMD)
				IS_FUNC_NAME(HAVAL,HAVAL)
				IS_FUNC_NAME(MD2,MD2)
				IS_FUNC_NAME(PANAMA,PANAMA)
				IS_FUNC_NAME(SKEIN,SKEIN)
				// Note, until we add SIMD keccak, one algoithm is all we 'need'
				IS_FUNC_NAME(KECCAK,KECCAK)
//				IS_FUNC_NAME(KECCAK,SHA3_256)
//				IS_FUNC_NAME(KECCAK,SHA3_384)
//				IS_FUNC_NAME(KECCAK,SHA3_512)
//				IS_FUNC_NAME(KECCAK,KECCAK_256)
//				IS_FUNC_NAME(KECCAK,KECCAK_512)
				// LARGE_HASH_EDIT_POINT  (MUST match the just added a new IsXXXFunc() type function)
			}
			if (isLargeHashFinalFunc(curdat.dynamic_FUNCTIONS[j-1]))
			{
				if (Setup->pFuncs[i+1])
					return !fprintf(stderr, "Error invalid format %s: DynamicFunc__LARGE_HASH_crypt_inputX_to_output1_FINAL, can ONLY be used as the last function in a script\n", Setup->szFORMAT_NAME);
			}
		}
		curdat.dynamic_FUNCTIONS[j] = NULL;
	}

	if (!Setup->pPreloads || Setup->pPreloads[0].ciphertext == NULL)
	{
		return !fprintf(stderr, "Error invalid format %s: Error, no validation hash(s) for this format\n", Setup->szFORMAT_NAME);
	}
	cnt = 0;

#ifdef _OPENMP
	dyna_setupOMP(Setup, pFmt);
#endif

	// Build the self-test array, repeating the preloads (i wraps back to 0)
	// until the test array is full.  'A='/'U=' prefixes restrict a vector to
	// ASCII/ISO-8859-1 or UTF-8 target encodings respectively.
	{
		struct fmt_tests *pfx = mem_alloc_tiny(ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests), MEM_ALIGN_WORD);
		memset(pfx, 0, ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests));

		for (i = 0; cnt < ARRAY_COUNT(dynamic_tests) -1; ++i)
		{
			if (Setup->pPreloads[i].ciphertext == NULL) {
				i = 0;
			}
			if (Setup->pPreloads[i].ciphertext[0] == 'A' && Setup->pPreloads[i].ciphertext[1] == '=') {
				if (options.target_enc != ASCII && options.target_enc != ISO_8859_1)
					continue;
				pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
			}
			else if (Setup->pPreloads[i].ciphertext[0] == 'U' && Setup->pPreloads[i].ciphertext[1] == '=') {
				if (options.target_enc != UTF_8)
					continue;
				pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
			}
			else
				pfx[cnt].ciphertext = str_alloc_copy(Setup->pPreloads[i].ciphertext);
			pfx[cnt].plaintext = str_alloc_copy(Setup->pPreloads[i].plaintext);
			pfx[cnt].fields[0] = Setup->pPreloads[i].fields[0]  ? str_alloc_copy(Setup->pPreloads[i].fields[0]) : "";
			pfx[cnt].fields[1] = pfx[cnt].ciphertext;
			for (j = 2; j < 10; ++j)
				pfx[cnt].fields[j] = Setup->pPreloads[i].fields[j]  ? str_alloc_copy(Setup->pPreloads[i].fields[j]) : "";
			++cnt;
		}
		pfx[cnt].ciphertext = NULL;
		pfx[cnt].plaintext = NULL;

		pFmt->params.tests = pfx;
	}

	if (curdat.dynamic_base16_upcase)
		dynamic_itoa16 = itoa16u;
	else
		dynamic_itoa16 = itoa16;

	{
		char s[512], *cp;
		// Skip the leading "dynamic_NNN" token of the format name.
		// BUGFIX: guard against a name containing no space, which made the
		// old unconditional ++cp increment a NULL pointer.
		cp = strchr(Setup->szFORMAT_NAME, ' ');
		if (cp)
			++cp;
		else
			cp = Setup->szFORMAT_NAME;
		sprintf(s, "%s %s", cp, pFmt->params.algorithm_name);
		pFmt->params.algorithm_name = str_alloc_copy(s);
	}
	if ((Setup->flags & MGF_SALTED) && !Setup->SaltLen)
		return !fprintf(stderr, "Error invalid format %s\n\tIt is required to add SaltLen= to the script, for this format\n", Setup->szFORMAT_NAME);
	return 1;
}
/*
 * Load one dynamic subformat (preloaded builtin for idx < 1000, else a
 * john.conf-parsed script) into pFmt, then derive the signature label
 * (e.g. "$dynamic_0$") and the hash/salt offsets from the first test
 * ciphertext.  Returns 1 on success, 0 on failure.
 */
static int LoadOneFormat(int idx, struct fmt_main *pFmt)
{
	extern struct options_main options;
	char label[16] = { 0 }, label_id[16] = { 0 }, *cp = NULL;
	memcpy(pFmt, &fmt_Dynamic, sizeof(struct fmt_main));

	// TODO:
	// NOTE, this was commented out, because the late binding @dynamic=expr@
	// hashes were killing out possibly pre-setup input buffers. NOTE, that
	// things worked fine after this, all self tests do pass, and I am 99%
	// sure that all of this 'required' cleaning happens in init(). but I am
	// putting this comment in here, so that if at a later time, there are
	// problems and are tracked down to this, we will know why.
	// dynamic_RESET(pFmt);

	// Ok we need to list this as a dynamic format (even for the 'thin' formats)
	pFmt->params.flags |= FMT_DYNAMIC;

	if (idx < 1000) {
		if (dynamic_RESERVED_PRELOAD_SETUP(idx, pFmt) != 1)
			return 0;
	}
	else {
		if (dynamic_LOAD_PARSER_FUNCTIONS(idx, pFmt) != 1)
			return 0;
	}

	/* we 'have' to take the sig from the test array.  If we do not have */
	/* our preload array 'solid', then the idx will not be the proper */
	/* number.  So we simply grab the label from the test cyphertext string */
	// label[] is 16 bytes zero-initialized and we copy at most 15, so it
	// stays NUL-terminated.
	strncpy(label, pFmt->params.tests[0].ciphertext, 15);
	// Truncate just after the '$' that closes the "$dynamic_NNN$" signature.
	cp = strchr(&label[1], '$');
	if (NULL != cp) cp[1] = 0;
	// label_id is the signature without the leading '$' or trailing '$'.
	strcpy(label_id, &label[1]);
	cp = strchr(label_id, '$');
	if (NULL != cp) *cp = 0;

//	if (!options.format || strncmp(options.format, "dynamic_", 8))
//		pFmt->params.label = str_alloc_copy("dynamic");
//	else
		pFmt->params.label = str_alloc_copy(label_id);

	strcpy(curdat.dynamic_WHICH_TYPE_SIG, label);

	curdat.dynamic_HASH_OFFSET = strlen(label);

	// Salt offset = signature length + hash length + 1 separator char.
	// For base64 inputs the hash length is measured; otherwise it comes
	// from the declared fixed binary size (default 32 hex chars / MD5).
	if (curdat.dynamic_base64_inout == 1 || curdat.dynamic_base64_inout == 3) {
		// we have to compute 'proper' offset
		const char *cp = pFmt->params.tests[0].ciphertext;	// note: shadows the outer 'cp'
		size_t len = base64_valid_length(&cp[curdat.dynamic_HASH_OFFSET], curdat.dynamic_base64_inout == 1 ? e_b64_crypt : e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + len + 1;
	}
	else if (curdat.dynamic_base64_inout == 2)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 16 + 1;
	else if (curdat.dynamic_40_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 40 + 1;
	else if (curdat.dynamic_48_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 48 + 1;
	else if (curdat.dynamic_64_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 64 + 1;
	else if (curdat.dynamic_56_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 56 + 1;
	else if (curdat.dynamic_80_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 80 + 1;
	else if (curdat.dynamic_96_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 96 + 1;
	else if (curdat.dynamic_128_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 128 + 1;
	else
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 32 + 1;

	// Snapshot curdat into this format's private data.
	pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
	memcpy(pFmt->private.data, &curdat, sizeof(private_subformat_data));

	// Sanity check: the signature we derived must prefix the test ciphertext.
	if (strncmp(curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext, strlen(curdat.dynamic_WHICH_TYPE_SIG)))
	{
		fprintf(stderr, "ERROR, when loading dynamic formats, the wrong curdat item was linked to this type:\nTYPE_SIG=%s\nTest_Dat=%s\n",
				curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext);
		return 0;
	}
	return 1;
}
/*
 * Register one 'local' (late-bound) dynamic format, created after startup.
 * Returns the newly loaded fmt_main slot and stores its assigned type number
 * (6000 + slot) into *type.  curdat (and the globals derived from it) is
 * saved and restored around the load, because init() may already have run.
 */
struct fmt_main *dynamic_Register_local_format(int *type) {
	int num=nLocalFmts++;
	private_subformat_data keep;

	/* lazily allocate the (fixed, 1000-slot) local format array */
	if (!pLocalFmts)
		pLocalFmts = mem_calloc_tiny(1000*sizeof(struct fmt_main), 16);

	/* since these are loaded LATE in the process, init() has been called
	 * and we HAVE to preserve the already loaded setup. This will happen
	 * if we run a crack, but do not specify a specific dyna format
	 */
	memcpy(&keep, &curdat, sizeof(private_subformat_data));
	/* NOTE(review): the return value of LoadOneFormat is not checked; a
	   failed load would leave a partially-initialized slot — presumably
	   callers only pass known-good expressions.  Verify. */
	LoadOneFormat(num+6000, &(pLocalFmts[num]));
	memcpy(&curdat, &keep, sizeof(private_subformat_data));
	/* re-derive the globals that shadow fields of the restored curdat */
	dynamic_use_sse = curdat.dynamic_use_sse;
	force_md5_ctx = curdat.force_md5_ctx;
	*type = num+6000;
	return &(pLocalFmts[num]);
}
/*
 * Discover and load all valid dynamic formats, or just the single one the
 * user asked for (--format=dynamic_N, or --format=dynamic --subformat=dynamic_N).
 * On return, *ptr points at the array of loaded formats and the return value
 * is how many were loaded (also stored in the global nFmts).  Returns 0 when
 * an explicitly requested format is invalid or fails to load.
 */
int dynamic_Register_formats(struct fmt_main **ptr)
{
	int count, i, idx, single=-1, wildcard = 0, pop[5000];
	extern struct options_main options;

	/* a '*' in --format means a wildcard list: never treat as one format */
	if (options.format && strstr(options.format, "*"))
		wildcard = 1;
	Dynamic_Load_itoa16_w2();
	/* --format=dynamic_N selects exactly one sub-format */
	if (!wildcard && options.format &&
		!strncmp(options.format, "dynamic_", 8))
		sscanf(options.format, "dynamic_%d", &single);
	if (options.format && options.subformat && !strcmp(options.format, "dynamic") && !strncmp(options.subformat, "dynamic_", 8))
		sscanf(options.subformat, "dynamic_%d", &single);

	/* bare (signature-less) hashes: command line wins, else the config
	   option DynamicAlwaysUseBareHashes (default on) */
	if (options.dynamic_bare_hashes_always_valid == 'Y')
		dynamic_allow_rawhash_fixup = 1;
	else if (options.dynamic_bare_hashes_always_valid != 'N' && cfg_get_bool(SECTION_OPTIONS, NULL, "DynamicAlwaysUseBareHashes", 1))
		dynamic_allow_rawhash_fixup = 1;

	if (single != -1) {
		// user wanted only a 'specific' format. Simply load that one.
		dynamic_allow_rawhash_fixup = 1;
		if (dynamic_IS_VALID(single, 1) == 0)
			return 0;
		pFmts = mem_alloc_tiny(sizeof(pFmts[0]), MEM_ALIGN_WORD);
		if (!LoadOneFormat(single, pFmts))
			return 0;
		*ptr = pFmts;
		return (nFmts = 1);
	}

	/* first pass: mark which of the 5000 possible ids are populated */
	for (count = i = 0; i < 5000; ++i) {
		if ((pop[i] = (dynamic_IS_VALID(i, 0) == 1)))
			++count;
	}
	// Ok, now we know how many formats we have. Load them
	pFmts = mem_alloc_tiny(sizeof(pFmts[0])*count, MEM_ALIGN_WORD);
	/* second pass: load each populated id; a failed load shrinks count */
	for (idx = i = 0; i < 5000; ++i) {
		if (pop[i]) {
			if (LoadOneFormat(i, &pFmts[idx]) == 0)
				--count;
			else
				++idx;
		}
	}
	*ptr = pFmts;
	return (nFmts = count);
}
/*
* finds the 'proper' sub format from the allocated formats, IFF that format 'exists'
*/
/*
 * Find the loaded fmt_main whose type signature is "$dynamic_<which>$".
 * Both the startup-registered formats and the late 'local' formats are
 * searched; returns NULL if that sub-format was never loaded.
 */
static struct fmt_main *dynamic_Get_fmt_main(int which)
{
	char sig[40];
	int n;

	sprintf(sig, "$dynamic_%d$", which);
	/* scan the formats registered at startup... */
	for (n = 0; n < nFmts; ++n) {
		private_subformat_data *priv = pFmts[n].private.data;
		if (strcmp(priv->dynamic_WHICH_TYPE_SIG, sig) == 0)
			return &pFmts[n];
	}
	/* ...then any late-registered local formats */
	for (n = 0; n < nLocalFmts; ++n) {
		private_subformat_data *priv = pLocalFmts[n].private.data;
		if (strcmp(priv->dynamic_WHICH_TYPE_SIG, sig) == 0)
			return &pLocalFmts[n];
	}
	return NULL;
}
/*
* This function will 'forget' which md5-gen subtype we are working with. It will allow
* a different type to be used. Very useful for things like -test (benchmarking).
*/
static void dynamic_RESET(struct fmt_main *fmt)
{
	/* 'fmt' is unused; the reset acts on the module-wide state */
	/* wipe the per-subformat setup and all cached key/salt bookkeeping */
	memset(&curdat, 0, sizeof(curdat));
	m_count = 0;
	keys_dirty = 0;
	cursalt=cursalt2=username=0;
	saltlen=saltlen2=usernamelen=0;
	// make 'sure' we startout with blank inputs.
	m_count = 0;
	/* only scrub the input buffers if init() has allocated them already */
#ifdef SIMD_COEF_32
	if (input_buf) {
#else
	if (input_buf_X86) {
#endif
		__nonMP_DynamicFunc__clean_input_full();
		__nonMP_DynamicFunc__clean_input2_full();
	}
}
/*
 * This will LINK our functions into some other fmt_main structure. That way
 * that structure can use our code. The other *_fmt.c file will need to
 * 'override' the valid, the binary and the salt functions, and make changes
 * to the hash, BEFORE calling into the dynamic valid/binary/salt functions.
 * Other than those functions (and calling into this linkage function at init time)
 * that is about all that needs to be in that 'other' *_fmt.c file, as long as the
 * format is part of the md5-generic 'class' of functions.
 */
struct fmt_main *dynamic_THIN_FORMAT_LINK(struct fmt_main *pFmt, char *ciphertext, char *orig_sig, int bInitAlso)
{
	int i, valid, nFmtNum;
	struct fmt_main *pFmtLocal;
	static char subformat[17], *cp;

	dynamic_allow_rawhash_fixup = 0;
	/* pull the leading "$dynamic_N$" signature out of the sample hash */
	strncpy(subformat, ciphertext, 16);
	subformat[16] = 0;
	cp = strchr(&subformat[9], '$');
	if (cp)
		cp[1] = 0;
	nFmtNum = -1;
	sscanf(subformat, "$dynamic_%d", &nFmtNum);
	if (nFmtNum == -1)
		error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);
	/* locate the already-registered dynamic sub-format for that number */
	pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
	if (pFmtLocal == NULL)
		error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);
	/* the sample ciphertext must pass the sub-format's own valid() */
	valid = pFmtLocal->methods.valid(ciphertext, pFmtLocal);
	if (!valid)
		error_msg("Error, trying to link to %s using ciphertext=%s FAILED\n", subformat, ciphertext);

	/* adopt the linked format's parameters into the thin format */
	pFmt->params.algorithm_name = pFmtLocal->params.algorithm_name;
	/* only tighten the plaintext length, never widen it */
	if (pFmt->params.plaintext_length == 0 ||
		pFmt->params.plaintext_length > pFmtLocal->params.plaintext_length) {
		pFmt->params.plaintext_length = pFmtLocal->params.plaintext_length;
		pFmt->params.plaintext_min_length = pFmtLocal->params.plaintext_min_length;
	}
	pFmt->params.max_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
	/* NOTE(review): min is seeded from the linked format's MAX and then
	   capped at 64 — looks intentional (keep batches full), but confirm
	   it is not a typo for min_keys_per_crypt. */
	pFmt->params.min_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
	if (pFmt->params.min_keys_per_crypt > 64)
		pFmt->params.min_keys_per_crypt = 64;
	pFmt->params.flags = pFmtLocal->params.flags;
	/* dynamic salts are handed around as a pointer-sized blob */
	if (pFmtLocal->params.salt_size)
		pFmt->params.salt_size = sizeof(void*);
	else
		pFmt->params.salt_size = 0;

	/* re-route all comparing/hashing/key methods to the dynamic engine */
	pFmt->methods.cmp_all = pFmtLocal->methods.cmp_all;
	pFmt->methods.cmp_one = pFmtLocal->methods.cmp_one;
	pFmt->methods.cmp_exact = pFmtLocal->methods.cmp_exact;
	for (i = 0; i < FMT_TUNABLE_COSTS; ++i) {
		pFmt->methods.tunable_cost_value[i] = pFmtLocal->methods.tunable_cost_value[i];
		pFmt->params.tunable_cost_name[i] = pFmtLocal->params.tunable_cost_name[i];
	}
	pFmt->methods.source = pFmtLocal->methods.source;
	pFmt->methods.set_salt = pFmtLocal->methods.set_salt;
	pFmt->methods.salt = pFmtLocal->methods.salt;
	pFmt->methods.done = pFmtLocal->methods.done;
	pFmt->methods.salt_hash = pFmtLocal->methods.salt_hash;
	pFmt->methods.split = pFmtLocal->methods.split;
	pFmt->methods.set_key = pFmtLocal->methods.set_key;
	pFmt->methods.get_key = pFmtLocal->methods.get_key;
	pFmt->methods.clear_keys = pFmtLocal->methods.clear_keys;
	pFmt->methods.crypt_all = pFmtLocal->methods.crypt_all;
	pFmt->methods.prepare = pFmtLocal->methods.prepare;
	pFmt->methods.salt_compare = pFmtLocal->methods.salt_compare;
	for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
	{
		pFmt->methods.binary_hash[i] = pFmtLocal->methods.binary_hash[i];
		pFmt->methods.get_hash[i] = pFmtLocal->methods.get_hash[i];
	}
	if (bInitAlso)
	{
		//fprintf(stderr, "dynamic_THIN_FORMAT_LINK() calling init(%s)\n", subformat);
		init(pFmtLocal);
	}
	/* the thin format gets its own copy of the private sub-format data */
	pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
	memcpy(pFmt->private.data, pFmtLocal->private.data, sizeof(private_subformat_data));
	return pFmtLocal;
}
// We ONLY deal with hex hashes at this time. If we later have to deal with
// base-64, this will become harder. Before this function was added we had bugs
// where many things were loaded as 'being' valid, even if not.
/*
 * Decide whether 'ciphertext' looks like a bare (signature-less) hex hash of
 * the length this sub-format expects.  Returns nonzero when it does.
 */
static int looks_like_raw_hash(char *ciphertext, private_subformat_data *pPriv)
{
	int hexLen = CIPHERTEXT_LENGTH, i;

	/* pick the raw hex length this sub-format was configured for */
	if (pPriv->dynamic_40_byte_input)
		hexLen = 40;
	else if (pPriv->dynamic_48_byte_input)
		hexLen = 48;
	else if (pPriv->dynamic_64_byte_input)
		hexLen = 64;
	else if (pPriv->dynamic_56_byte_input)
		hexLen = 56;
	else if (pPriv->dynamic_80_byte_input)
		hexLen = 80;
	else if (pPriv->dynamic_96_byte_input)
		hexLen = 96;
	else if (pPriv->dynamic_128_byte_input)
		hexLen = 128;

	/* every one of the first hexLen characters must be a hex digit */
	for (i = 0; i < hexLen; ++i) {
		if (atoi16[ARCH_INDEX(ciphertext[i])] == 0x7f)
			return 0;
	}

	if (pPriv->pSetup->flags & MGF_SALTED) {
		/* salted: the hex digits must be followed by a '$' separator */
		return ciphertext[hexLen] == '$';
	}
	/* unsalted: the string must end exactly at the hash */
	return ciphertext[hexLen] == 0;
}
/*
 * If 'ciphertext' is a bare hash that looks like it belongs to this
 * sub-format, return a copy with the proper "$dynamic_N$" signature
 * prepended; otherwise return the input unchanged.
 */
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv)
{
	/* empty strings and '*' placeholders are passed through untouched */
	if (!ciphertext || *ciphertext == 0 || *ciphertext == '*')
		return ciphertext;
	/* only fix up when allowed, when there is no signature yet, and when
	   the string really looks like a raw hash of the proper length */
	if (dynamic_allow_rawhash_fixup && strncmp(ciphertext, "$dynamic_", 9) && looks_like_raw_hash(ciphertext, pPriv))
	{
		static char __ciphertext[512+24];
		/* every feature this format requires (salt, second salt, user
		   name, extra fields) must already be present in the bare hash,
		   otherwise we refuse to claim it */
		if (pPriv->pSetup->flags & MGF_SALTED) {
			if (!strchr(ciphertext, '$'))
				return ciphertext;
		}
		if ( (pPriv->pSetup->flags & MGF_SALTED2) == MGF_SALTED2) {
			if (!strstr(ciphertext, "$$2"))
				return ciphertext;
		}
		if ( (pPriv->pSetup->flags & MGF_USERNAME) == MGF_USERNAME) {
			if (!strstr(ciphertext, "$$U"))
				return ciphertext;
		}
		if (pPriv->FldMask) {
			int i;
			for (i = 0; i < 10; ++i) {
				if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
					char Fld[5];
					sprintf(Fld, "$$F%d", i);
					if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
						return ciphertext;
				}
			}
		}
		/* prepend the "$dynamic_N$" signature.  NOTE: the result lives in
		   a static buffer, so it is only valid until the next call. */
		strcpy(__ciphertext, pPriv->dynamic_WHICH_TYPE_SIG);
		strnzcpy(&__ciphertext[strlen(__ciphertext)], ciphertext, 512);
		return __ciphertext;
	}
	return ciphertext;
}
/*
 * Return nonzero when 'ciphertext' already starts with the "$dynamic_N$"
 * signature of the given format, i.e. no fixup is needed.
 */
int text_in_dynamic_format_already(struct fmt_main *pFmt, char *ciphertext)
{
	private_subformat_data *pPriv;

	if (!pFmt)
		return 0;
	/* NOTE, it 'is' possible to get called here, without the private stuff being setup
	   properly (in valid, etc). So, we simply grab the static private stuff each time */
	pPriv = pFmt->private.data;
	if (!pPriv || !ciphertext)
		return 0;
	return strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG)) == 0;
}
// if caseType == 1, return cp
// if caseType == 2, return upcase(cp)
// if caseType == 3, return locase(cp)
// if caseType == 4, return upcaseFirstChar(locase(cp))
/*
 * Return 'cp' transformed per caseType (see the table above).  When the
 * transformed text is identical to the input, the original pointer is
 * returned; otherwise a pointer to a static buffer is returned.
 */
static char *HandleCase(char *cp, int caseType)
{
	static UTF8 dest[256];

	if (caseType == 2) {
		/* full upcase */
		enc_uc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
	} else if (caseType == 3 || caseType == 4) {
		/* full locase, optionally with the first character upcased */
		enc_lc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
		if (caseType == 4)
			dest[0] = low2up_ansi(dest[0]);
	} else {
		/* caseType 1 (and anything unrecognized): unchanged */
		return cp;
	}
	return strcmp((char*)dest, cp) ? (char*)dest : cp;
}
/*
 * Return the real (script-declared) salt length of a dynamic format, or -1
 * when pFmt is not a dynamic format or the formats are not yet loaded.
 */
int dynamic_real_salt_length(struct fmt_main *pFmt)
{
	private_subformat_data *pPriv;

	if (!(pFmt->params.flags & FMT_DYNAMIC))
		return -1; // NOT a dynamic format
	pPriv = pFmt->private.data;
	if (!pPriv || !pPriv->pSetup)
		return -1; // called before the dynamic formats were loaded!!
	/* SaltLen may be stored negative (meaning "up to N"); report magnitude */
	return abs(pPriv->pSetup->SaltLen);
}
#else
#warning Notice: Dynamic format disabled from build.
#endif /* DYNAMIC_DISABLED */
|
GB_unaryop__identity_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_uint8
// op(A') function: GB_tran__identity_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
// type of the entries of the input matrix A
#define GB_ATYPE \
    uint8_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// access the pC-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise: Cx [p] = (uint8_t) Ax [p] for all
// p in 0..anz-1, in parallel.  Returns GrB_NO_VALUE when this operator has
// been disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB_unop__identity_uint8_uint8
(
    uint8_t *restrict Cx,         // output array, anz entries
    const uint8_t *restrict Ax,   // input array, anz entries
    int64_t anz,                  // number of entries
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;       // operator disabled by GB_control.h
    #else
    // each iteration is independent, so a static schedule is load-balanced
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;       // Cx [p] = (uint8_t) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The work is done by the generic template GB_unaryop_transpose.c, which is
// included here and instantiated with the GB_* macros defined above.
GrB_Info GB_tran__identity_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,     // per-slice row counts workspace
    GBI_single_iterator Iter,         // iterator over the slices of A
    const int64_t *restrict A_slice,  // how A is partitioned across naslice
    int naslice                       // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;           // operator disabled by GB_control.h
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ChMatrix.h | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Alessandro Tasora, Radu Serban
// =============================================================================
#ifndef CHMATRIX_H
#define CHMATRIX_H
#include "chrono/core/ChCoordsys.h"
#include "chrono/core/ChException.h"
#include "chrono/ChConfig.h"
#include "chrono/serialization/ChArchive.h"
#include "chrono/serialization/ChArchiveAsciiDump.h"
#if defined(CHRONO_HAS_SSE) || defined(CHRONO_HAS_AVX)
#include <immintrin.h>
#endif
#if defined(CHRONO_HAS_NEON)
#include <arm_neon.h>
#endif
namespace chrono {
//
// FAST MACROS TO SPEEDUP CODE
//
// Element accessors for small fixed-size (3x3, 3x4, 4x4) matrices stored
// row-wise, mapped onto the linear SetElementN/GetElementN index.
// NOTE: 'a' is not parenthesized in the expansions below, so pass a simple
// identifier or literal, not an expression.

#define Set33Element(a, b, val) SetElementN(((a * 3) + (b)), val)
#define Get33Element(a, b) GetElementN((a * 3) + (b))

#define Set34Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get34Element(a, b) GetElementN((a * 4) + (b))
// Sets an entire row of a Nx4 matrix in one statement.
#define Set34Row(ma, a, val0, val1, val2, val3) \
    ma.SetElementN((a * 4), val0);              \
    ma.SetElementN((a * 4) + 1, val1);          \
    ma.SetElementN((a * 4) + 2, val2);          \
    ma.SetElementN((a * 4) + 3, val3);

#define Set44Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get44Element(a, b) GetElementN((a * 4) + (b))
// forward declaration
template <class Real = double>
class ChMatrixDynamic;
///
/// ChMatrix:
///
/// A base class for matrix objects (tables of NxM numbers).
/// To access elements, the indexes start from zero, and
/// you must indicate first row, then column, that is: m(2,4)
/// means the element at 3rd row, 5th column.
/// This is an abstract class, so you cannot instantiate
/// objects from it: you must rather create matrices using the
/// specialized child classes like ChMatrixDynamic, ChMatrixNM,
/// ChMatrix33 and so on; all of them have this same base class.
/// Warning: for optimization reasons, not all functions will
/// check about boundaries of element indexes and matrix sizes (in
/// some cases, if sizes are wrong, debug asserts are used).
///
/// Further info at the @ref mathematical_objects manual page.
template <class Real = double>
class ChMatrix {
protected:
//
// DATA
//
int rows = 1;     ///< number of rows
int columns = 1;  ///< number of columns
Real* address;    ///< element buffer, row-major; allocated/owned by the concrete child class (see Resize)
public:
//
// CONSTRUCTORS (none - abstract class that must be implemented with child classes)
//
/// Virtual destructor. The element buffer itself is owned and released by
/// the concrete child classes (ChMatrixDynamic, ChMatrixNM, ...).
virtual ~ChMatrix() {}

//
// OPERATORS OVERLOADING
//

/// Parenthesis () operator, to access a single element of the matrix, by
/// supplying the row and the column (indexes start from 0).
/// For example: m(3,5) gets the element at the 4th row, 6th column.
/// Value is returned by reference, so it can be modified, like in m(1,2)=10.
Real& operator()(const int row, const int col) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return (*(address + col + (row * columns)));  // row-major layout
}
const Real& operator()(const int row, const int col) const {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return (*(address + col + (row * columns)));
}

/// Parenthesis () operator, to access a single element of the matrix, by
/// supplying the ordinal of the element (indexes start from 0).
/// For example: m(3) gets the 4th element, counting row by row.
/// Mostly useful if the matrix is Nx1 sized (i.e. a N-element vector).
/// Value is returned by reference, so it can be modified, like in m(1,2)=10.
Real& operator()(const int el) {
    assert(el >= 0 && el < rows * columns);
    return (*(address + el));
}
const Real& operator()(const int el) const {
    assert(el >= 0 && el < rows * columns);
    return (*(address + el));
}

/// The [] operator returns the address of the n-th row. This is mostly
/// for compatibility with old matrix programming styles (2d array-like)
/// where to access an element at row i, column j, one can write mymatrix[i][j].
Real* operator[](const int row) {
    assert(row >= 0 && row < rows);
    return ((address + (row * columns)));
}
const Real* operator[](const int row) const {
    assert(row >= 0 && row < rows);
    return ((address + (row * columns)));
}

/// Multiplies this matrix by a factor, in place
ChMatrix<Real>& operator*=(const Real factor) {
    MatrScale(factor);
    return *this;
}

/// Increments this matrix by another matrix, in place
template <class RealB>
ChMatrix<Real>& operator+=(const ChMatrix<RealB>& matbis) {
    MatrInc(matbis);
    return *this;
}

/// Decrements this matrix by another matrix, in place
template <class RealB>
ChMatrix<Real>& operator-=(const ChMatrix<RealB>& matbis) {
    MatrDec(matbis);
    return *this;
}

/// Matrices are equal?
bool operator==(const ChMatrix<Real>& other) { return Equals(other); }
/// Matrices are not equal?
bool operator!=(const ChMatrix<Real>& other) { return !Equals(other); }

/// Assignment operator (resizes the destination if needed).
virtual ChMatrix<Real>& operator=(const ChMatrix<Real>& matbis) {
    if (&matbis != this)  // guard against self-assignment
        CopyFromMatrix(matbis);
    return *this;
}
/// Assignment from a matrix with a different scalar type (elements are cast).
template <class RealB>
ChMatrix<Real>& operator=(const ChMatrix<RealB>& matbis) {
    CopyFromMatrix(matbis);
    return *this;
}
//
// FUNCTIONS
//
/// Sets the element at row,col position. Indexes start with zero.
void SetElement(int row, int col, Real elem) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
    *(address + col + (row * columns)) = elem;  // row-major layout
}

/// Gets the element at row,col position. Indexes start with zero.
/// The return value is a copy of original value. Use Element() instead if you
/// want to access directly by reference the original element.
Real GetElement(int row, int col) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
    return (*(address + col + (row * columns)));
}
Real GetElement(int row, int col) const {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
    return (*(address + col + (row * columns)));
}

/// Sets the Nth element, counting row after row.
void SetElementN(int index, Real elem) {
    assert(index >= 0 && index < (rows * columns));  // boundary checks
    *(address + index) = elem;
}

/// Gets the Nth element, counting row after row.
Real GetElementN(int index) {
    assert(index >= 0 && index < (rows * columns));
    return (*(address + index));
}
const Real GetElementN(int index) const {
    assert(index >= 0 && index < (rows * columns));
    return (*(address + index));
}

/// Access a single element of the matrix, by
/// supplying the row and the column (indexes start from 0).
/// Value is returned by reference, so it can be modified, like in m.Element(1,2)=10.
Real& Element(int row, int col) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return (*(address + col + (row * columns)));
}
const Real& Element(int row, int col) const {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return (*(address + col + (row * columns)));
}
/// Access a single element of the matrix, the Nth element, counting row after row.
/// Value is returned by reference, so it can be modified, like in m.Element(5)=10.
Real& ElementN(int index) {
    assert(index >= 0 && index < (rows * columns));
    return (*(address + index));
}
const Real& ElementN(int index) const {
    assert(index >= 0 && index < (rows * columns));
    return (*(address + index));
}

/// Access directly the "Real* address" buffer. Warning! this is a low level
/// function, it should be used in rare cases, if really needed!
Real* GetAddress() { return address; }
const Real* GetAddress() const { return address; }

/// Gets the number of rows
int GetRows() const { return rows; }

/// Gets the number of columns
int GetColumns() const { return columns; }

/// Reallocate memory for a new size. VIRTUAL! Must be implemented by child classes!
/// (the base-class implementation is intentionally a no-op)
virtual void Resize(int nrows, int ncols) {}
/// Swaps the columns a and b
void SwapColumns(int a, int b) {
Real temp;
for (int i = 0; i < rows; i++) {
temp = GetElement(i, a);
SetElement(i, a, GetElement(i, b));
SetElement(i, b, temp);
}
}
/// Swap the rows a and b
void SwapRows(int a, int b) {
Real temp;
for (int i = 0; i < columns; i++) {
temp = GetElement(a, i);
SetElement(a, i, GetElement(b, i));
SetElement(b, i, temp);
}
}
/// Fill the diagonal elements, given a sample.
/// Note that the matrix must already be square (no check for
/// rectangular matrices!), and the extra-diagonal elements are
/// not modified -this function does not set them to 0-
void FillDiag(Real sample) {
    for (int i = 0; i < rows; ++i)
        SetElement(i, i, sample);
}

/// Fill the matrix with the same value in all elements
void FillElem(Real sample) {
    for (int i = 0; i < rows * columns; ++i)
        SetElementN(i, sample);
}

/// Fill the matrix with random float numbers, falling within the
/// "max"/"min" range.
/// NOTE: the parameter order is (max, min).
void FillRandom(Real max, Real min) {
    for (int i = 0; i < rows * columns; ++i)
        SetElementN(i, min + (Real)ChRandom() * (max - min));
}

/// Resets the matrix to zero (warning: simply sets memory to 0 bytes!)
virtual void Reset() {
    // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
    for (int i = 0; i < rows * columns; ++i)
        this->address[i] = 0;
}

/// Reset to zeroes and (if needed) changes the size to have row and col
void Reset(int nrows, int ncols) {
    Resize(nrows, ncols);
    // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
    for (int i = 0; i < rows * columns; ++i)
        this->address[i] = 0;
}

/// Reset to identity matrix (ones on diagonal, zero elsewhere)
/// NOTE: assumes the matrix is square (FillDiag does not check).
void SetIdentity() {
    Reset();
    FillDiag(1.0);
}
/// Copy a matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
template <class RealB>
void CopyFromMatrix(const ChMatrix<RealB>& matra) {
    Resize(matra.GetRows(), matra.GetColumns());
    // elementwise copy with a per-element cast to this matrix's scalar type
    const RealB* src = matra.GetAddress();
    const int total = rows * columns;
    for (int k = 0; k < total; ++k)
        address[k] = (Real)src[k];
}
/// Copy the transpose of matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
template <class RealB>
void CopyFromMatrixT(const ChMatrix<RealB>& matra) {
    // destination takes the transposed shape of the source
    Resize(matra.GetColumns(), matra.GetRows());
    for (int r = 0; r < matra.GetRows(); ++r)
        for (int c = 0; c < matra.GetColumns(); ++c)
            SetElement(c, r, (Real)matra.Element(r, c));
}
/// Copy the transposed upper triangular part of "matra" in the lower triangular
/// part of this matrix. (matra must be square)
/// Note that the destination matrix will be resized if necessary.
/// NOTE: both loops run over GetRows(), so a non-square input is undefined.
template <class RealB>                              //                      _______                       //
void CopyTUpMatrix(const ChMatrix<RealB>& matra)    //    \                 |       |\                     //
{                                                   //      \      --->     |       |  \                   //
    Resize(matra.GetRows(), matra.GetColumns());    //        \             |this    \                     //
    for (int i = 0; i < matra.GetRows(); i++) {     //      A'  \           |_________\                    //
        for (int j = 0; j < matra.GetRows(); j++)
            SetElement(j, i, (Real)matra.GetElement(i, j));
    }
}

/// Copy the transposed lower triangular part of "matra" in the upper triangular
/// part of this matrix. (matra must be square)
/// Note that the destination matrix will be resized if necessary.
/// NOTE: both loops run over GetRows(), so a non-square input is undefined.
template <class RealB>                              //      _______                                        //
void CopyTLwMatrix(const ChMatrix<RealB>& matra)    //     |\              \      |                        //
{                                                   //     |  \     --->     \this|                       //
    Resize(matra.GetRows(), matra.GetColumns());    //     |A'  \              \  |                        //
    for (int i = 0; i < matra.GetRows(); i++) {     //     |______\              \|                        //
        for (int j = 0; j < matra.GetRows(); j++)
            SetElement(i, j, (Real)matra.GetElement(j, i));
    }
}
//
// STREAMING
//
/// Method to allow serialization of transient data in archives.
virtual void ArchiveOUT(ChArchiveOut& marchive) {
    // suggested: use versioning
    marchive.VersionWrite(1);
    // stream out all member data
    if (ChArchiveAsciiDump* mascii = dynamic_cast<ChArchiveAsciiDump*>(&marchive)) {
        // CUSTOM row x col 'intuitive' table-like log when using ChArchiveAsciiDump:
        mascii->indent();
        mascii->GetStream()->operator<<(rows);
        mascii->GetStream()->operator<<(" rows,  ");
        mascii->GetStream()->operator<<(columns);
        mascii->GetStream()->operator<<(" columns:\n");
        // one indented line per row, elements comma-separated
        for (int i = 0; i < rows; i++) {
            mascii->indent();
            for (int j = 0; j < columns; j++) {
                (*mascii->GetStream()) << Element(i, j);
                mascii->GetStream()->operator<<(", ");
            }
            mascii->GetStream()->operator<<("\n");
        }
    } else {
        // dimensions first, so ArchiveIN can resize before reading data
        marchive << make_ChNameValue("rows", rows);
        marchive << make_ChNameValue("columns", columns);
        // NORMAL array-based serialization:
        int tot_elements = GetRows() * GetColumns();
        ChValueSpecific< Real* > specVal(this->address, "data", 0);
        marchive.out_array_pre(specVal, tot_elements);
        for (int i = 0; i < tot_elements; i++) {
            marchive << CHNVP(ElementN(i), "");
            marchive.out_array_between(specVal, tot_elements);
        }
        marchive.out_array_end(specVal, tot_elements);
    }
}
/// Method to allow de serialization of transient data from archives.
/// Method to allow de-serialization of transient data from archives.
/// Reads the dimensions, resizes this matrix, then reads the elements
/// row after row (the mirror of the array written by ArchiveOUT).
virtual void ArchiveIN(ChArchiveIn& marchive) {
    // suggested: use versioning
    int version = marchive.VersionRead();
    (void)version;  // currently unused; read anyway to consume the field
    // stream in the dimensions, then resize/zero the matrix to fit
    int m_row, m_col;
    marchive >> make_ChNameValue("rows", m_row);
    marchive >> make_ChNameValue("columns", m_col);
    Reset(m_row, m_col);
    // custom input of matrix data as array
    // (cast before multiplying to avoid int overflow for huge matrices,
    // and use size_t for the loop index to avoid a signed/unsigned compare)
    size_t tot_elements = (size_t)GetRows() * (size_t)GetColumns();
    marchive.in_array_pre("data", tot_elements);
    for (size_t i = 0; i < tot_elements; i++) {
        marchive >> CHNVP(ElementN((int)i));
        marchive.in_array_between("data");
    }
    marchive.in_array_end("data");
}
/// Method to allow serializing transient data into in ascii
/// as a readable item, for example "chrono::GetLog() << myobject;"
/// ***OBSOLETE***
void StreamOUT(ChStreamOutAscii& mstream) {
    // header with the full dimensions
    mstream << "\n"
            << "Matrix " << GetRows() << " rows, " << GetColumns() << " columns."
            << "\n";
    // print at most an 8x8 top-left corner, with "..." for what is cut off
    for (int i = 0; i < ChMin(GetRows(), 8); i++) {
        for (int j = 0; j < ChMin(GetColumns(), 8); j++)
            mstream << GetElement(i, j) << "  ";
        if (GetColumns() > 8)
            mstream << "...";
        mstream << "\n";
    }
    if (GetRows() > 8)
        mstream << "... \n\n";
}
/// Method to allow serializing transient data into an ascii stream (ex. a file)
/// as a Matlab .dat file (all numbers in a row, separated by space, then CR)
void StreamOUTdenseMatlabFormat(ChStreamOutAscii& mstream) {
    const int n_rows = GetRows();
    const int n_cols = GetColumns();
    // one text line per matrix row, values separated by single spaces
    for (int r = 0; r < n_rows; r++) {
        for (int c = 0; c < n_cols; c++) {
            mstream << GetElement(r, c);
            if (c + 1 < n_cols)
                mstream << " ";
        }
        mstream << "\n";
    }
}
//
// MATH MEMBER FUNCTIONS.
// For speed reasons, sometimes size checking of operands is left to the user!
//
/// Flips the sign of every element of this matrix, in place.
void MatrNeg() {
    const int total = rows * columns;
    for (int k = 0; k < total; ++k)
        ElementN(k) = -ElementN(k);
}
/// Sum two matrices, and stores the result in "this" matrix: [this]=[A]+[B].
/// All three matrices must already have the same size (debug-checked only).
template <class RealB, class RealC>
void MatrAdd(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
    // use the public accessor here: 'matra.rows' is a protected member of a
    // *different* template instantiation (ChMatrix<RealB>) and is not
    // accessible from ChMatrix<Real> when RealB != Real
    assert(matra.GetColumns() == matrb.GetColumns() && matra.GetRows() == matrb.GetRows());
    assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) = (Real)(matra.ElementN(nel) + matrb.ElementN(nel));
}
/// Subtract two matrices, and stores the result in "this" matrix: [this]=[A]-[B].
/// All three matrices must already have the same size (debug-checked only).
template <class RealB, class RealC>
void MatrSub(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
    // use the public accessor here: 'matra.rows' is a protected member of a
    // *different* template instantiation (ChMatrix<RealB>) and is not
    // accessible from ChMatrix<Real> when RealB != Real
    assert(matra.GetColumns() == matrb.GetColumns() && matra.GetRows() == matrb.GetRows());
    assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) = (Real)(matra.ElementN(nel) - matrb.ElementN(nel));
}
/// Increments this matrix with another matrix A, as: [this]+=[A]
template <class RealB>
void MatrInc(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) += (Real)matra.ElementN(nel);
}
/// Increments this matrix by \p val, as [this]+=val
void MatrInc(Real val) {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) += val;
}
/// Decrements this matrix with another matrix A, as: [this]-=[A]
template <class RealB>
void MatrDec(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) -= (Real)matra.ElementN(nel);
}
/// Scales a matrix, multiplying all elements by a constant value: [this]*=f
void MatrScale(Real factor) {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) *= factor;
}
/// Scales a matrix, multiplying all element by all other elements of
/// matra (it is not the classical matrix multiplication!)
template <class RealB>
void MatrScale(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) *= (Real)matra.ElementN(nel);
}
/// Scales a matrix, dividing all elements by a constant value: [this]/=f
void MatrDivScale(Real factor) {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) /= factor;
}
/// Scales a matrix, dividing all element by all other elements of
/// matra (element-wise division, NOT the classical matrix multiplication!)
template <class RealB>
void MatrDivScale(const ChMatrix<RealB>& matra) {
    assert(matra.GetColumns() == columns && matra.GetRows() == rows);
    const int total = rows * columns;
    for (int k = 0; k < total; ++k)
        ElementN(k) /= (Real)matra.ElementN(k);
}
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
template <class RealB, class RealC>
void MatrMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetRows());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetColumns());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetColumns(); ++colres) {
for (row = 0; row < matra.GetRows(); ++row) {
sum = 0;
for (col = 0; col < matra.GetColumns(); ++col)
sum += (Real)(matra.Element(row, col) * matrb.Element(col, colres));
SetElement(row, colres, sum);
}
}
}
#ifdef CHRONO_HAS_AVX
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
/// AVX implementation: The speed up is marginal if size of the matrices are small, e.g. 3*3
/// Generally, as the matra.GetColumns() increases the method performs better.
/// NOTE(review): the column loop advances 4 at a time with no remainder
/// handling, so this appears to assume matrb.GetColumns() % 4 == 0 --
/// confirm with callers before using on other shapes.
void MatrMultiplyAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
    assert(matra.GetColumns() == matrb.GetRows());
    assert(this->rows == matra.GetRows());
    assert(this->columns == matrb.GetColumns());
    int A_Nrow = matra.GetRows();
    int B_Nrow = matrb.GetRows();
    int A_NCol = matra.GetColumns();
    int B_NCol = matrb.GetColumns();
    // Raw row-major buffers of the operands and of the destination.
    const double* A_add = matra.GetAddress();
    const double* B_add = matrb.GetAddress();
    double* this_Add = this->GetAddress();
    for (int rowA = 0; rowA < A_Nrow; rowA++) {
        // Produce 4 result columns of the current row per iteration.
        for (int colB = 0; colB < B_NCol; colB += 4) {
            __m256d sum = _mm256_setzero_pd();
            for (int elem = 0; elem < A_NCol; elem++) {
                // Broadcast one A element; multiply by 4 consecutive B elements
                // of the matching B row, accumulating 4 dot products at once.
                __m256d ymmA = _mm256_broadcast_sd(A_add + A_NCol * rowA + elem);
                __m256d ymmB = _mm256_loadu_pd(B_add + elem * B_NCol + colB);
                __m256d prod = _mm256_mul_pd(ymmA, ymmB);
                sum = _mm256_add_pd(sum, prod);
            }
            // Unaligned store of the 4 accumulated results.
            _mm256_storeu_pd(this_Add + rowA * B_NCol + colB, sum);
        }
    }
}
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Note: This method is faster than MatrMultiplyT if matra.GetColumns()%4=0 && matra.GetColumns()>8
/// It is still fast if matra.GetColumns() is large enough even if matra.GetColumns()%4!=0
void MatrMultiplyTAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
    assert(matra.GetColumns() == matrb.GetColumns());
    assert(this->GetRows() == matra.GetRows());
    assert(this->GetColumns() == matrb.GetRows());
    int A_Nrow = matra.GetRows();
    int B_Nrow = matrb.GetRows();
    int A_NCol = matra.GetColumns();
    int B_NCol = matrb.GetColumns();
    const double* A_add = matra.GetAddress();
    const double* B_add = matrb.GetAddress();
    // Since B is used transposed, each result entry is a dot product of an A
    // row with a B row. Vectorize in chunks of 4 and finish the tail scalar.
    bool NeedsPadding = (B_NCol % 4 != 0);
    int CorrectFAT = ((B_NCol >> 2) << 2);  // B_NCol rounded down to a multiple of 4
    for (int rowA = 0; rowA < A_Nrow; rowA++) {
        for (int rowB = 0; rowB < B_Nrow; rowB++) {
            int colB;
            double temp_sum = 0.0;
            __m256d sum = _mm256_setzero_pd();
            for (colB = 0; colB < CorrectFAT; colB += 4) {
                __m256d ymmA = _mm256_loadu_pd(A_add + rowA * A_NCol + colB);
                __m256d ymmB = _mm256_loadu_pd(B_add + rowB * B_NCol + colB);
                __m256d prod = _mm256_mul_pd(ymmA, ymmB);
                sum = _mm256_add_pd(sum, prod);
            }
            // Horizontal reduction: hadd gives [s0+s1, s0+s1, s2+s3, s2+s3],
            // so lanes 0 and 2 sum to the full dot product.
            // NOTE(review): reading lanes via a (double*) cast is type punning;
            // works on common compilers but _mm256_storeu_pd to a local array
            // would be the strictly portable form.
            sum = _mm256_hadd_pd(sum, sum);
            temp_sum = ((double*)&sum)[0] + ((double*)&sum)[2];
            // Scalar tail for the columns beyond the last multiple of 4.
            if (NeedsPadding)
                for (colB = CorrectFAT; colB < B_NCol; colB++) {
                    temp_sum += (matra.Element(rowA, colB) * matrb.Element(rowB, colB));
                }
            SetElement(rowA, rowB, temp_sum);
        }
    }
}
#endif
#ifdef CHRONO_HAS_NEON
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
/// NEON implementation: The speed up is marginal if size of the matrices are small.
/// Much like AVX, as the matra.GetColumns() increases the method performs better.
/// NOTE(review): the column loop advances 2 at a time with no remainder
/// handling, so this appears to assume matrb.GetColumns() % 2 == 0 --
/// confirm with callers before using on other shapes.
template <class RealB, class RealC>
void MatrMultiplyNEON(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
    assert(matra.GetColumns() == matrb.GetRows());
    assert(this->rows == matra.GetRows());
    assert(this->columns == matrb.GetColumns());
    int A_Nrow = matra.GetRows();
    int B_Nrow = matrb.GetRows();
    int A_NCol = matra.GetColumns();
    int B_NCol = matrb.GetColumns();
    const double* A_add = matra.GetAddress();
    const double* B_add = matrb.GetAddress();
    double* this_Add = this->GetAddress();
    // Prepare a zeroed register once, outside the loops.
    float64_t zero_mem = 0.0;
    float64x2_t zero_reg = vld1q_dup_f64(&zero_mem);
    for (int rowA = 0; rowA < A_Nrow; rowA++) {
        for (int colB = 0; colB < B_NCol; colB += 2) {
            // Fix: the accumulator was initialized with vmovq_n_f64(zero_reg),
            // but vmovq_n_f64 takes a float64_t scalar, not a float64x2_t
            // vector -- that does not compile. Copy the zero register instead.
            float64x2_t sum = zero_reg;
            for (int elem = 0; elem < A_NCol; elem++) {
                // Duplicate one A element across both lanes, then fused
                // multiply-accumulate with two consecutive B elements.
                float64x2_t V_2DA = vld1q_dup_f64(A_add + A_NCol * rowA + elem);
                float64x2_t V_2DB = vld1q_f64(B_add + elem * B_NCol + colB);
                sum = vfmaq_f64(sum, V_2DA, V_2DB);
            }
            vst1q_f64(this_Add + rowA * B_NCol + colB, sum);
        }
    }
}
#endif
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Faster than doing B.MatrTranspose(); result.MatrMultiply(A,B);
/// Note: no check on mistaken size of this!
template <class RealB, class RealC>
void MatrMultiplyT(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetRows());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetRows(); ++colres) {
for (row = 0; row < matra.GetRows(); ++row) {
sum = 0;
for (col = 0; col < matra.GetColumns(); ++col)
sum += (Real)(matra.Element(row, col) * matrb.Element(colres, col));
SetElement(row, colres, sum);
}
}
}
/// Multiplies two matrices (the first is considered transposed): [this]=[A]'*[B]
/// Faster than doing A.MatrTranspose(); result.MatrMultiply(A,B);
template <class RealB, class RealC>
void MatrTMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetRows() == matrb.GetRows());
assert(this->rows == matra.GetColumns());
assert(this->columns == matrb.GetColumns());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetColumns(); ++colres) {
for (row = 0; row < matra.GetColumns(); ++row) {
sum = 0;
for (col = 0; col < (matra.GetRows()); ++col)
sum += (Real)(matra.Element(col, row) * matrb.Element(col, colres));
SetElement(row, colres, sum);
}
}
}
/// Computes dot product between two column-matrices (vectors) with
/// same size. Returns a scalar value.
template <class RealB, class RealC>
static Real MatrDot(const ChMatrix<RealB>& ma, const ChMatrix<RealC>& mb) {
    assert(ma.GetColumns() == mb.GetColumns() && ma.GetRows() == mb.GetRows());
    Real tot = 0;
    // Generalized to iterate over all rows*columns elements: the previous
    // loop ran only over GetRows(), which silently dropped elements for row
    // vectors (1 x n). For column vectors (n x 1) rows*columns == rows, so
    // the original behavior is preserved exactly.
    int nels = ma.GetRows() * ma.GetColumns();
    for (int i = 0; i < nels; ++i)
        tot += (Real)(ma.ElementN(i) * mb.ElementN(i));
    return tot;
}
/// Transpose this matrix in place.
/// Square matrices are transposed by swapping mirror elements without any
/// temporary copy; rectangular matrices fall back to a full copy.
void MatrTranspose() {
    if (columns == rows)  // Square transp.is optimized
    {
        // Walk the upper triangle and swap each entry with its mirror;
        // diagonal entries are left untouched.
        for (int row = 0; row < rows; ++row)
            for (int col = row; col < columns; ++col)
                if (row != col) {
                    Real temp = Element(row, col);
                    Element(row, col) = Element(col, row);
                    Element(col, row) = temp;
                }
        // Swap the dimensions (a no-op for the square case, kept for symmetry
        // with the rectangular branch).
        int tmpr = rows;
        rows = columns;
        columns = tmpr;
    } else  // Naive implementation for rectangular case. Not in-place. Slower.
    {
        // Take a full copy, relabel the dimensions (the element count is the
        // same, so the buffer is not reallocated), then gather transposed.
        ChMatrixDynamic<Real> matrcopy(*this);
        int tmpr = rows;
        rows = columns;
        columns = tmpr;  // don't realloc buffer, anyway
        for (int row = 0; row < rows; ++row)
            for (int col = 0; col < columns; ++col)
                Element(row, col) = matrcopy.Element(col, row);
    }
}
/// Returns the determinant of the matrix, by direct expansion.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
Real Det() {
    assert(this->GetRows() == this->GetColumns());
    assert(this->GetRows() <= 4);
    if (this->GetRows() != this->GetColumns())
        throw("Cannot compute matrix determinant because rectangular matrix");
    if (this->GetRows() > 4)
        // Fix: the message claimed "larger than 3x3" while the guard above
        // correctly allows matrices up to 4x4.
        throw("Cannot compute matrix determinant because matr. larger than 4x4");
    Real det = 0;
    switch (this->GetRows()) {
        case 1:
            det = (*this)(0, 0);
            break;
        case 2:
            // ad - bc
            det = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
            break;
        case 3:
            // Rule of Sarrus: three positive and three negative diagonals.
            det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) +
                  (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(2, 0) * (*this)(1, 1) * (*this)(0, 2) -
                  (*this)(2, 1) * (*this)(1, 2) * (*this)(0, 0) - (*this)(2, 2) * (*this)(1, 0) * (*this)(0, 1);
            break;
        case 4:
            // Full Leibniz expansion: all 24 signed permutation products.
            det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3) +
                  (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) +
                  (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) +
                  (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                  (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) +
                  (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) +
                  (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3) +
                  (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) +
                  (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) +
                  (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                  (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) +
                  (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) -
                  (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                  (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) -
                  (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) -
                  (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3) -
                  (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                  (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) -
                  (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                  (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) -
                  (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) -
                  (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2) -
                  (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                  (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1);
            break;
    }
    return det;
}
/// Returns the inverse of the matrix, computed in place via the adjugate
/// (transposed cofactor matrix) divided by the determinant.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
void MatrInverse() {
    assert(this->GetRows() == this->GetColumns());
    assert(this->GetRows() <= 4);
    assert(this->Det() != 0);
    if (this->GetRows() != this->GetColumns())
        throw("Cannot compute matrix inverse because rectangular matrix");
    if (this->GetRows() > 4)
        throw("Cannot compute matrix inverse because matr. larger than 4x4");
    if (this->Det() == 0)
        throw("Cannot compute matrix inverse because singular matrix");
    switch (this->GetRows()) {
        case 1:
            // Scalar case: simple reciprocal.
            (*this)(0, 0) = (1 / (*this)(0, 0));
            break;
        case 2: {
            // 2x2: swap diagonal, negate off-diagonal, divide by det.
            ChMatrixDynamic<Real> inv(2, 2);
            inv(0, 0) = (*this)(1, 1);
            inv(0, 1) = -(*this)(0, 1);
            inv(1, 1) = (*this)(0, 0);
            inv(1, 0) = -(*this)(1, 0);
            inv.MatrDivScale(this->Det());
            this->CopyFromMatrix(inv);
            break;
        }
        case 3: {
            // 3x3: each entry of inv is a 2x2 cofactor of the transposed
            // position, divided by det at the end.
            ChMatrixDynamic<Real> inv(3, 3);
            inv(0, 0) = (*this)(1, 1) * (*this)(2, 2) - (*this)(1, 2) * (*this)(2, 1);
            inv(0, 1) = (*this)(2, 1) * (*this)(0, 2) - (*this)(0, 1) * (*this)(2, 2);
            inv(0, 2) = (*this)(0, 1) * (*this)(1, 2) - (*this)(0, 2) * (*this)(1, 1);
            inv(1, 0) = (*this)(1, 2) * (*this)(2, 0) - (*this)(1, 0) * (*this)(2, 2);
            inv(1, 1) = (*this)(2, 2) * (*this)(0, 0) - (*this)(2, 0) * (*this)(0, 2);
            inv(1, 2) = (*this)(0, 2) * (*this)(1, 0) - (*this)(1, 2) * (*this)(0, 0);
            inv(2, 0) = (*this)(1, 0) * (*this)(2, 1) - (*this)(1, 1) * (*this)(2, 0);
            inv(2, 1) = (*this)(0, 1) * (*this)(2, 0) - (*this)(0, 0) * (*this)(2, 1);
            inv(2, 2) = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
            inv.MatrDivScale(this->Det());
            this->CopyFromMatrix(inv);
            break;
        }
        case 4: {
            // 4x4: each entry of inv is a signed 3x3 cofactor of the
            // transposed position (adjugate), divided by det at the end.
            // *this is not modified until the final CopyFromMatrix, so the
            // Det() call below still sees the original matrix.
            ChMatrixDynamic<Real> inv(4, 4);
            inv.SetElement(
                0, 0,
                (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) - (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) +
                    (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) - (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                    (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) + (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                0, 1,
                (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 1) - (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 1) -
                    (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 2) + (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 2) +
                    (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 3) - (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                0, 2,
                (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 1) - (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 1) +
                    (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 2) - (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 2) -
                    (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 3) + (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 3));
            inv.SetElement(
                0, 3,
                (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) - (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) -
                    (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) +
                    (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) - (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3));
            inv.SetElement(
                1, 0,
                (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) - (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                    (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) + (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                    (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) - (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                1, 1,
                (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 0) +
                    (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 2) -
                    (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                1, 2,
                (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 0) -
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 2) +
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 3));
            inv.SetElement(
                1, 3,
                (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) - (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) +
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) - (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) -
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) + (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3));
            inv.SetElement(
                2, 0,
                (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) - (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) +
                    (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) - (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                    (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) + (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3));
            inv.SetElement(
                2, 1,
                (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 0) -
                    (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 1) +
                    (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 3));
            inv.SetElement(
                2, 2,
                (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 0) +
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 1) -
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 3));
            inv.SetElement(
                2, 3,
                (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) - (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) -
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) + (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) +
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) - (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3));
            inv.SetElement(
                3, 0,
                (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) - (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                    (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1) + (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                    (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) - (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2));
            inv.SetElement(
                3, 1,
                (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 0) +
                    (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 1) -
                    (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 2));
            inv.SetElement(
                3, 2,
                (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 0) -
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 1) +
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 2));
            inv.SetElement(
                3, 3,
                (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) - (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) +
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) -
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) + (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2));
            inv.MatrDivScale(this->Det());
            this->CopyFromMatrix(inv);
            break;
        }
    }
}
/// Returns true if vector is identical to other matrix
/// (exact element-wise comparison, i.e. the tolerance overload with tol = 0).
bool Equals(const ChMatrix<Real>& other) const { return Equals(other, 0.0); }
/// Returns true if vector equals another vector, within a tolerance 'tol'.
/// Matrices of different dimensions are never equal.
bool Equals(const ChMatrix<Real>& other, Real tol) const {
    if (other.GetColumns() != this->columns || other.GetRows() != this->rows)
        return false;
    const int total = rows * columns;
    for (int k = 0; k < total; ++k) {
        if (fabs(ElementN(k) - other.ElementN(k)) > tol)
            return false;
    }
    return true;
}
/// Multiplies this 3x4 matrix by a quaternion, as v=[G]*q
/// The matrix must be 3x4; the quaternion is treated as a 4-component
/// column vector (e0,e1,e2,e3), and each component of the returned vector
/// is the dot product of one matrix row with it.
/// \return The result of the multiplication, i.e. a vector.
template <class RealB>
ChVector<Real> Matr34_x_Quat(const ChQuaternion<RealB>& qua) {
    assert((rows == 3) && (columns == 4));
    return ChVector<Real>(Get34Element(0, 0) * (Real)qua.e0() + Get34Element(0, 1) * (Real)qua.e1() +
                              Get34Element(0, 2) * (Real)qua.e2() + Get34Element(0, 3) * (Real)qua.e3(),
                          Get34Element(1, 0) * (Real)qua.e0() + Get34Element(1, 1) * (Real)qua.e1() +
                              Get34Element(1, 2) * (Real)qua.e2() + Get34Element(1, 3) * (Real)qua.e3(),
                          Get34Element(2, 0) * (Real)qua.e0() + Get34Element(2, 1) * (Real)qua.e1() +
                              Get34Element(2, 2) * (Real)qua.e2() + Get34Element(2, 3) * (Real)qua.e3());
}
/// Multiplies this 3x4 matrix (transposed) by a vector, as q=[G]'*v
/// The matrix must be 3x4; since the matrix is used transposed, each
/// returned quaternion component is the dot product of one matrix COLUMN
/// with the vector (x,y,z).
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr34T_x_Vect(const ChVector<RealB>& va) {
    assert((rows == 3) && (columns == 4));
    return ChQuaternion<Real>(
        Get34Element(0, 0) * (Real)va.x() + Get34Element(1, 0) * (Real)va.y() + Get34Element(2, 0) * (Real)va.z(),
        Get34Element(0, 1) * (Real)va.x() + Get34Element(1, 1) * (Real)va.y() + Get34Element(2, 1) * (Real)va.z(),
        Get34Element(0, 2) * (Real)va.x() + Get34Element(1, 2) * (Real)va.y() + Get34Element(2, 2) * (Real)va.z(),
        Get34Element(0, 3) * (Real)va.x() + Get34Element(1, 3) * (Real)va.y() + Get34Element(2, 3) * (Real)va.z());
}
/// Multiplies this 4x4 matrix by a quaternion.
/// The matrix must be 4x4; each component of the returned quaternion is the
/// dot product of one matrix row with (e0,e1,e2,e3).
/// (Note: despite the older comment, the matrix is used as-is, not transposed
/// -- rows are traversed, see the row-major Get44Element indices below.)
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr44_x_Quat(const ChQuaternion<RealB>& qua) {
    assert((rows == 4) && (columns == 4));
    return ChQuaternion<Real>(Get44Element(0, 0) * (Real)qua.e0() + Get44Element(0, 1) * (Real)qua.e1() +
                                  Get44Element(0, 2) * (Real)qua.e2() + Get44Element(0, 3) * (Real)qua.e3(),
                              Get44Element(1, 0) * (Real)qua.e0() + Get44Element(1, 1) * (Real)qua.e1() +
                                  Get44Element(1, 2) * (Real)qua.e2() + Get44Element(1, 3) * (Real)qua.e3(),
                              Get44Element(2, 0) * (Real)qua.e0() + Get44Element(2, 1) * (Real)qua.e1() +
                                  Get44Element(2, 2) * (Real)qua.e2() + Get44Element(2, 3) * (Real)qua.e3(),
                              Get44Element(3, 0) * (Real)qua.e0() + Get44Element(3, 1) * (Real)qua.e1() +
                                  Get44Element(3, 2) * (Real)qua.e2() + Get44Element(3, 3) * (Real)qua.e3());
}
/// Transposes only the lower-right 3x3 submatrix of a hemisymmetric 4x4 matrix,
/// used when the 4x4 matrix is a "star" matrix [q] coming from a quaternion q:
/// the non commutative quat. product is:
///  q1 x q2 = [q1]*q2 = [q2st]*q1
/// where [q2st] is the "semi-transpose of [q2].
void MatrXq_SemiTranspose() {
    // For a hemisymmetric submatrix, transposing equals negating every
    // off-diagonal entry of the lower-right 3x3 block (rows/cols 1..3).
    for (int i = 1; i <= 3; ++i)
        for (int j = 1; j <= 3; ++j)
            if (i != j)
                SetElement(i, j, -GetElement(i, j));
}
/// Change the sign of the 2nd, 3rd and 4th columns of a 4x4 matrix,
/// The product between a quaternion q1 and the conjugate of q2 (q2'), is:
///  q1 x q2' = [q1]*q2' = [q1sn]*q2
/// where [q1sn] is the semi-negation of the 4x4 matrix [q1].
void MatrXq_SemiNeg() {
    // Column 0 is untouched; every other column is negated in place.
    for (int r = 0; r < rows; ++r)
        for (int c = 1; c < columns; ++c)
            SetElement(r, c, -GetElement(r, c));
}
/// Gets the norm infinite of the matrix, i.e. the max.
/// of its elements in absolute value.
Real NormInf() const {
    Real best = 0;
    const int total = rows * columns;
    for (int k = 0; k < total; ++k) {
        Real mag = fabs(ElementN(k));
        if (mag > best)
            best = mag;
    }
    return best;
}
/// Gets the norm two of the matrix, i.e. the square root
/// of the sum of the elements squared (Frobenius norm).
Real NormTwo() const {
    Real sumsq = 0;
    const int total = rows * columns;
    for (int k = 0; k < total; ++k)
        sumsq += ElementN(k) * ElementN(k);
    return (sqrt(sumsq));
}
/// Finds max value among the values of the matrix.
/// Starts from element (0,0), so the matrix must be non-empty.
Real Max() const {
    Real best = GetElement(0, 0);
    const int total = rows * columns;
    for (int k = 0; k < total; ++k) {
        if (ElementN(k) > best)
            best = ElementN(k);
    }
    return best;
}
/// Finds min value among the values of the matrix.
/// Starts from element (0,0), so the matrix must be non-empty.
Real Min() const {
    Real best = GetElement(0, 0);
    const int total = rows * columns;
    for (int k = 0; k < total; ++k) {
        if (ElementN(k) < best)
            best = ElementN(k);
    }
    return best;
}
/// Linear interpolation of two matrices. Parameter mx must be 0...1.
/// [this] =(1-x)[A]+ (x)[B]    Matrices must have the same size!!
void LinInterpolate(const ChMatrix<Real>& matra, const ChMatrix<Real>& matrb, Real mx) {
    assert(matra.columns == matrb.columns && matra.rows == matrb.rows);
    const int total = rows * columns;
    // Blend element-wise; the exact expression (1-mx)*a + mx*b is kept
    // so floating-point results are unchanged.
    for (int k = 0; k < total; k++)
        ElementN(k) = matra.ElementN(k) * (1 - mx) + matrb.ElementN(k) * (mx);
}
/// Fills a matrix or a vector with a bilinear interpolation,
/// from corner values (as a u-v patch).
/// Rows ramp linearly from vmin to vmax; columns ADD a ramp from umin to
/// umax, so each entry is the sum of the two 1-D ramps.
void RowColInterp(Real vmin, Real vmax, Real umin, Real umax) {
    for (int iu = 0; iu < GetColumns(); iu++)
        for (int iv = 0; iv < GetRows(); iv++) {
            // A degenerate dimension (single row or single column) is skipped.
            // NOTE(review): when GetRows()==1 the assignment branch is skipped
            // and the += below accumulates onto whatever the matrix already
            // contains -- confirm callers zero the matrix first.
            if (GetRows() > 1)
                Element(iv, iu) = vmin + (vmax - vmin) * ((Real)iv / ((Real)(GetRows() - 1)));
            if (GetColumns() > 1)
                Element(iv, iu) += umin + (umax - umin) * ((Real)iu / ((Real)(GetColumns() - 1)));
        }
}
//
// BOOKKEEPING
//
/// Paste a matrix "matra" into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0
template <class RealB>
void PasteMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    const int nr = matra.GetRows();
    const int nc = matra.GetColumns();
    for (int r = 0; r < nr; ++r)
        for (int c = 0; c < nc; ++c)
            Element(r + insrow, c + inscol) = (Real)matra.Element(r, c);
}
/// Paste a matrix "matra" into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB>
void PasteSumMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    const int nr = matra.GetRows();
    const int nc = matra.GetColumns();
    for (int r = 0; r < nr; ++r)
        for (int c = 0; c < nc; ++c)
            Element(r + insrow, c + inscol) += (Real)matra.Element(r, c);
}
/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0
template <class RealB>
void PasteTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    const int nr = matra.GetRows();
    const int nc = matra.GetColumns();
    // Note the swapped destination indices: source (r, c) lands at (c, r).
    for (int r = 0; r < nr; ++r)
        for (int c = 0; c < nc; ++c)
            Element(c + insrow, r + inscol) = (Real)matra.Element(r, c);
}
/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB>
void PasteSumTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    const int nr = matra.GetRows();
    const int nc = matra.GetColumns();
    // Swapped destination indices (transpose), accumulated onto "this".
    for (int r = 0; r < nr; ++r)
        for (int c = 0; c < nc; ++c)
            Element(c + insrow, r + inscol) += (Real)matra.Element(r, c);
}
/// Paste a clipped portion of the matrix "matra" into "this",
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
template <class RealB>
void PasteClippedMatrix(const ChMatrix<RealB>& matra,
                        int cliprow,
                        int clipcol,
                        int nrows,
                        int ncolumns,
                        int insrow,
                        int inscol) {
    // Copy the nrows x ncolumns window starting at (cliprow, clipcol) of
    // matra into the window starting at (insrow, inscol) of "this".
    for (int r = 0; r < nrows; ++r)
        for (int c = 0; c < ncolumns; ++c)
            Element(r + insrow, c + inscol) = (Real)matra.Element(r + cliprow, c + clipcol);
}
/// Paste a clipped portion of the matrix "matra" into "this", where "this"
/// is a vector (of ChMatrix type),
/// inserting the clip (of size nrows, ncolumns) at the location insindex.
template <class RealB>
void PasteClippedMatrixToVector(const ChMatrix<RealB>& matra,
                                int cliprow,
                                int clipcol,
                                int nrows,
                                int ncolumns,
                                int insindex) {
    // Flatten the clipped window row-by-row into consecutive elements of
    // "this", starting at linear index insindex.
    for (int r = 0; r < nrows; ++r)
        for (int c = 0; c < ncolumns; ++c)
            ElementN(insindex + r * ncolumns + c) = (Real)matra.Element(cliprow + r, clipcol + c);
}
/// Paste a clipped portion of a vector into "this", where "this"
/// is a matrix (of ChMatrix type),
/// inserting the clip (of size nrows, ncolumns) at the location insindex.
template <class RealB>
void PasteClippedVectorToMatrix(const ChMatrix<RealB>& matra,
                                int cliprow,
                                int clipcol,
                                int nrows,
                                int ncolumns,
                                int insindex) {
    // Inverse of PasteClippedMatrixToVector: read consecutive elements of
    // matra starting at linear index insindex and unflatten them into the
    // nrows x ncolumns window of "this" at (cliprow, clipcol).
    for (int r = 0; r < nrows; ++r)
        for (int c = 0; c < ncolumns; ++c)
            Element(r + cliprow, c + clipcol) = (Real)matra.ElementN(insindex + r * ncolumns + c);
}
/// Paste a clipped portion of the matrix "matra" into "this", performing a sum with preexisting values,
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
template <class RealB>
void PasteSumClippedMatrix(const ChMatrix<RealB>& matra,
                           int cliprow,
                           int clipcol,
                           int nrows,
                           int ncolumns,
                           int insrow,
                           int inscol) {
    for (int i = 0; i < nrows; ++i)
        for (int j = 0; j < ncolumns; ++j)
            // The atomic update only takes effect when this method runs inside
            // an enclosing OpenMP parallel region; otherwise it is a plain +=.
#pragma omp atomic
            Element(i + insrow, j + inscol) += (Real)matra.Element(i + cliprow, j + clipcol);
}
/// Paste a vector "va" into the matrix, writing its three components into
/// column inscol, rows insrow .. insrow+2.
template <class RealB>
void PasteVector(const ChVector<RealB>& va, int insrow, int inscol) {
    SetElement(insrow, inscol, (Real)va.x());
    SetElement(insrow + 1, inscol, (Real)va.y());
    SetElement(insrow + 2, inscol, (Real)va.z());
}
/// Paste a vector "va" into the matrix, summing it with preexisting values
/// (column inscol, rows insrow .. insrow+2).
template <class RealB>
void PasteSumVector(const ChVector<RealB>& va, int insrow, int inscol) {
    Element(insrow, inscol) += (Real)va.x();
    Element(insrow + 1, inscol) += (Real)va.y();
    Element(insrow + 2, inscol) += (Real)va.z();
}
/// Paste a vector "va" into the matrix, subtracting it from preexisting values
/// (column inscol, rows insrow .. insrow+2).
template <class RealB>
void PasteSubVector(const ChVector<RealB>& va, int insrow, int inscol) {
    Element(insrow, inscol) -= (Real)va.x();
    Element(insrow + 1, inscol) -= (Real)va.y();
    Element(insrow + 2, inscol) -= (Real)va.z();
}
/// Paste a quaternion into the matrix, writing its four components into
/// column inscol, rows insrow .. insrow+3.
template <class RealB>
void PasteQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    SetElement(insrow, inscol, (Real)qa.e0());
    SetElement(insrow + 1, inscol, (Real)qa.e1());
    SetElement(insrow + 2, inscol, (Real)qa.e2());
    SetElement(insrow + 3, inscol, (Real)qa.e3());
}
/// Paste a quaternion into the matrix, summing it with preexisting values
/// (column inscol, rows insrow .. insrow+3).
template <class RealB>
void PasteSumQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    Element(insrow, inscol) += (Real)qa.e0();
    Element(insrow + 1, inscol) += (Real)qa.e1();
    Element(insrow + 2, inscol) += (Real)qa.e2();
    Element(insrow + 3, inscol) += (Real)qa.e3();
}
/// Paste a coordsys into the matrix: the position occupies rows
/// insrow .. insrow+2 and the rotation quaternion rows insrow+3 .. insrow+6,
/// all in column inscol.
template <class RealB>
void PasteCoordsys(const ChCoordsys<RealB>& cs, int insrow, int inscol) {
    PasteVector(cs.pos, insrow, inscol);
    PasteQuaternion(cs.rot, insrow + 3, inscol);
}
/// Returns the vector clipped from insrow, inscol.
ChVector<Real> ClipVector(int insrow, int inscol) const {
return ChVector<Real>(Element(insrow, inscol), Element(insrow + 1, inscol), Element(insrow + 2, inscol));
}
/// Returns the quaternion clipped from insrow, inscol
/// (four consecutive rows of column inscol).
ChQuaternion<Real> ClipQuaternion(int insrow, int inscol) const {
    Real q0 = Element(insrow, inscol);
    Real q1 = Element(insrow + 1, inscol);
    Real q2 = Element(insrow + 2, inscol);
    Real q3 = Element(insrow + 3, inscol);
    return ChQuaternion<Real>(q0, q1, q2, q3);
}
/// Returns the coordsys clipped from insrow, inscol: the position from rows
/// insrow .. insrow+2 and the rotation from rows insrow+3 .. insrow+6.
ChCoordsys<Real> ClipCoordsys(int insrow, int inscol) const {
    return ChCoordsys<Real>(ClipVector(insrow, inscol), ClipQuaternion(insrow + 3, inscol));
}
//
// MULTIBODY SPECIFIC MATH FUNCTIONS
//
/// Fills a 4x4 matrix as the "star" matrix, representing quaternion cross product.
/// That is, given two quaternions a and b, aXb= [Astar]*b
/// Row 0 / column 0 carry the scalar part e0 and the (negated) vector part;
/// the lower-right 3x3 block is e0*I plus the skew matrix of (e1,e2,e3).
template <class RealB>
void Set_Xq_matrix(const ChQuaternion<RealB>& q) {
    // Row 0: [ e0 -e1 -e2 -e3 ]
    Set44Element(0, 0, (Real)q.e0());
    Set44Element(0, 1, -(Real)q.e1());
    Set44Element(0, 2, -(Real)q.e2());
    Set44Element(0, 3, -(Real)q.e3());
    // Row 1: [ e1  e0 -e3  e2 ]
    Set44Element(1, 0, (Real)q.e1());
    Set44Element(1, 1, (Real)q.e0());
    Set44Element(1, 2, -(Real)q.e3());
    Set44Element(1, 3, (Real)q.e2());
    // Row 2: [ e2  e3  e0 -e1 ]
    Set44Element(2, 0, (Real)q.e2());
    Set44Element(2, 1, (Real)q.e3());
    Set44Element(2, 2, (Real)q.e0());
    Set44Element(2, 3, -(Real)q.e1());
    // Row 3: [ e3 -e2  e1  e0 ]
    Set44Element(3, 0, (Real)q.e3());
    Set44Element(3, 1, -(Real)q.e2());
    Set44Element(3, 2, (Real)q.e1());
    Set44Element(3, 3, (Real)q.e0());
}
};
} // end namespace chrono
#endif
/**
* module with tools for manipulating arrays
* Julien Lesgourgues, 18.04.2010
*/
#include "arrays.h"
/**
 * Fills column index_dydx of a table with the derivative dy/dx of column
 * index_y with respect to column index_x. Interior points use a finite
 * difference weighted by the squares of the neighboring step sizes; the two
 * boundary values are obtained by linear extrapolation from the adjacent
 * interior point.
 *
 * Called by thermodynamics_init(); perturb_sources().
 *
 * NOTE(review): with exactly two lines the loop never runs and the output
 * column is left untouched; at least three lines are needed for a result --
 * confirm with callers.
 */
int array_derive(
                 double * array,
                 int n_columns,
                 int n_lines,
                 int index_x,   /** from 0 to (n_columns-1) */
                 int index_y,
                 int index_dydx,
                 ErrorMsg errmsg) {

  int i;
  double dx1,dx2,dy1,dy2,weight1,weight2;

  class_test((index_dydx == index_x) || (index_dydx == index_y),
             errmsg,
             "output column %d must differ from input columns %d and %d",index_dydx,index_x,index_y);

  /* guard added: with fewer than two lines, the initializations below would
     read array[1*n_columns+...] out of bounds (same check as the other
     derivation routines of this module) */
  class_test(n_lines < 2,
             errmsg,
             "no possible derivation with less than two lines");

  dx2=array[1*n_columns+index_x]-array[0*n_columns+index_x];
  dy2=array[1*n_columns+index_y]-array[0*n_columns+index_y];

  for (i=1; i<n_lines-1; i++) {

    dx1 = dx2;
    dy1 = dy2;
    dx2 = array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x];
    dy2 = array[(i+1)*n_columns+index_y]-array[i*n_columns+index_y];

    class_test((dx1 == 0) || (dx2 == 0),
               errmsg,
               "stop to avoid division by zero");

    /* each slope is weighted by the square of the OPPOSITE step size, so
       the side with the smaller step dominates the estimate */
    weight1 = dx2*dx2;
    weight2 = dx1*dx1;
    array[i*n_columns+index_dydx] = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2);

    /* boundaries: linear extrapolation from the first/last interior point */
    if (i == 1)
      array[(i-1)*n_columns+index_dydx] = 2.*dy1/dx1 - array[i*n_columns+index_dydx];

    if (i == n_lines-2)
      array[(i+1)*n_columns+index_dydx] = 2.*dy2/dx2 - array[i*n_columns+index_dydx];
  }

  return _SUCCESS_;
}
/**
 * Fills column index_dydx of 'array' with the first derivative of column
 * index_y with respect to x_array, using the analytic derivative of the
 * cubic spline whose second derivatives are stored (same column layout) in
 * 'array_splined'. The last line uses the backward form of the same spline
 * segment.
 */
int array_derive_spline(
                        double * x_array,
                        int n_lines,
                        double * array,
                        double * array_splined,
                        int n_columns,
                        int index_y,
                        int index_dydx,
                        ErrorMsg errmsg) {

  int i;
  double h;

  class_test(index_dydx == index_y,
             errmsg,
             "Output column %d must differ from input columns %d",
             index_dydx,
             index_y);

  class_test(n_lines<2,
             errmsg,
             "no possible derivation with less than two lines");

  for (i=0; i<n_lines-1; i++) {
    h = x_array[i+1] - x_array[i];
    /* NOTE(review): this error path uses sprintf while the checks above use
       class_test -- apparently an older error-reporting style kept as-is */
    if (h == 0) {
      sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
      return _FAILURE_;
    }
    /* spline derivative evaluated at the left node of segment [i, i+1]:
       dy/dx = (y_{i+1}-y_i)/h - h/6 * (y''_{i+1} + 2 y''_i) */
    array[i*n_columns+index_dydx] =
      (array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
      - h / 6. * (array_splined[(i+1)*n_columns+index_y] + 2. * array_splined[i*n_columns+index_y]);
  }

  /* last line: derivative at the right node of the final segment */
  h = x_array[n_lines-1] - x_array[n_lines-2];
  array[(n_lines-1)*n_columns+index_dydx] =
    (array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
    + h / 6. * (2. * array_splined[(n_lines-1)*n_columns+index_y] + array_splined[(n_lines-2)*n_columns+index_y]);

  return _SUCCESS_;
}
/**
 * Same spline-based first derivative as array_derive_spline(), but the
 * second derivatives are stored in column index_ddy of the SAME table
 * instead of a separate array; the result goes into column index_dy.
 */
int array_derive_spline_table_line_to_line(
                                           double * x_array,
                                           int n_lines,
                                           double * array,
                                           int n_columns,
                                           int index_y,
                                           int index_ddy,
                                           int index_dy,
                                           ErrorMsg errmsg) {

  int i;
  double h;

  class_test(index_ddy == index_y,
             errmsg,
             "Output column %d must differ from input columns %d",
             index_ddy,
             index_y);

  class_test(index_ddy == index_dy,
             errmsg,
             "Output column %d must differ from input columns %d",
             index_ddy,
             index_dy);

  class_test(n_lines<2,
             errmsg,
             "no possible derivation with less than two lines");

  for (i=0; i<n_lines-1; i++) {
    h = x_array[i+1] - x_array[i];
    if (h == 0) {
      sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
      return _FAILURE_;
    }
    /* spline derivative at the left node of segment [i, i+1]:
       dy/dx = (y_{i+1}-y_i)/h - h/6 * (y''_{i+1} + 2 y''_i) */
    array[i*n_columns+index_dy] =
      (array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
      - h / 6. * (array[(i+1)*n_columns+index_ddy] + 2. * array[i*n_columns+index_ddy]);
  }

  /* last line: derivative at the right node of the final segment */
  h = x_array[n_lines-1] - x_array[n_lines-2];
  array[(n_lines-1)*n_columns+index_dy] =
    (array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
    + h / 6. * (2. * array[(n_lines-1)*n_columns+index_ddy] + array[(n_lines-2)*n_columns+index_ddy]);

  return _SUCCESS_;
}
/**
 * Compute the first derivative dy/dx of column index_y with an order-2
 * (three-point) finite-difference stencil on a possibly non-uniform grid,
 * and write it into column index_dy.
 *
 * The first and last lines are extrapolated linearly using the second
 * derivative estimated at the nearest interior point.
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_derive1_order2_table_line_to_line(
    double * x_array,
    int n_lines,
    double * array,
    int n_columns,
    int index_y,
    int index_dy,
    ErrorMsg errmsg) {

  int i;
  double dxp,dxm,dyp,dym;

  /* fix: the stencil below reads x_array[2] and table line 2, so the
     true minimum is three lines (the old check allowed n_lines==2 and
     read out of bounds) */
  if (n_lines < 3) {
    sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 3",__func__,__LINE__,n_lines);
    return _FAILURE_;
  }

  /* derivative at the second point (i=1), then extrapolation to i=0 */
  dxp = x_array[2] - x_array[1];
  dxm = x_array[0] - x_array[1];
  dyp = *(array+2*n_columns+index_y) - *(array+1*n_columns+index_y);
  dym = *(array+0*n_columns+index_y) - *(array+1*n_columns+index_y);

  if ((dxp*dxm*(dxm-dxp)) == 0.) {
    sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
    return _FAILURE_;
  }

  *(array+1*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));

  *(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
    - (x_array[1] - x_array[0]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));

  /* interior points i = 2 .. n_lines-2 */
  for (i=2; i<n_lines-1; i++) {

    dxp = x_array[i+1] - x_array[i];
    dxm = x_array[i-1] - x_array[i];
    dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
    dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);

    if ((dxp*dxm*(dxm-dxp)) == 0.) {
      sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
      return _FAILURE_;
    }

    *(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
  }

  /* last line: extrapolate with the stencil quantities left over from the
     last computed interior point */
  *(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
    + (x_array[n_lines-1] - x_array[n_lines-2]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));

  return _SUCCESS_;
}
/**
 * Compute first (index_dy) and second (index_ddy) derivatives of column
 * index_y with an order-2 three-point stencil on a possibly non-uniform
 * grid. Endpoints are filled by extrapolation from the nearest interior
 * point.
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_derive2_order2_table_line_to_line(
    double * x_array,
    int n_lines,
    double * array,
    int n_columns,
    int index_y,
    int index_dy,
    int index_ddy,
    ErrorMsg errmsg) {

  int i;
  double dxp,dxm,dyp,dym;

  /* fix: with fewer than three lines the loop below never runs and the
     endpoint extrapolations would read values that were never written
     (out of bounds for n_lines < 2) */
  if (n_lines < 3) {
    sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 3",__func__,__LINE__,n_lines);
    return _FAILURE_;
  }

  /* interior points */
  for (i=1; i<n_lines-1; i++) {

    dxp = x_array[i+1] - x_array[i];
    dxm = x_array[i-1] - x_array[i];
    dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
    dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);

    if ((dxp*dxm*(dxm-dxp)) == 0.) {
      sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
      return _FAILURE_;
    }

    *(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
    *(array+i*n_columns+index_ddy) = 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
  }

  /* first line: linear extrapolation of dy, constant ddy */
  *(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
    - (x_array[1] - x_array[0]) * *(array+1*n_columns+index_ddy);
  *(array+0*n_columns+index_ddy) = *(array+1*n_columns+index_ddy);

  /* last line: same treatment from the other end */
  *(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
    + (x_array[n_lines-1] - x_array[n_lines-2]) * *(array+(n_lines-2)*n_columns+index_ddy);
  *(array+(n_lines-1)*n_columns+index_ddy) = *(array+(n_lines-2)*n_columns+index_ddy);

  return _SUCCESS_;
}
/**
 * Cumulative integral of the cubic spline through column index_y
 * (second derivatives in column index_ddy), written into column
 * index_inty. The value on line 0 is set to zero; each following line
 * adds the exact integral of the spline over one interval (trapezoid
 * plus the h^3/24 spline correction).
 *
 * Always returns _SUCCESS_ (errmsg is unused but kept for a uniform
 * signature).
 */
int array_integrate_spline_table_line_to_line(
    double * x_array,
    int n_lines,
    double * array,
    int n_columns,
    int index_y,
    int index_ddy,
    int index_inty,
    ErrorMsg errmsg) {

  int line;
  double step;
  double running_sum;

  /* integral is zero at the lower bound */
  running_sum = 0.;
  array[0*n_columns+index_inty] = 0.;

  for (line=0; line < n_lines-1; line++) {

    step = (x_array[line+1]-x_array[line]);

    /* exact integral of the cubic spline over [x[line], x[line+1]] */
    running_sum = running_sum +
      (array[line*n_columns+index_y]+array[(line+1)*n_columns+index_y])*step/2.+
      (array[line*n_columns+index_ddy]+array[(line+1)*n_columns+index_ddy])*step*step*step/24.;

    array[(line+1)*n_columns+index_inty] = running_sum;
  }

  return _SUCCESS_;
}
/**
 * Not called.
 *
 * Estimate first (index_dydx) and second (index_ddydxdx) derivatives of
 * column index_y with respect to column index_x, using a weighted
 * three-point stencil on a non-uniform grid. Endpoint lines are filled
 * from the first/last interior point.
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_derive_two(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    int index_y,
    int index_dydx,
    int index_ddydxdx,
    ErrorMsg errmsg) {

  int i;
  double dx1,dx2,dy1,dy2,weight1,weight2,denom;

  if ((index_dydx == index_x) || (index_dydx == index_y)) {
    sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_dydx,index_x,index_y);
    return _FAILURE_;
  }

  /* fix: with fewer than three lines the loop never runs and the
     endpoint lines would stay unwritten */
  if (n_lines < 3) {
    sprintf(errmsg,"%s(L:%d) : n_lines=%d, should be at least 3",__func__,__LINE__,n_lines);
    return _FAILURE_;
  }

  dx2=*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x);
  dy2=*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y);

  for (i=1; i<n_lines-1; i++) {

    /* reuse the forward differences of the previous step as the
       backward differences of this one */
    dx1 = dx2;
    dy1 = dy2;
    dx2 = *(array+(i+1)*n_columns+index_x)-*(array+i*n_columns+index_x);
    dy2 = *(array+(i+1)*n_columns+index_y)-*(array+i*n_columns+index_y);

    weight1 = dx2*dx2;
    weight2 = dx1*dx1;

    /* fix: guard the actual denominator. The old test
       (dx1==0 && dx2==0) missed cases where weight1*dx1+weight2*dx2
       vanishes with dx1, dx2 individually nonzero. */
    denom = weight1*dx1+weight2*dx2;
    if (denom == 0.) {
      sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
      return _FAILURE_;
    }

    *(array+i*n_columns+index_dydx) = (weight1*dy1+weight2*dy2) / denom;
    *(array+i*n_columns+index_ddydxdx) = (dx2*dy1-dx1*dy2) / denom;

    if (i == 1) {
      /* first line: linear extrapolation of dydx, constant ddydxdx */
      *(array+(i-1)*n_columns+index_dydx) = 2.*dy1/dx1 - *(array+i*n_columns+index_dydx);
      *(array+(i-1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx);
    }

    if (i == n_lines-2) {
      *(array+(i+1)*n_columns+index_dydx) = 2.*dy2/dx2 - *(array+i*n_columns+index_dydx);
      /* fix: the original wrote index_dydx twice here, so the last-line
         second derivative was never filled (cf. the i==1 branch) */
      *(array+(i+1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx);
    }
  }

  return _SUCCESS_;
}
/**
 * Compute the spline second derivatives d2y/dx2 of column index_y with
 * respect to column index_x and store them in column index_ddydx2
 * (tridiagonal forward sweep + back-substitution, Numerical-Recipes
 * style).
 *
 * spline_mode selects the boundary condition:
 *  - _SPLINE_NATURAL_:   y'' = 0 at both ends
 *  - _SPLINE_EST_DERIV_: end slope estimated from a three-point parabola
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_spline(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    int index_y,
    int index_ddydx2,
    short spline_mode,
    ErrorMsg errmsg) {

  int i,k;
  double p,qn,sig,un;
  double * u;       /* decomposition workspace, one slot per line but the last */
  double dy_first;  /* estimated dy/dx at the first point (EST_DERIV mode) */
  double dy_last;   /* estimated dy/dx at the last point (EST_DERIV mode) */

  if (n_lines < 3) {
    sprintf(errmsg,"%s(L:%d) n_lines=%d, while routine needs n_lines >= 3",__func__,__LINE__,n_lines);
    return _FAILURE_;
  }

  u = malloc((n_lines-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  /* boundary condition at the first point */
  if (spline_mode == _SPLINE_NATURAL_) {
    *(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* slope at point 0 of the parabola through points 0, 1, 2 */
    dy_first =
      ((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
       (*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
       (*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
       (*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
       (*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
       (*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
      ((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
       (*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
       (*(array+2*n_columns+index_x)-*(array+1*n_columns+index_x)));

    *(array+0*n_columns+index_ddydx2) = -0.5;

    u[0] =
      (3./(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x)))*
      ((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
       (*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x))
       -dy_first);
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); /* fix: u was leaked on this error path */
    return _FAILURE_;
  }

  /* forward sweep of the tridiagonal system */
  for (i=1; i < n_lines-1; i++) {

    sig = (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
      / (*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));

    p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;

    *(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;

    u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
      / (*(array+(i+1)*n_columns+index_x) - *(array+i*n_columns+index_x))
      - (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
      / (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));

    u[i]= (6.0 * u[i] /
           (*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
           - sig * u[i-1]) / p;
  }

  /* boundary condition at the last point */
  if (spline_mode == _SPLINE_NATURAL_) {
    qn=0.;
    un=0.;
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* slope at the last point of the parabola through the last three points */
    dy_last =
      ((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
       (*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
       (*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
       (*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
       (*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
       (*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
      ((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
       (*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
       (*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-2)*n_columns+index_x)));

    qn=0.5;

    un =
      (3./(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)))*
      (dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
       (*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)));
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); /* fix: u was leaked on this error path */
    return _FAILURE_;
  }

  *(array+(n_lines-1)*n_columns+index_ddydx2) =
    (un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);

  /* back-substitution */
  for (k=n_lines-2; k>=0; k--)
    *(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
      *(array+(k+1)*n_columns+index_ddydx2) + u[k];

  free(u);

  return _SUCCESS_;
}
/**
 * Like array_spline(), but x is given as a separate vector: compute the
 * spline second derivatives of column index_y of 'array' with respect to
 * x, and store them in column index_ddydx2.
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_spline_table_line_to_line(
    double * x, /* vector of size x_size */
    int n_lines,
    double * array,
    int n_columns,
    int index_y,
    int index_ddydx2,
    short spline_mode,
    ErrorMsg errmsg) {

  int i,k;
  double p,qn,sig,un;
  double * u;       /* tridiagonal-sweep workspace */
  double dy_first;  /* estimated dy/dx at the first point (EST_DERIV mode) */
  double dy_last;   /* estimated dy/dx at the last point (EST_DERIV mode) */

  u = malloc((n_lines-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  /* boundary condition at the first point */
  if (spline_mode == _SPLINE_NATURAL_) {
    *(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* fix: the three-point end-derivative estimate reads x[2] and
       table line 2 */
    if (n_lines < 3) {
      sprintf(errmsg,"%s(L:%d) n_lines=%d, while _SPLINE_EST_DERIV_ mode needs n_lines >= 3",__func__,__LINE__,n_lines);
      free(u);
      return _FAILURE_;
    }

    /* slope at point 0 of the parabola through points 0, 1, 2 */
    dy_first =
      ((x[2]-x[0])*(x[2]-x[0])*
       (*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
       (x[1]-x[0])*(x[1]-x[0])*
       (*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
      ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

    *(array+0*n_columns+index_ddydx2) = -0.5;

    u[0] =
      (3./(x[1] - x[0]))*
      ((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
       (x[1] - x[0])-dy_first);
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); /* fix: u was leaked on this error path */
    return _FAILURE_;
  }

  /* forward sweep of the tridiagonal system */
  for (i=1; i < n_lines-1; i++) {

    sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1]);

    p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;

    *(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;

    u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
      / (x[i+1] - x[i])
      - (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
      / (x[i] - x[i-1]);

    u[i]= (6.0 * u[i] /
           (x[i+1] - x[i-1])
           - sig * u[i-1]) / p;
  }

  /* boundary condition at the last point */
  if (spline_mode == _SPLINE_NATURAL_) {
    qn=0.;
    un=0.;
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* slope at the last point of the parabola through the last three points */
    dy_last =
      ((x[n_lines-3]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-1])*
       (*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
       (x[n_lines-2]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*
       (*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
      ((x[n_lines-3]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-2]));

    qn=0.5;

    un =
      (3./(x[n_lines-1] - x[n_lines-2]))*
      (dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
       (x[n_lines-1] - x[n_lines-2]));
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); /* fix: u was leaked on this error path */
    return _FAILURE_;
  }

  *(array+(n_lines-1)*n_columns+index_ddydx2) =
    (un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);

  /* back-substitution */
  for (k=n_lines-2; k>=0; k--)
    *(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
      *(array+(k+1)*n_columns+index_ddydx2) + u[k];

  free(u);

  return _SUCCESS_;
}
/**
 * Spline second derivatives for ALL columns of a table at once: fill
 * ddy_array[index_x*y_size+index_y] with d2y/dx2 of each column of
 * y_array with respect to x. Same algorithm and boundary conditions as
 * array_spline().
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_spline_table_lines(
    double * x, /* vector of size x_size */
    int x_size,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_x*y_size+index_y] */
    int y_size,
    double * ddy_array, /* array of size x_size*y_size */
    short spline_mode,
    ErrorMsg errmsg
    ) {

  double * p;       /* per-column pivot of the tridiagonal sweep */
  double * qn;      /* per-column upper boundary coefficient */
  double * un;      /* per-column upper boundary right-hand side */
  double * u;       /* per-column decomposition workspace */
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn = malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* fix: every failure path below now releases all buffers (free(NULL)
     is a no-op); the original leaked on each early return */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }

  /* boundary condition at the first point, for every column */
  index_x=0;
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
    }
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* fix: the three-point end-derivative estimate reads x[2] */
    if (x_size < 3) {
      sprintf(errmsg,"%s(L:%d) x_size=%d, while _SPLINE_EST_DERIV_ mode needs x_size >= 3",__func__,__LINE__,x_size);
      free(u); free(p); free(qn); free(un);
      return _FAILURE_;
    }

    for (index_y=0; index_y < y_size; index_y++) {

      /* slope at point 0 of the parabola through points 0, 1, 2 */
      dy_first =
        ((x[2]-x[0])*(x[2]-x[0])*
         (y_array[1*y_size+index_y]-y_array[0*y_size+index_y])-
         (x[1]-x[0])*(x[1]-x[0])*
         (y_array[2*y_size+index_y]-y_array[0*y_size+index_y]))/
        ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

      ddy_array[index_x*y_size+index_y] = -0.5;

      u[index_x*y_size+index_y] =
        (3./(x[1] - x[0]))*
        ((y_array[1*y_size+index_y]-y_array[0*y_size+index_y])/
         (x[1] - x[0])-dy_first);
    }
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); free(p); free(qn); free(un); /* fix: leaked on this path */
    return _FAILURE_;
  }

  /* forward sweep of the tridiagonal system, vectorized over columns */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

    for (index_y=0; index_y < y_size; index_y++) {

      p[index_y] = sig * ddy_array[(index_x-1)*y_size+index_y] + 2.0;

      ddy_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];

      u[index_x*y_size+index_y] =
        (y_array[(index_x+1)*y_size+index_y] - y_array[index_x*y_size+index_y])
        / (x[index_x+1] - x[index_x])
        - (y_array[index_x*y_size+index_y] - y_array[(index_x-1)*y_size+index_y])
        / (x[index_x] - x[index_x-1]);

      u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
                                   (x[index_x+1] - x[index_x-1])
                                   - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
    }
  }

  /* boundary condition at the last point, for every column */
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      qn[index_y]=un[index_y]=0.0;
    }
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {
    for (index_y=0; index_y < y_size; index_y++) {

      /* slope at the last point of the parabola through the last three points */
      dy_last =
        ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
         (y_array[(x_size-2)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y])-
         (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
         (y_array[(x_size-3)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y]))/
        ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

      qn[index_y]=0.5;

      un[index_y]=
        (3./(x[x_size-1] - x[x_size-2]))*
        (dy_last-(y_array[(x_size-1)*y_size+index_y] - y_array[(x_size-2)*y_size+index_y])/
         (x[x_size-1] - x[x_size-2]));
    }
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); free(p); free(qn); free(un); /* fix: leaked on this path */
    return _FAILURE_;
  }

  index_x=x_size-1;
  for (index_y=0; index_y < y_size; index_y++) {
    ddy_array[index_x*y_size+index_y] =
      (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
      (qn[index_y] * ddy_array[(index_x-1)*y_size+index_y] + 1.0);
  }

  /* back-substitution */
  for (index_x=x_size-2; index_x >= 0; index_x--) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_x*y_size+index_y] = ddy_array[index_x*y_size+index_y] *
        ddy_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
    }
  }

  free(qn);
  free(un);
  free(p);
  free(u);

  return _SUCCESS_;
}
/**
 * Evaluate the first derivative of the splined functions stored in
 * 'array' (values) and 'array_splined' (second derivatives) at abscissa
 * x, for the first result_size columns. x_array may be sorted either in
 * increasing or decreasing order; the bracketing interval is located by
 * bisection and its lower index is cached in *last_index.
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg when x falls
 * outside the tabulated range.
 */
int array_derivate_spline(
    double * __restrict__ x_array,
    int n_lines,
    double * __restrict__ array,
    double * __restrict__ array_splined,
    int n_columns,
    double x,
    int * __restrict__ last_index,
    double * __restrict__ result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int lo,hi,mid,col;
  double step,w_lo,w_hi;

  lo=0;
  hi=n_lines-1;

  if (x_array[lo] < x_array[hi]){
    /* x_array sorted in increasing order */
    if (x < x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }
    if (x > x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }
    /* bisection */
    while (hi-lo > 1) {
      mid=(int)(0.5*(lo+hi));
      if (x < x_array[mid])
        hi=mid;
      else
        lo=mid;
    }
  }
  else {
    /* x_array sorted in decreasing order */
    if (x < x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }
    if (x > x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }
    /* bisection (reversed comparison) */
    while (hi-lo > 1) {
      mid=(int)(0.5*(lo+hi));
      if (x > x_array[mid])
        hi=mid;
      else
        lo=mid;
    }
  }

  /* remember the interval for the next call */
  *last_index = lo;

  /* barycentric weights inside the bracketing interval */
  step = x_array[hi] - x_array[lo];
  w_hi = (x-x_array[lo])/step;
  w_lo = 1-w_hi;

  /* analytic derivative of the cubic spline, column by column */
  for (col=0; col<result_size; col++)
    result[col] =
      (array[hi*n_columns+col] - array[lo*n_columns+col])/step +
      (-(3.*w_lo*w_lo-1.)* array_splined[lo*n_columns+col] +
       (3.*w_hi*w_hi-1.)* array_splined[hi*n_columns+col])*step/6.;

  return _SUCCESS_;
}
/**
 * Same as array_spline_table_lines(), but splining ln(y) against ln(x):
 * fill ddlny_array with d2(ln y)/d(ln x)2 for every column of y_array.
 * Assumes x and y_array entries are strictly positive (log is applied
 * to every value — NOTE(review): not checked here; verify at call sites).
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_logspline_table_lines(
    double * x, /* vector of size x_size */
    int x_size,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_x*y_size+index_y] */
    int y_size,
    double * ddlny_array, /* array of size x_size*y_size */
    short spline_mode,
    ErrorMsg errmsg
    ) {

  double * p;       /* per-column pivot of the tridiagonal sweep */
  double * qn;      /* per-column upper boundary coefficient */
  double * un;      /* per-column upper boundary right-hand side */
  double * u;       /* per-column decomposition workspace */
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn = malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* fix: every failure path below now releases all buffers (free(NULL)
     is a no-op); the original leaked on each early return */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }

  /* boundary condition at the first point, for every column */
  index_x=0;
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddlny_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
    }
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* fix: the three-point end-derivative estimate reads x[2] */
    if (x_size < 3) {
      sprintf(errmsg,"%s(L:%d) x_size=%d, while _SPLINE_EST_DERIV_ mode needs x_size >= 3",__func__,__LINE__,x_size);
      free(u); free(p); free(qn); free(un);
      return _FAILURE_;
    }

    for (index_y=0; index_y < y_size; index_y++) {

      /* slope (in log-log) at point 0 of the parabola through points 0, 1, 2 */
      dy_first =
        ((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
         (log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))-
         (log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
         (log(y_array[2*y_size+index_y])-log(y_array[0*y_size+index_y])))/
        ((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));

      ddlny_array[index_x*y_size+index_y] = -0.5;

      u[index_x*y_size+index_y] =
        (3./(log(x[1]) - log(x[0])))*
        ((log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))/
         (log(x[1]) - log(x[0]))-dy_first);
    }
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); free(p); free(qn); free(un); /* fix: leaked on this path */
    return _FAILURE_;
  }

  /* forward sweep of the tridiagonal system, vectorized over columns */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));

    for (index_y=0; index_y < y_size; index_y++) {

      p[index_y] = sig * ddlny_array[(index_x-1)*y_size+index_y] + 2.0;

      ddlny_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];

      u[index_x*y_size+index_y] =
        (log(y_array[(index_x+1)*y_size+index_y]) - log(y_array[index_x*y_size+index_y]))
        / (log(x[index_x+1]) - log(x[index_x]))
        - (log(y_array[index_x*y_size+index_y]) - log(y_array[(index_x-1)*y_size+index_y]))
        / (log(x[index_x]) - log(x[index_x-1]));

      u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
                                   (log(x[index_x+1]) - log(x[index_x-1]))
                                   - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
    }
  }

  /* boundary condition at the last point, for every column */
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      qn[index_y]=un[index_y]=0.0;
    }
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {
    for (index_y=0; index_y < y_size; index_y++) {

      /* slope (in log-log) at the last point of the parabola through the
         last three points */
      dy_last =
        ((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-1]))*
         (log(y_array[(x_size-2)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y]))-
         (log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*
         (log(y_array[(x_size-3)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y])))/
        ((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-2])));

      qn[index_y]=0.5;

      un[index_y]=
        (3./(log(x[x_size-1]) - log(x[x_size-2])))*
        (dy_last-(log(y_array[(x_size-1)*y_size+index_y]) - log(y_array[(x_size-2)*y_size+index_y]))/
         (log(x[x_size-1]) - log(x[x_size-2])));
    }
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); free(p); free(qn); free(un); /* fix: leaked on this path */
    return _FAILURE_;
  }

  index_x=x_size-1;
  for (index_y=0; index_y < y_size; index_y++) {
    ddlny_array[index_x*y_size+index_y] =
      (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
      (qn[index_y] * ddlny_array[(index_x-1)*y_size+index_y] + 1.0);
  }

  /* back-substitution */
  for (index_x=x_size-2; index_x >= 0; index_x--) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddlny_array[index_x*y_size+index_y] = ddlny_array[index_x*y_size+index_y] *
        ddlny_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
    }
  }

  free(qn);
  free(un);
  free(p);
  free(u);

  return _SUCCESS_;
}
/**
 * Same as array_spline_table_lines(), but with the transposed layout
 * y_array[index_y*x_size+index_x]: fill ddy_array (same layout) with
 * d2y/dx2 for every row of y_array.
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_spline_table_columns(
    double * x, /* vector of size x_size */
    int x_size,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_y*x_size+index_x] */
    int y_size,
    double * ddy_array, /* array of size x_size*y_size */
    short spline_mode,
    ErrorMsg errmsg
    ) {

  double * p;       /* per-row pivot of the tridiagonal sweep */
  double * qn;      /* per-row upper boundary coefficient */
  double * un;      /* per-row upper boundary right-hand side */
  double * u;       /* per-row decomposition workspace */
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  /* fix: the x-spacing checks are performed BEFORE any allocation, so
     that class_test's early return cannot leak memory (they only apply
     to _SPLINE_EST_DERIV_, as before) */
  if (spline_mode == _SPLINE_EST_DERIV_) {

    class_test(x_size < 3,
               errmsg,
               "x_size=%d, while spline mode _SPLINE_EST_DERIV_ needs at least 3 points",x_size);

    class_test(x[2]-x[0]==0.,
               errmsg,
               "x[2]=%g, x[0]=%g, stop to avoid seg fault",x[2],x[0]);

    class_test(x[1]-x[0]==0.,
               errmsg,
               "x[1]=%g, x[0]=%g, stop to avoid seg fault",x[1],x[0]);

    class_test(x[2]-x[1]==0.,
               errmsg,
               "x[2]=%g, x[1]=%g, stop to avoid seg fault",x[2],x[1]);
  }

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn = malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* fix: every failure path below now releases all buffers (free(NULL)
     is a no-op); the original leaked on each early return */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }

  /* boundary condition at the first point, for every row */
  index_x=0;
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_y*x_size+index_x] = 0.0;
      u[index_x*y_size+index_y] = 0.0;
    }
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {
    for (index_y=0; index_y < y_size; index_y++) {

      /* slope at point 0 of the parabola through points 0, 1, 2 */
      dy_first =
        ((x[2]-x[0])*(x[2]-x[0])*
         (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
         (x[1]-x[0])*(x[1]-x[0])*
         (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
        ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

      ddy_array[index_y*x_size+index_x] = -0.5;

      u[index_x*y_size+index_y] =
        (3./(x[1] - x[0]))*
        ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
         (x[1] - x[0])-dy_first);
    }
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); free(p); free(qn); free(un); /* fix: leaked on this path */
    return _FAILURE_;
  }

  /* forward sweep of the tridiagonal system, vectorized over rows
     (note: u keeps the lines-major layout u[index_x*y_size+index_y]) */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

    for (index_y=0; index_y < y_size; index_y++) {

      p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

      ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];

      u[index_x*y_size+index_y] =
        (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
        / (x[index_x+1] - x[index_x])
        - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
        / (x[index_x] - x[index_x-1]);

      u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
                                   (x[index_x+1] - x[index_x-1])
                                   - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
    }
  }

  /* boundary condition at the last point, for every row */
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      qn[index_y]=un[index_y]=0.0;
    }
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {
    for (index_y=0; index_y < y_size; index_y++) {

      /* slope at the last point of the parabola through the last three points */
      dy_last =
        ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
         (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
        ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

      qn[index_y]=0.5;

      un[index_y]=
        (3./(x[x_size-1] - x[x_size-2]))*
        (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
         (x[x_size-1] - x[x_size-2]));
    }
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); free(p); free(qn); free(un); /* fix: leaked on this path */
    return _FAILURE_;
  }

  index_x=x_size-1;
  for (index_y=0; index_y < y_size; index_y++) {
    ddy_array[index_y*x_size+index_x] =
      (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
      (qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
  }

  /* back-substitution */
  for (index_x=x_size-2; index_x >= 0; index_x--) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
        ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
    }
  }

  free(qn);
  free(p);
  free(u);
  free(un);

  return _SUCCESS_;
}
/**
 * OpenMP-parallel variant of array_spline_table_columns(): each thread
 * processes whole rows (values of index_y), so the per-row scratch slots
 * p[index_y], qn[index_y], un[index_y] and the u column are touched by a
 * single thread only.
 *
 * NOTE(review): unlike the sibling routines, any spline_mode other than
 * _SPLINE_NATURAL_ is treated as _SPLINE_EST_DERIV_ here (no validation)
 * — confirm this is intended at call sites. In the non-natural mode,
 * x[2] and x[x_size-3] are read, i.e. x_size >= 3 is assumed.
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_spline_table_columns2(
    double * x, /* vector of size x_size */
    int x_size,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_y*x_size+index_x] */
    int y_size,
    double * ddy_array, /* array of size x_size*y_size */
    short spline_mode,
    ErrorMsg errmsg
    ) {

  double * p;       /* per-row pivot of the tridiagonal sweep */
  double * qn;      /* per-row upper boundary coefficient */
  double * un;      /* per-row upper boundary right-hand side */
  double * u;       /* per-row decomposition workspace */
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn = malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* fix: every failure path below now releases all buffers (free(NULL)
     is a no-op); the original leaked whatever had been allocated */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    free(u); free(p); free(qn); free(un);
    return _FAILURE_;
  }

#pragma omp parallel                                              \
  shared(x,x_size,y_array,y_size,ddy_array,spline_mode,p,qn,un,u) \
  private(index_y,index_x,sig,dy_first,dy_last)
  {

#pragma omp for schedule (dynamic)
    for (index_y=0; index_y < y_size; index_y++) {

      /* boundary condition at the first point */
      if (spline_mode == _SPLINE_NATURAL_) {
        ddy_array[index_y*x_size+0] = 0.0;
        u[0*y_size+index_y] = 0.0;
      }
      else {
        /* slope at point 0 of the parabola through points 0, 1, 2 */
        dy_first =
          ((x[2]-x[0])*(x[2]-x[0])*
           (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
           (x[1]-x[0])*(x[1]-x[0])*
           (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
          ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

        ddy_array[index_y*x_size+0] = -0.5;

        u[0*y_size+index_y] =
          (3./(x[1] - x[0]))*
          ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
           (x[1] - x[0])-dy_first);
      }

      /* forward sweep of the tridiagonal system for this row */
      for (index_x=1; index_x < x_size-1; index_x++) {

        sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

        p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

        ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];

        u[index_x*y_size+index_y] =
          (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
          / (x[index_x+1] - x[index_x])
          - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
          / (x[index_x] - x[index_x-1]);

        u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
                                     (x[index_x+1] - x[index_x-1])
                                     - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
      }

      /* boundary condition at the last point */
      if (spline_mode == _SPLINE_NATURAL_) {
        qn[index_y]=un[index_y]=0.0;
      }
      else {
        /* slope at the last point of the parabola through the last three points */
        dy_last =
          ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
           (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
          ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

        qn[index_y]=0.5;

        un[index_y]=
          (3./(x[x_size-1] - x[x_size-2]))*
          (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
           (x[x_size-1] - x[x_size-2]));
      }

      index_x=x_size-1;

      ddy_array[index_y*x_size+index_x] =
        (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
        (qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);

      /* back-substitution for this row */
      for (index_x=x_size-2; index_x >= 0; index_x--) {
        ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
          ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
      }
    }
  }

  free(qn);
  free(p);
  free(u);
  free(un);

  return _SUCCESS_;
}
/**
 * Spline second derivatives for a single row index_y of a columns-major
 * table y_array[index_y*x_size+index_x], written into the matching row
 * of ddy_array. Same algorithm and boundary conditions as
 * array_spline().
 *
 * Returns _SUCCESS_, or _FAILURE_ with a message in errmsg.
 */
int array_spline_table_one_column(
    double * x, /* vector of size x_size */
    int x_size,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_y*x_size+index_x] */
    int y_size,
    int index_y,
    double * ddy_array, /* array of size x_size*y_size */
    short spline_mode,
    ErrorMsg errmsg
    ) {

  double p;
  double qn;
  double un;
  double * u;       /* tridiagonal-sweep workspace */
  double sig;
  int index_x;
  double dy_first;  /* estimated dy/dx at the first point (EST_DERIV mode) */
  double dy_last;   /* estimated dy/dx at the last point (EST_DERIV mode) */

  u = malloc((x_size-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  /** - boundary condition at the first point */
  index_x=0;
  if (spline_mode == _SPLINE_NATURAL_) {
    ddy_array[index_y*x_size+index_x] = 0.0;
    u[index_x] = 0.0;
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* fix: the three-point end-derivative estimate reads x[2] */
    if (x_size < 3) {
      sprintf(errmsg,"%s(L:%d) x_size=%d, while _SPLINE_EST_DERIV_ mode needs x_size >= 3",__func__,__LINE__,x_size);
      free(u);
      return _FAILURE_;
    }

    /* slope at point 0 of the parabola through points 0, 1, 2 */
    dy_first =
      ((x[2]-x[0])*(x[2]-x[0])*
       (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
       (x[1]-x[0])*(x[1]-x[0])*
       (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
      ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

    ddy_array[index_y*x_size+index_x] = -0.5;

    u[index_x] =
      (3./(x[1] - x[0]))*
      ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
       (x[1] - x[0])-dy_first);
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); /* fix: u was leaked on this error path */
    return _FAILURE_;
  }

  /** - forward sweep of the tridiagonal system */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

    p = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

    ddy_array[index_y*x_size+index_x] = (sig-1.0)/p;

    u[index_x] =
      (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
      / (x[index_x+1] - x[index_x])
      - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
      / (x[index_x] - x[index_x-1]);

    u[index_x] = (6.0 * u[index_x] /
                  (x[index_x+1] - x[index_x-1])
                  - sig * u[index_x-1]) / p;
  }

  /** - boundary condition at the last point */
  if (spline_mode == _SPLINE_NATURAL_) {
    qn=un=0.0;
  }
  else if (spline_mode == _SPLINE_EST_DERIV_) {

    /* slope at the last point of the parabola through the last three points */
    dy_last =
      ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
       (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
       (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
       (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
      ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

    qn=0.5;

    un=
      (3./(x[x_size-1] - x[x_size-2]))*
      (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
       (x[x_size-1] - x[x_size-2]));
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    free(u); /* fix: u was leaked on this error path */
    return _FAILURE_;
  }

  /** - back-substitution */
  index_x=x_size-1;

  ddy_array[index_y*x_size+index_x] =
    (un - qn * u[index_x-1]) /
    (qn * ddy_array[index_y*x_size+(index_x-1)] + 1.0);

  for (index_x=x_size-2; index_x >= 0; index_x--) {
    ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
      ddy_array[index_y*x_size+(index_x+1)] + u[index_x];
  }

  free(u);

  return _SUCCESS_;
}
/**
 * Compute, for one column of y_array, the second derivatives of ln(y)
 * with respect to ln(x) needed for log-log cubic spline interpolation,
 * using the standard tridiagonal sweep (natural spline or estimated
 * boundary derivative, depending on spline_mode).
 *
 * Only the first x_stop points are splined (x_stop <= x_size). Assumes
 * the relevant x[i] and y values are strictly positive, since logs are
 * taken; assumes x_stop >= 3 in _SPLINE_EST_DERIV_ mode (three-point
 * boundary estimates) -- TODO confirm callers guarantee this.
 *
 * Returns _SUCCESS_ or _FAILURE_ (with a message in errmsg).
 */
int array_logspline_table_one_column(
    double * x, /* vector of size x_size */
    int x_size,
    int x_stop,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_y*x_size+index_x] */
    int y_size,
    int index_y,
    double * ddlogy_array, /* array of size x_size*y_size */
    short spline_mode,
    ErrorMsg errmsg
    ) {

  double p;
  double qn;
  double un;
  double * u; /* scratch vector for the decomposition sweep */
  double sig;
  int index_x;
  double dy_first; /* estimated d ln(y)/d ln(x) at the first point */
  double dy_last;  /* estimated d ln(y)/d ln(x) at the last point */

  u = malloc((x_stop-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  /************* lower boundary condition *************/

  index_x=0;

  if (spline_mode == _SPLINE_NATURAL_) {
    ddlogy_array[index_y*x_size+index_x] = 0.0;
    u[index_x] = 0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* three-point estimate of d ln(y)/d ln(x) at the first point */
      dy_first =
        ((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
         (log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))-
         (log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
         (log(y_array[index_y*x_size+2])-log(y_array[index_y*x_size+0])))/
        ((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));

      ddlogy_array[index_y*x_size+index_x] = -0.5;

      u[index_x] =
        (3./(log(x[1]) - log(x[0])))*
        ((log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))/
         (log(x[1]) - log(x[0]))-dy_first);

    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      free(u); /* fix: u was leaked on this error path */
      return _FAILURE_;
    }
  }

  /************* decomposition of the tridiagonal system *************/

  for (index_x=1; index_x < x_stop-1; index_x++) {

    sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));

    p = sig * ddlogy_array[index_y*x_size+(index_x-1)] + 2.0;

    ddlogy_array[index_y*x_size+index_x] = (sig-1.0)/p;

    u[index_x] =
      (log(y_array[index_y*x_size+(index_x+1)]) - log(y_array[index_y*x_size+index_x]))
      / (log(x[index_x+1]) - log(x[index_x]))
      - (log(y_array[index_y*x_size+index_x]) - log(y_array[index_y*x_size+(index_x-1)]))
      / (log(x[index_x]) - log(x[index_x-1]));

    u[index_x] = (6.0 * u[index_x] /
                  (log(x[index_x+1]) - log(x[index_x-1]))
                  - sig * u[index_x-1]) / p;
  }

  /************* upper boundary condition *************/

  if (spline_mode == _SPLINE_NATURAL_) {
    qn=un=0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* three-point estimate of d ln(y)/d ln(x) at the last point */
      dy_last =
        ((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-3])-log(x[x_stop-1]))*
         (log(y_array[index_y*x_size+(x_stop-2)])-log(y_array[index_y*x_size+(x_stop-1)]))-
         (log(x[x_stop-2])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
         (log(y_array[index_y*x_size+(x_stop-3)])-log(y_array[index_y*x_size+(x_stop-1)])))/
        ((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
         (log(x[x_stop-3])-log(x[x_stop-2])));

      qn=0.5;

      un=
        (3./(log(x[x_stop-1]) - log(x[x_stop-2])))*
        (dy_last-(log(y_array[index_y*x_size+(x_stop-1)]) - log(y_array[index_y*x_size+(x_stop-2)]))/
         (log(x[x_stop-1]) - log(x[x_stop-2])));
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      free(u); /* fix: u was leaked on this error path */
      return _FAILURE_;
    }
  }

  /************* back-substitution *************/

  index_x=x_stop-1;

  ddlogy_array[index_y*x_size+index_x] =
    (un - qn * u[index_x-1]) /
    (qn * ddlogy_array[index_y*x_size+(index_x-1)] + 1.0);

  for (index_x=x_stop-2; index_x >= 0; index_x--) {
    ddlogy_array[index_y*x_size+index_x] = ddlogy_array[index_y*x_size+index_x] *
      ddlogy_array[index_y*x_size+(index_x+1)] + u[index_x];
  }

  free(u);

  return _SUCCESS_;
}
/**
 * Integrate column index_y over the full x range (column index_x), using
 * the cubic spline representation of y (second derivatives stored in
 * column index_ddy).
 *
 * On each interval of width h the exact integral of the spline
 *   y = a*y_i + b*y_{i+1} + ((a^3-a)*y''_i + (b^3-b)*y''_{i+1})*h^2/6
 * is (y_i + y_{i+1})*h/2 - (y''_i + y''_{i+1})*h^3/24, since
 * int_0^1 (b^3-b) db = -1/4.
 *
 * Fix: the curvature term was previously ADDED instead of subtracted.
 */
int array_integrate_all_spline(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    int index_y,
    int index_ddy,
    double * result,
    ErrorMsg errmsg) {

  int i;
  double h; /* width of the current interval */

  *result = 0;

  for (i=0; i < n_lines-1; i++) {
    h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);

    *result +=
      (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.-
      (array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
  }

  return _SUCCESS_;
}
/**
 * Integrate column index_y over column index_x: trapezoidal rule up to
 * line index_start_spline, then the cubic-spline formula (using the
 * second derivatives in column index_ddy) for the remaining lines.
 *
 * Fix: the h^3/24 spline term was previously ADDED; the exact integral
 * of the cubic spline over one interval is
 *   (y_i + y_{i+1})*h/2 - (y''_i + y''_{i+1})*h^3/24 .
 */
int array_integrate_all_trapzd_or_spline(
    double * array,
    int n_columns,
    int n_lines,
    int index_start_spline,
    int index_x, /** from 0 to (n_columns-1) */
    int index_y,
    int index_ddy,
    double * result,
    ErrorMsg errmsg) {

  int i;
  double h; /* width of the current interval */

  if ((index_start_spline<0) || (index_start_spline>=n_lines)) {
    sprintf(errmsg,"%s(L:%d) index_start_spline outside of range",__func__,__LINE__);
    return _FAILURE_;
  }

  *result = 0;

  /* trapezoidal integration till given index */
  for (i=0; i < index_start_spline; i++) {
    h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);

    *result +=
      (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.;
  }

  /* then, spline integration (note the minus sign on the curvature term) */
  for (i=index_start_spline; i < n_lines-1; i++) {
    h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);

    *result +=
      (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.-
      (array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
  }

  return _SUCCESS_;
}
/**
 * Fill column index_int_y_dx with the cumulative trapezoidal integral of
 * column index_y with respect to column index_x (zero at the first line).
 *
 * Not called.
 */
int array_integrate(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    int index_y,
    int index_int_y_dx,
    ErrorMsg errmsg) {

  int line;
  double running;

  /* the output column must not overwrite either input column */
  if ((index_int_y_dx == index_x) || (index_int_y_dx == index_y)) {
    sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_int_y_dx,index_x,index_y);
    return _FAILURE_;
  }

  running = 0.;
  array[0*n_columns+index_int_y_dx] = running;

  for (line=1; line<n_lines; line++) {
    running += 0.5 * (array[line*n_columns+index_y] + array[(line-1)*n_columns+index_y])
      * (array[line*n_columns+index_x] - array[(line-1)*n_columns+index_x]);
    array[line*n_columns+index_int_y_dx] = running;
  }

  return _SUCCESS_;
}
/**
 * Fill column index_int_y1_over_y2_dx with the cumulative trapezoidal
 * integral of y1/y2 with respect to x (zero at the first line).
 *
 * Called by thermodynamics_init().
 */
int array_integrate_ratio(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    int index_y1,
    int index_y2,
    int index_int_y1_over_y2_dx,
    ErrorMsg errmsg) {

  int line;
  double running;
  double ratio_here;
  double ratio_prev;

  /* the output column must not overwrite any input column */
  if ((index_int_y1_over_y2_dx == index_x) || (index_int_y1_over_y2_dx == index_y1) || (index_int_y1_over_y2_dx == index_y2)) {
    sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d, %d and %d",__func__,__LINE__,index_int_y1_over_y2_dx,index_x,index_y1,index_y2);
    return _FAILURE_;
  }

  running = 0.;
  array[0*n_columns+index_int_y1_over_y2_dx] = running;

  for (line=1; line<n_lines; line++) {
    ratio_here = array[line*n_columns+index_y1] / array[line*n_columns+index_y2];
    ratio_prev = array[(line-1)*n_columns+index_y1] / array[(line-1)*n_columns+index_y2];
    running += 0.5 * (ratio_here + ratio_prev)
      * (array[line*n_columns+index_x] - array[(line-1)*n_columns+index_x]);
    array[line*n_columns+index_int_y1_over_y2_dx] = running;
  }

  return _SUCCESS_;
}
/**
 * Linearly interpolate all columns of 'array' at the point x, where x is
 * read from column index_x (monotonic, either ordering). The bracketing
 * lower line is returned in *last_index, and column index_x of the
 * result is set to x itself.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    double x,
    int * last_index,
    double * result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int low,high,middle,col;
  double w; /* linear weight of the upper bracketing line */

  low = 0;
  high = n_lines-1;

  if (array[low*n_columns+index_x] < array[high*n_columns+index_x]) {

    /* x column sorted in ascending order: bound checks then bisection */

    if (x < array[low*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array[low*n_columns+index_x]);
      return _FAILURE_;
    }
    if (x > array[high*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array[high*n_columns+index_x]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < array[middle*n_columns+index_x])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* x column sorted in descending order: mirrored checks and bisection */

    if (x < array[high*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array[high*n_columns+index_x]);
      return _FAILURE_;
    }
    if (x > array[low*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array[low*n_columns+index_x]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > array[middle*n_columns+index_x])
        high = middle;
      else
        low = middle;
    }
  }

  *last_index = low;

  w = (x-array[low*n_columns+index_x])
    / (array[high*n_columns+index_x]-array[low*n_columns+index_x]);

  for (col=0; col<result_size; col++)
    result[col] = array[low*n_columns+col] * (1.-w)
      + w * array[high*n_columns+col];

  /* return the requested x exactly, not the interpolated abscissa */
  result[index_x] = x;

  return _SUCCESS_;
}
/**
 * Interpolate all columns of 'array' at x with cubic splines, the x
 * values being in the separate vector x_array (either monotonic
 * ordering) and the second derivatives in array_splined. The lower
 * bracketing line is stored in *last_index.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_spline(
    double * __restrict__ x_array,
    int n_lines,
    double * __restrict__ array,
    double * __restrict__ array_splined,
    int n_columns,
    double x,
    int * __restrict__ last_index,
    double * __restrict__ result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int low,high,middle,col;
  double step,wlow,whigh;

  low = 0;
  high = n_lines-1;

  if (x_array[low] < x_array[high]) {

    /* ascending x_array: bound checks then bisection */

    if (x < x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }
    if (x > x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* descending x_array: mirrored checks and bisection */

    if (x < x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }
    if (x > x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }

  *last_index = low;

  /* standard cubic-spline evaluation on the bracketing interval */
  step = x_array[high] - x_array[low];
  whigh = (x-x_array[low])/step;
  wlow = 1-whigh;

  for (col=0; col<result_size; col++)
    result[col] =
      wlow * array[low*n_columns+col] +
      whigh * array[high*n_columns+col] +
      ((wlow*wlow*wlow-wlow)* array_splined[low*n_columns+col] +
       (whigh*whigh*whigh-whigh)* array_splined[high*n_columns+col])*step*step/6.;

  return _SUCCESS_;
}
/**
 * Linearly interpolate all columns of 'array' at x, with x values in the
 * separate vector x_array (either monotonic ordering). The lower
 * bracketing line is stored in *last_index.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_linear(
    double * x_array,
    int n_lines,
    double * array,
    int n_columns,
    double x,
    int * last_index,
    double * result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int low,high,middle,col;
  double step,wlow,whigh;

  low = 0;
  high = n_lines-1;

  if (x_array[low] < x_array[high]) {

    /* ascending x_array: bound checks then bisection */

    if (x < x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }
    if (x > x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* descending x_array: mirrored checks and bisection */

    if (x < x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }
    if (x > x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }

  *last_index = low;

  /* linear weights of the two bracketing lines */
  step = x_array[high] - x_array[low];
  whigh = (x-x_array[low])/step;
  wlow = 1-whigh;

  for (col=0; col<result_size; col++)
    result[col] =
      wlow * array[low*n_columns+col] +
      whigh * array[high*n_columns+col];

  return _SUCCESS_;
}
/**
 * Interpolate ln(y_i) as a function of ln(x) with cubic splines (second
 * derivatives of the logs in array_logsplined) and return
 * exp(interpolated value) for each column. Assumes positive x and y
 * values, since logs are taken. The lower bracketing line is stored in
 * *last_index.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_logspline(
    double * x_array,
    int n_lines,
    double * array,
    double * array_logsplined,
    int n_columns,
    double x,
    int * last_index,
    double * result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int low,high,middle,col;
  double step,wlow,whigh;

  low = 0;
  high = n_lines-1;

  if (x_array[low] < x_array[high]) {

    /* ascending x_array: bound checks then bisection */

    if (x < x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }
    if (x > x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* descending x_array: mirrored checks and bisection */

    if (x < x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }
    if (x > x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }

  *last_index = low;

  /* spline in (ln x, ln y) space, then exponentiate */
  step = log(x_array[high]) - log(x_array[low]);
  whigh = (log(x)-log(x_array[low]))/step;
  wlow = 1-whigh;

  for (col=0; col<result_size; col++)
    result[col] = exp(
      wlow * log(array[low*n_columns+col]) +
      whigh * log(array[high*n_columns+col]) +
      ((wlow*wlow*wlow-wlow)* array_logsplined[low*n_columns+col] +
       (whigh*whigh*whigh-whigh)* array_logsplined[high*n_columns+col])*step*step/6.);

  return _SUCCESS_;
}
/**
 * Cubic-spline interpolation of y(x) for one column (index_y) of the
 * flattened table y_array[index_y*x_size+index_x], with second
 * derivatives in ddy_array. x_array may be in either monotonic ordering.
 */
int array_interpolate_spline_one_column(
    double * x_array,
    int x_size,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_y*x_size+index_x] */
    int y_size,
    int index_y,
    double * ddy_array, /* array of size x_size*y_size */
    double x, /* input */
    double * y, /* output */
    ErrorMsg errmsg
    ) {

  int low,high,middle;
  double step,wlow,whigh;

  low = 0;
  high = x_size-1;

  if (x_array[low] < x_array[high]) {

    /* ascending x_array: bound checks then bisection */

    if (x < x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }
    if (x > x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* descending x_array: mirrored checks and bisection */

    if (x < x_array[high]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[high]);
      return _FAILURE_;
    }
    if (x > x_array[low]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[low]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > x_array[middle])
        high = middle;
      else
        low = middle;
    }
  }

  /* standard cubic-spline evaluation on the bracketing interval */
  step = x_array[high] - x_array[low];
  whigh = (x-x_array[low])/step;
  wlow = 1-whigh;

  *y =
    wlow * y_array[index_y * x_size + low] +
    whigh * y_array[index_y * x_size + high] +
    ((wlow*wlow*wlow-wlow)* ddy_array[index_y * x_size + low] +
     (whigh*whigh*whigh-whigh)* ddy_array[index_y * x_size + high])*step*step/6.;

  return _SUCCESS_;
}
/**
 * Interpolate y(x) for one column of the flattened table
 * y_array[index_y*x_size+index_x]: cubic spline (second derivatives in
 * ddy_array) inside the tabulated range, and linear
 * interpolation/extrapolation through the LAST TWO tabulated points when
 * x exceeds x_array[x_size-2].
 *
 * NOTE(review): the test below also routes x < x_array[0] (below the
 * first point, for an ascending table) to the linear formula built on
 * the last two points -- presumably callers only ever extrapolate above
 * the table; confirm before relying on below-range behavior.
 */
int array_interpolate_extrapolate_spline_one_column(
    double * x_array,
    int x_size,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_y*x_size+index_x] */
    int y_size,
    int index_y,
    double * ddy_array, /* array of size x_size*y_size */
    double x, /* input */
    double * y, /* output */
    ErrorMsg errmsg
    ) {

  int inf,sup,mid;
  double h,a,b;

  if (x > x_array[x_size-2] || x < x_array[0]) {

    /* interpolate/extrapolate linearly y as a function of x, using the
       straight line through the last two tabulated points */
    h = x_array[x_size-1] - x_array[x_size-2];
    b = (x-x_array[x_size-2])/h;
    a = 1-b;

    *y = a * y_array[index_y * x_size + (x_size-2)] +
      b * y_array[index_y * x_size + (x_size-1)];
  }
  else {

    /* interpolate y as a function of x with a spline */

    inf=0;
    sup=x_size-1;

    if (x_array[inf] < x_array[sup]){

      /* ascending x_array: bound checks then bisection */

      if (x < x_array[inf]) {
        sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
        return _FAILURE_;
      }

      if (x > x_array[sup]) {
        sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
        return _FAILURE_;
      }

      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }

    }
    else {

      /* descending x_array: mirrored checks and bisection */

      if (x < x_array[sup]) {
        sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
        return _FAILURE_;
      }

      if (x > x_array[inf]) {
        sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
        return _FAILURE_;
      }

      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x > x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }

    /* standard cubic-spline evaluation on the bracketing interval */
    h = x_array[sup] - x_array[inf];
    b = (x-x_array[inf])/h;
    a = 1-b;

    *y =
      a * y_array[index_y * x_size + inf] +
      b * y_array[index_y * x_size + sup] +
      ((a*a*a-a)* ddy_array[index_y * x_size + inf] +
       (b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.;
  }

  return _SUCCESS_;
}
/**
 * Interpolate ln(y) as a function of ln(x) with a cubic spline over the
 * first x_stop tabulated points (second derivatives of the logs in
 * ddlogy_array), and extrapolate log-linearly for x above
 * x_array[x_stop-1], matching the slope of the spline at the last point.
 * Returns y = exp(interpolated ln y). Assumes positive x and y values,
 * since logs are taken.
 */
int array_interpolate_extrapolate_logspline_loglinear_one_column(
    double * x_array,
    int x_size,
    int x_stop,
    double * y_array, /* array of size x_size*y_size with elements
                         y_array[index_y*x_size+index_x] */
    int y_size,
    int index_y,
    double * ddlogy_array, /* array of size x_size*y_size */
    double x, /* input */
    double * y, /* output */
    ErrorMsg errmsg
    ) {

  int inf,sup,mid;
  double h,a,b;

  if (x > x_array[x_stop-1]) {

    /* extrapolate linearly ln(y) as a function of ln(x), using the
       first derivative of the spline at the last tabulated point:
       dlny/dlnx = (lny_{N-1}-lny_{N-2})/h + h/6*(ddlny_{N-2}+2*ddlny_{N-1}).
       (a and b are computed but not used on this branch) */
    h = log(x_array[x_stop-1]) - log(x_array[x_stop-2]);
    b = (log(x)-log(x_array[x_stop-2]))/h;
    a = 1-b;

    /* simpler two-point log-linear alternative, kept for reference: */
    /* *y = exp(a * log(y_array[index_y * x_size + (x_stop-2)]) + */
    /*          b * log(y_array[index_y * x_size + (x_stop-1)])); */

    *y = exp(log(y_array[index_y * x_size + (x_stop-1)])
             +(log(x)-log(x_array[x_stop-1]))
             *((log(y_array[index_y * x_size + (x_stop-1)])-log(y_array[index_y * x_size + (x_stop-2)]))/h
               +h/6.*(ddlogy_array[index_y * x_size + (x_stop-2)]+2.*ddlogy_array[index_y * x_size + (x_stop-1)])));
  }
  else {

    /* interpolate ln(y) as a function of ln(x) with a spline */

    inf=0;
    sup=x_stop-1;

    if (x_array[inf] < x_array[sup]){

      /* ascending x_array: bound checks then bisection */

      if (x < x_array[inf]) {
        sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
        return _FAILURE_;
      }

      if (x > x_array[sup]) {
        sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
        return _FAILURE_;
      }

      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }

    }
    else {

      /* descending x_array: mirrored checks and bisection */

      if (x < x_array[sup]) {
        sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
        return _FAILURE_;
      }

      if (x > x_array[inf]) {
        sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
        return _FAILURE_;
      }

      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x > x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }

    /* standard cubic-spline evaluation in (ln x, ln y) space */
    h = log(x_array[sup]) - log(x_array[inf]);
    b = (log(x)-log(x_array[inf]))/h;
    a = 1-b;

    *y = exp(a * log(y_array[index_y * x_size + inf]) +
             b * log(y_array[index_y * x_size + sup]) +
             ((a*a*a-a)* ddlogy_array[index_y * x_size + inf] +
              (b*b*b-b)* ddlogy_array[index_y * x_size + sup])*h*h/6.);
  }

  return _SUCCESS_;
}
/**
 * Linearly interpolate all columns at x, when x and y_i are all columns
 * of the same array, the x column is in growing order, and x is
 * presumably close to the value used in the previous call (whose
 * bracketing index was left in *last_index): the bracket is found by
 * stepping one line at a time from *last_index.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_growing_closeby(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    double x,
    int * last_index,
    double * result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int low,high,col;
  double w;

  low = *last_index;

  /* walk down while x lies below the current lower bracket */
  while (x < array[low*n_columns+index_x]) {
    low--;
    if (low < 0) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,array[index_x]);
      return _FAILURE_;
    }
  }

  /* walk up while x lies above the current upper bracket */
  high = low+1;
  while (x > array[high*n_columns+index_x]) {
    high++;
    if (high > (n_lines-1)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,array[(n_lines-1)*n_columns+index_x]);
      return _FAILURE_;
    }
  }
  low = high-1;

  *last_index = low;

  w = (x-array[low*n_columns+index_x])
    / (array[high*n_columns+index_x]-array[low*n_columns+index_x]);

  for (col=0; col<result_size; col++)
    result[col] = array[low*n_columns+col] * (1.-w)
      + w * array[high*n_columns+col];

  /* return the requested x exactly, not the interpolated abscissa */
  result[index_x] = x;

  return _SUCCESS_;
}
/**
 * Linearly interpolate one column (index_y) at x, when x and y are two
 * columns of the same array, the x column is in growing order, and x is
 * presumably close to the value used in the previous call (whose
 * bracketing index was left in *last_index).
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_one_growing_closeby(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    double x,
    int * last_index,
    int index_y,
    double * result,
    ErrorMsg errmsg) {

  int low,high;
  double w;

  low = *last_index;

  /* walk down while x lies below the current lower bracket */
  while (x < array[low*n_columns+index_x]) {
    low--;
    if (low < 0) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,array[index_x]);
      return _FAILURE_;
    }
  }

  /* walk up while x lies above the current upper bracket */
  high = low+1;
  while (x > array[high*n_columns+index_x]) {
    high++;
    if (high > (n_lines-1)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,array[(n_lines-1)*n_columns+index_x]);
      return _FAILURE_;
    }
  }
  low = high-1;

  *last_index = low;

  w = (x-array[low*n_columns+index_x])
    / (array[high*n_columns+index_x]-array[low*n_columns+index_x]);

  *result = array[low*n_columns+index_y] * (1.-w) + array[high*n_columns+index_y] * w;

  return _SUCCESS_;
}
/**
 * Spline-interpolate all columns at x, assuming x_array is in growing
 * order and x is very close to the value used in the previous call
 * (whose bracketing index was left in *last_index): the bracket is
 * found by stepping one line at a time from *last_index.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_spline_growing_closeby(
    double * x_array,
    int n_lines,
    double * array,
    double * array_splined,
    int n_columns,
    double x,
    int * last_index,
    double * result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int low,high,col;
  double step,wlow,whigh;

  low = *last_index;

  /* the cached index must point inside the table */
  class_test(low<0 || low>(n_lines-1),
             errmsg,
             "*lastindex=%d out of range [0:%d]\n",low,n_lines-1);

  /* step down while x lies below the current lower bracket */
  while (x < x_array[low]) {
    low--;
    if (low < 0) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,x_array[0]);
      return _FAILURE_;
    }
  }

  /* step up while x lies above the current upper bracket */
  high = low+1;
  while (x > x_array[high]) {
    high++;
    if (high > (n_lines-1)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,x_array[n_lines-1]);
      return _FAILURE_;
    }
  }
  low = high-1;

  *last_index = low;

  /* standard cubic-spline evaluation on the bracketing interval */
  step = x_array[high] - x_array[low];
  whigh = (x-x_array[low])/step;
  wlow = 1-whigh;

  for (col=0; col<result_size; col++)
    result[col] =
      wlow * array[low*n_columns+col] +
      whigh * array[high*n_columns+col] +
      ((wlow*wlow*wlow-wlow)* array_splined[low*n_columns+col] +
       (whigh*whigh*whigh-whigh)* array_splined[high*n_columns+col])*step*step/6.;

  return _SUCCESS_;
}
/**
 * Spline-interpolate all columns at x, for x_array in growing order,
 * assuming x is close (but maybe not adjacent) to the x of the previous
 * call: starting from *last_index, "hunt" outward with a step that grows
 * by one each iteration until x is bracketed, then bisect inside the
 * bracket (classic hunt-and-bisect search).
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_spline_growing_hunt(
    double * x_array,
    int n_lines,
    double * array,
    double * array_splined,
    int n_columns,
    double x,
    int * last_index,
    double * result,
    int result_size, /** from 1 to n_columns */
    ErrorMsg errmsg) {

  int inf,sup,mid,i,inc;
  double h,a,b;

  inc=1; /* hunt step, grows by one each iteration */

  if (x >= x_array[*last_index]) {

    if (x > x_array[n_lines-1]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,x_array[n_lines-1]);
      return _FAILURE_;
    }

    /* try closest neighboor upward */
    inf = *last_index;
    sup = inf + inc;
    if (x > x_array[sup]) {

      /* hunt upward */
      while (x > x_array[sup]) {
        inf = sup;
        inc += 1;
        sup += inc;
        if (sup > n_lines-1) {
          /* x <= x_array[n_lines-1] was checked above, so clamping
             terminates the loop */
          sup = n_lines-1;
        }
      }

      /* bisect inside [inf, sup] */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
    /* else: x already bracketed by [inf, inf+1] */
  }
  else {

    if (x < x_array[0]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,x_array[0]);
      return _FAILURE_;
    }

    /* try closest neighboor downward */
    sup = *last_index;
    inf = sup - inc;
    if (x < x_array[inf]) {

      /* hunt downward */
      while (x < x_array[inf]) {
        sup = inf;
        inc += 1;
        inf -= inc;
        if (inf < 0) {
          /* x >= x_array[0] was checked above, so clamping terminates
             the loop */
          inf = 0;
        }
      }

      /* bisect inside [inf, sup] */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
  }

  *last_index = inf;

  /* standard cubic-spline evaluation on the bracketing interval */
  h = x_array[sup] - x_array[inf];
  b = (x-x_array[inf])/h;
  a = 1-b;

  for (i=0; i<result_size; i++)
    *(result+i) =
      a * *(array+inf*n_columns+i) +
      b * *(array+sup*n_columns+i) +
      ((a*a*a-a)* *(array_splined+inf*n_columns+i) +
       (b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;

  return _SUCCESS_;
}
/**
 * Linear interpolation of all columns of array_y at x, where the x
 * values sit in column index_x of the separate table array_x (either
 * monotonic ordering). Note that array_y is stored transposed: element
 * (line, column) is array_y[column*n_lines+line].
 *
 * Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
 */
int array_interpolate_two(
    double * array_x,
    int n_columns_x,
    int index_x, /** from 0 to (n_columns_x-1) */
    double * array_y,
    int n_columns_y,
    int n_lines, /** must be the same for array_x and array_y */
    double x,
    double * result,
    int result_size, /** from 1 to n_columns_y */
    ErrorMsg errmsg) {

  int low,high,middle,col;
  double w;

  low = 0;
  high = n_lines-1;

  if (array_x[low*n_columns_x+index_x] < array_x[high*n_columns_x+index_x]) {

    /* ascending x column: bound checks then bisection */

    if (x < array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[high*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[high*n_columns_x+index_x]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < array_x[middle*n_columns_x+index_x])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* descending x column: mirrored checks and bisection */

    if (x < array_x[high*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[high*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > array_x[middle*n_columns_x+index_x])
        high = middle;
      else
        low = middle;
    }
  }

  w = (x-array_x[low*n_columns_x+index_x])
    / (array_x[high*n_columns_x+index_x]-array_x[low*n_columns_x+index_x]);

  for (col=0; col<result_size; col++)
    result[col] = array_y[col*n_lines+low] * (1.-w)
      + w * array_y[col*n_lines+high] ;

  return _SUCCESS_;
}
/**
 * Same as array_interpolate_two, but with the order of indices exchanged
 * in array_y: element (line, column) is array_y[line*n_columns_y+column].
 */
int array_interpolate_two_bis(
    double * array_x,
    int n_columns_x,
    int index_x, /** from 0 to (n_columns_x-1) */
    double * array_y,
    int n_columns_y,
    int n_lines, /** must be the same for array_x and array_y */
    double x,
    double * result,
    int result_size, /** from 1 to n_columns_y */
    ErrorMsg errmsg) {

  int low,high,middle,col;
  double w;

  low = 0;
  high = n_lines-1;

  if (array_x[low*n_columns_x+index_x] < array_x[high*n_columns_x+index_x]) {

    /* ascending x column: bound checks then bisection */

    if (x < array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[high*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[high*n_columns_x+index_x]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < array_x[middle*n_columns_x+index_x])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* descending x column: mirrored checks and bisection */

    if (x < array_x[high*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[high*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > array_x[middle*n_columns_x+index_x])
        high = middle;
      else
        low = middle;
    }
  }

  w = (x-array_x[low*n_columns_x+index_x])
    / (array_x[high*n_columns_x+index_x]-array_x[low*n_columns_x+index_x]);

  for (col=0; col<result_size; col++)
    result[col] = array_y[low*n_columns_y+col] * (1.-w)
      + w * array_y[high*n_columns_y+col] ;

  return _SUCCESS_;
}
/**
 * Linear interpolation of one column (index_y) of array_y at x, with the
 * x values in the one-column vector array_x (either monotonic ordering).
 * array_y is stored transposed: element (line, column) is
 * array_y[column*n_lines+line].
 */
int array_interpolate_two_arrays_one_column(
    double * array_x, /* assumed to be a vector (i.e. one column array) */
    double * array_y,
    int n_columns_y,
    int index_y, /* between 0 and (n_columns_y-1) */
    int n_lines, /** must be the same for array_x and array_y */
    double x,
    double * result,
    ErrorMsg errmsg) {

  int low,high,middle;
  double w;

  low = 0;
  high = n_lines-1;

  if (array_x[low] < array_x[high]) {

    /* ascending ordering: bound checks then bisection */

    class_test(x < array_x[low],
               errmsg,
               "x=%e < x_min=%e",x,array_x[low]);

    class_test(x > array_x[high],
               errmsg,
               "x=%e > x_max=%e",x,array_x[high]);

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x < array_x[middle])
        high = middle;
      else
        low = middle;
    }
  }
  else {

    /* descending ordering: mirrored checks and bisection */

    class_test(x < array_x[high],
               errmsg,
               "x=%e < x_min=%e",x,array_x[high]);

    class_test(x > array_x[low],
               errmsg,
               "x=%e > x_max=%e",x,array_x[low]);

    while (high-low > 1) {
      middle = (int)(0.5*(low+high));
      if (x > array_x[middle])
        high = middle;
      else
        low = middle;
    }
  }

  w = (x-array_x[low])/(array_x[high]-array_x[low]);

  *result = array_y[index_y*n_lines+low] * (1.-w)
    + w * array_y[index_y*n_lines+high];

  return _SUCCESS_;
}
/**
 * Linear interpolation of all columns at x, on an equally spaced grid of
 * n_lines points running from x_min to x_max.
 *
 * Called by transfer_solve().
 *
 * Fixes: (1) the lower grid abscissa is now x_min + index*step (the
 * x_min offset was previously dropped, which was only correct when
 * x_min = 0); (2) the lower index is clamped to n_lines-2, so that
 * x == x_max no longer reads one row past the end of the table
 * (previously index_minus+1 == n_lines was accessed, out of bounds).
 */
int array_interpolate_equal(
    double * array,
    int n_columns,
    int n_lines,
    double x,
    double x_min,
    double x_max,
    double * result,
    ErrorMsg errmsg) {

  int index_minus,i;
  double x_step,x_minus,weight;

  if (x < x_min) {
    sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_min=%e",__func__,__LINE__,x,x_min);
    return _FAILURE_;
  }

  if (x > x_max) {
    sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_max=%e",__func__,__LINE__,x,x_max);
    return _FAILURE_;
  }

  x_step = (x_max-x_min)/(n_lines-1);

  index_minus = (int)((x-x_min)/x_step);

  /* when x == x_max the division yields exactly n_lines-1: clamp so the
     last interval is used (weight evaluates to 1) */
  if (index_minus > n_lines-2) index_minus = n_lines-2;

  x_minus = x_min + index_minus * x_step;
  weight = (x-x_minus) / x_step;

  for (i=0; i<n_columns; i++)
    result[i] = array[n_columns*index_minus+i]*(1.-weight)
      + array[n_columns*(index_minus+1)+i]*weight;

  return _SUCCESS_;
}
/**
 * Cubic (4-point Lagrange) interpolation of y(x) on an equally spaced
 * grid x_j = x0 + j*dx, j = 0..Nx-1; dx may be negative for a
 * descending grid. Assumes Nx >= 4 so the 4-point stencil exists --
 * TODO confirm callers guarantee this.
 */
int array_interpolate_cubic_equal(
    double x0,
    double dx,
    double *yarray,
    int Nx,
    double x,
    double * result,
    ErrorMsg errmsg) {

  int i;
  double frac;

  /* range checks for both grid orientations */
  class_test((dx > 0 && (x<x0 || x>x0+dx*(Nx-1))),
             errmsg,
             "x=%e out of range [%e %e]",x,x0,x0+dx*(Nx-1));

  class_test((dx < 0 && (x>x0 || x<x0+dx*(Nx-1))),
             errmsg,
             "x=%e out of range [%e %e]",x,x0+dx*(Nx-1),x0);

  /* index of the grid point at or below x (in grid units); clamp so the
     4-point stencil [i-1, i+2] stays inside the table */
  i = (int)floor((x-x0)/dx);
  if (i<1) i=1;
  if (i>Nx-3) i=Nx-3;

  /* fractional offset of x inside interval [x_i, x_{i+1}] (may fall
     outside [0,1] near the edges after clamping) */
  frac = (x-x0)/dx-i;

  /* shift the pointer so that yarray[0..3] is the stencil around x */
  yarray += i-1;

  /* cubic Lagrange weights for nodes at frac = -1, 0, 1, 2 */
  *result=-yarray[0]*frac*(1.-frac)*(2.-frac)/6.
    +yarray[1]*(1.+frac)*(1.-frac)*(2.-frac)/2.
    +yarray[2]*(1.+frac)*frac*(2.-frac)/2.
    +yarray[3]*(1.+frac)*frac*(frac-1.)/6.;

  return _SUCCESS_;
}
/**
 * Fit the parabola y = A x^2 + B x + C through the three points
 * (x1,y1), (x2,y2), (x3,y3), then return its value, first and second
 * derivative at x. The x_i must be pairwise distinct (divisions below).
 */
int array_interpolate_parabola(double x1,
                               double x2,
                               double x3,
                               double x,
                               double y1,
                               double y2,
                               double y3,
                               double * y,
                               double * dy,
                               double * ddy,
                               ErrorMsg errmsg) {

  double coef_a,coef_b,coef_c;

  /*
    derivation: from a x_i**2 + b x_i + c = y_i,
    a (x1**2-x2**2) + b (x1-x2) = y1-y2
    a (x3**2-x2**2) + b (x3-x2) = y3-y2
    eliminating a gives
    b = [(y1-y2)(x3**2-x2**2) - (y3-y2)(x1**2-x2**2)]/(x1-x2)(x3-x2)(x3-x1)
  */

  coef_b = ((y1-y2)*(x3-x2)*(x3+x2) - (y3-y2)*(x1-x2)*(x1+x2))/(x1-x2)/(x3-x2)/(x3-x1);

  coef_a = (y1-y2-coef_b*(x1-x2))/(x1-x2)/(x1+x2);

  coef_c = y2 - coef_b*x2 - coef_a*x2*x2;

  *y = coef_a*x*x + coef_b*x + coef_c;

  *dy = 2.*coef_a*x + coef_b;

  *ddy = 2.*coef_a;

  return _SUCCESS_;
}
/**
 * Trapezoidal integral of column index_y with respect to column index_x
 * over all lines; the total is written to *result.
 *
 * Called by transfer_solve().
 */
int array_integrate_all(
    double * array,
    int n_columns,
    int n_lines,
    int index_x, /** from 0 to (n_columns-1) */
    int index_y,
    double *result) {

  int line;
  double total;

  total = 0.;

  for (line=1; line<n_lines; line++)
    total += 0.5
      * (array[line*n_columns+index_y] + array[(line-1)*n_columns+index_y])
      * (array[line*n_columns+index_x] - array[(line-1)*n_columns+index_x]);

  *result = total;

  return _SUCCESS_;
}
/**
 * Smooth one eta-slice of 'array' in the k direction with a fixed-weight
 * convolution filter of half-width 'radius' (supported radii 3-7; the
 * coefficient tables match quadratic/cubic Savitzky-Golay smoothing
 * weights). Only entries starting_k <= i < k_size-radius are replaced.
 *
 * NOTE(review): the coefficient index j-jmin assumes the full window
 * fits below i, i.e. i >= radius; presumably starting_k >= radius in all
 * calls -- confirm, otherwise the window is truncated at jmin=0 and the
 * weights misaligned.
 *
 * Fixes: both buffers are now freed on every error path (previously
 * leaked through class_stop / class_calloc), and the invalid-radius
 * message mentions radius 7, which is supported.
 */
int array_smooth_trg(double * array,
                     int k_size,
                     int starting_k,
                     int eta_size,
                     int index_eta,
                     int radius, /* 3, 4, 5, 6 or 7 */
                     ErrorMsg errmsg) {

  double * smooth;
  int i,j,jmin,jmax;
  double weigth; /* normalization of the coefficient table */
  double *coeff;

  smooth=malloc(k_size*sizeof(double));
  if (smooth == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
    return _FAILURE_;
  }

  coeff=calloc(2*radius+1,sizeof(double));
  if (coeff == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate coeff",__func__,__LINE__);
    free(smooth); /* fix: do not leak smooth on this error path */
    return _FAILURE_;
  }

  /* convolution weights for each supported window size */
  switch(radius){
  case 3:
    weigth = 21;
    coeff[0] = -2;
    coeff[1] = 3;
    coeff[2] = 6;
    coeff[3] = 7;
    coeff[4] = 6;
    coeff[5] = 3;
    coeff[6] = -2;
    break;
  case 4:
    weigth = 231;
    coeff[0] = -21;
    coeff[1] = 14;
    coeff[2] = 39;
    coeff[3] = 54;
    coeff[4] = 59;
    coeff[5] = 54;
    coeff[6] = 39;
    coeff[7] = 14;
    coeff[8] = -21;
    break;
  case 5:
    weigth = 429;
    coeff[0] = -36;
    coeff[1] = 9;
    coeff[2] = 44;
    coeff[3] = 69;
    coeff[4] = 84;
    coeff[5] = 89;
    coeff[6] = 84;
    coeff[7] = 69;
    coeff[8] = 44;
    coeff[9] = 9;
    coeff[10] = -36;
    break;
  case 6:
    weigth = 143;
    coeff[0] = -11;
    coeff[1] = 0;
    coeff[2] = 9;
    coeff[3] = 16;
    coeff[4] = 21;
    coeff[5] = 24;
    coeff[6] = 25;
    coeff[7] = 24;
    coeff[8] = 21;
    coeff[9] = 16;
    coeff[10] = 9;
    coeff[11] = 0;
    coeff[12] = -11;
    break;
  case 7:
    weigth = 1105;
    coeff[0] = -78;
    coeff[1] = -13;
    coeff[2] = 42;
    coeff[3] = 87;
    coeff[4] = 122;
    coeff[5] = 147;
    coeff[6] = 162;
    coeff[7] = 167;
    coeff[8] = 162;
    coeff[9] = 147;
    coeff[10] = 122;
    coeff[11] = 87;
    coeff[12] = 42;
    coeff[13] = -13;
    coeff[14] = -78;
    break;
  default:
    /* fix: free both buffers before failing, and list all supported radii */
    sprintf(errmsg,"%s(L:%d) Non valid radius %d: please chose between 3 4 5 6 or 7\n",__func__,__LINE__,radius);
    free(coeff);
    free(smooth);
    return _FAILURE_;
  }

  /* convolve into the scratch buffer first, so later windows still read
     the unsmoothed values */
  for (i=starting_k; i<k_size-radius; i++) {
    smooth[i]=0.;
    jmin = MAX(i-radius,0);
    jmax = MIN(i+radius,k_size-1);
    for (j=jmin; j <= jmax; j++) {
      smooth[i] += coeff[j-jmin]*array[j+k_size*index_eta];
    }
    smooth[i] /= weigth;
  }

  /* copy the smoothed slice back */
  for (i=starting_k; i<k_size-radius; i++)
    array[i+k_size*index_eta] = smooth[i];

  free(smooth);
  free(coeff);

  return _SUCCESS_;
}
/**
 * Replace column 'index' of a flattened (n_lines x n_columns) table by its
 * moving average of half-width 'radius' (window truncated at both edges,
 * so every output is an average of the values actually available).
 */
int array_smooth(double * array,
                 int n_columns,
                 int n_lines,
                 int index, /** from 0 to (n_columns-1) */
                 int radius,
                 ErrorMsg errmsg) {

  double * averaged;
  int line,j,jmin,jmax;
  double count;

  averaged = malloc(n_lines*sizeof(double));
  if (averaged == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
    return _FAILURE_;
  }

  /* first pass: average over the truncated window around each line */
  for (line=0; line<n_lines; line++) {
    jmin = MAX(line-radius,0);
    jmax = MIN(line+radius,n_lines-1);
    averaged[line] = 0.;
    count = 0.;
    for (j=jmin; j <= jmax; j++) {
      averaged[line] += array[j*n_columns+index];
      count += 1.;
    }
    averaged[line] /= count;
  }

  /* second pass: write the smoothed values back in place */
  for (line=0; line<n_lines; line++)
    array[line*n_columns+index] = averaged[line];

  free(averaged);

  return _SUCCESS_;
}
/**
* Compute quadrature weights for the trapezoidal integration method, when x is in growing order.
*
* @param x Input: Grid points on which f() is known.
* @param n Input: number of grid points.
* @param w_trapz Output: Weights of the trapezoidal method.
* @return the error status
*/
int array_trapezoidal_weights(
    double * __restrict__ x,
    int n,
    double * __restrict__ w_trapz,
    ErrorMsg errmsg
    ) {

  int k;

  if (n==1){
    /* degenerate grid: a single point spans no interval, weight 0 */
    w_trapz[0] = 0.0;
  }
  else if (n>1){
    /* boundary weights cover half of the single adjacent interval */
    w_trapz[0] = 0.5*(x[1]-x[0]);
    w_trapz[n-1] = 0.5*(x[n-1]-x[n-2]);
    /* interior weights cover half of each of the two adjacent intervals */
    for (k=1; k<(n-1); k++){
      w_trapz[k] = 0.5*(x[k+1]-x[k-1]);
    }
  }

  return _SUCCESS_;
}
/**
* Compute quadrature weights for the trapezoidal integration method, when x is in decreasing order.
*
* @param x Input: Grid points on which f() is known.
* @param n Input: number of grid points.
* @param w_trapz Output: Weights of the trapezoidal method.
* @return the error status
*/
int array_trapezoidal_mweights(
    double * __restrict__ x,
    int n,
    double * __restrict__ w_trapz,
    ErrorMsg errmsg
    ) {

  int k;

  if (n==1){
    /* NOTE(review): a single point gets weight 1.0 here, unlike
       array_trapezoidal_weights() which uses 0.0 — presumably intentional;
       verify against the callers of the decreasing-order variant */
    w_trapz[0] = 1.0;
  }
  else if (n>1){
    /* x is decreasing, so intervals are written (earlier - later) to keep
       the weights positive */
    w_trapz[0] = 0.5*(x[0]-x[1]);
    w_trapz[n-1] = 0.5*(x[n-2]-x[n-1]);
    /* interior weights: half of the two adjacent intervals */
    for (k=1; k<(n-1); k++){
      w_trapz[k] = 0.5*(x[k-1]-x[k+1]);
    }
  }

  return _SUCCESS_;
}
/**
* Compute integral of function using trapezoidal method.
*
* @param integrand Input: The function we are integrating.
* @param n Input: Compute integral on grid [0;n-1].
* @param w_trapz Input: Weights of the trapezoidal method.
* @param I Output: The integral.
* @return the error status
*/
int array_trapezoidal_integral(
    double * __restrict__ integrand,
    int n,
    double * __restrict__ w_trapz,
    double * __restrict__ I,
    ErrorMsg errmsg
    ) {

  int k;
  double acc;

  /* weighted sum of the samples: sum_k f_k * w_k */
  acc = 0.0;
  for (k=0; k<n; k++){
    acc += integrand[k]*w_trapz[k];
  }

  *I = acc;

  return _SUCCESS_;
}
/**
* Compute convolution integral of product of two functions using trapezoidal method.
*
* @param integrand1 Input: Function 1.
* @param integrand2 Input: Function 2.
* @param n Input: Compute integral on grid [0;n-1].
* @param w_trapz Input: Weights of the trapezoidal method.
* @param I Output: The integral.
* @return the error status
*/
int array_trapezoidal_convolution(
    double * __restrict__ integrand1,
    double * __restrict__ integrand2,
    int n,
    double * __restrict__ w_trapz,
    double * __restrict__ I,
    ErrorMsg errmsg
    ) {

  int k;
  double acc;

  /* weighted sum of the pointwise product: sum_k f_k * g_k * w_k */
  acc = 0.0;
  for (k=0; k<n; k++){
    acc += integrand1[k]*integrand2[k]*w_trapz[k];
  }

  *I = acc;

  return _SUCCESS_;
}
|
GB_binop__land_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint16)
// A*D function (colscale): GB (_AxD__land_uint16)
// D*A function (rowscale): GB (_DxB__land_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__land_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__land_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint16)
// C=scalar+B GB (_bind1st__land_uint16)
// C=scalar+B' GB (_bind1st_tran__land_uint16)
// C=A+scalar GB (_bind2nd__land_uint16)
// C=A'+scalar GB (_bind2nd_tran__land_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_UINT16 || GxB_NO_LAND_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the LAND operator (cij = (aij != 0) && (bij != 0)) when C, A
// and B are all dense; the loop itself lives in the included template,
// specialized by the GB_* macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__land_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the LAND
// operator. Returns GrB_NO_VALUE when this specialization is compiled out.
GrB_Info GB (_Cdense_accumB__land_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into a dense matrix C with LAND.
GrB_Info GB (_Cdense_accumb__land_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached: the block above already returned (generated-code pattern)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, combining
// entries with the LAND operator.
GrB_Info GB (_AxD__land_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, combining entries
// with the LAND operator.
GrB_Info GB (_DxB__land_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the LAND operator. When
// is_eWiseUnion is true, alpha/beta scalars substitute for entries missing
// from A or B respectively (GxB_eWiseUnion semantics).
GrB_Info GB (_AaddB__land_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // only eWiseUnion provides the fill-in scalars
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__land_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. LAND is commutative (GB_BINOP_FLIP is 0), so the flipxy
// argument needs no special handling here.
GrB_Info GB (_AemultB_02__land_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full.
GrB_Info GB (_AemultB_04__land_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__land_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LAND operator with the scalar bound as the
// first argument, over all bnz entries of B (skipping entries absent from
// the bitmap Bb). Cx and Bx may be aliased.
GrB_Info GB (_bind1st__land_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LAND operator with the scalar bound as the
// second argument, over all anz entries of A (skipping entries absent from
// the bitmap Ab). Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__land_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LAND with the scalar bound first.
GrB_Info GB (_bind1st_tran__land_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LAND with the scalar bound second;
// the per-entry work is done by the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__land_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_sgemm_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM-based convolution core for fp16 storage + fp16 arithmetic on RISC-V.
// bottom_im2col holds the im2col-expanded input (size x maxk x inch); the
// kernel must have been interleaved by
// convolution_im2col_sgemm_transform_kernel_fp16sa_rvv() into groups of
// 8/4/1 output channels. Computes top_blob = kernel * bottom_im2col + bias.
static void im2col_sgemm_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
#if __riscv_vector
    // packn = number of fp16 lanes per vector register (vlenb bytes / 2)
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);
#endif

    // Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const __fp16* bias = _bias;

    // permute: repack bottom_im2col so that packn consecutive output pixels
    // are contiguous, letting the GEMM loops below do unit-stride vector loads
    Mat tmp;
#if __riscv_vector
    if (size >= packn)
        tmp.create(packn * maxk, inch, size / packn + size % packn, 2u, 1, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
    {
        int nn_size = size / packn;

        // full packn-wide tiles
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * packn;

            __fp16* tmpptr = tmp.channel(i / packn);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    vse16_v_f16m1(tmpptr, vle16_v_f16m1(img0, vl), vl);
                    img0 += size;
                    tmpptr += packn;
                }
            }
        }

        // leftover pixels, one per tmp channel
        int remain_size_start = nn_size * packn;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i / packn + i % packn);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }
#else // __riscv_vector
    tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }
#endif // __riscv_vector

#if __riscv_vector
    // process output channels in groups of 8, then 4, then singly
    int nn_outch = outch >> 3;
    int remain_outch_start = nn_outch << 3;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        __fp16* outptr0 = top_blob.channel(p);
        __fp16* outptr1 = top_blob.channel(p + 1);
        __fp16* outptr2 = top_blob.channel(p + 2);
        __fp16* outptr3 = top_blob.channel(p + 3);
        __fp16* outptr4 = top_blob.channel(p + 4);
        __fp16* outptr5 = top_blob.channel(p + 5);
        __fp16* outptr6 = top_blob.channel(p + 6);
        __fp16* outptr7 = top_blob.channel(p + 7);

        const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const __fp16* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // vector path: packn output pixels x 8 output channels per iteration
        for (; i + (packn - 1) < size; i += packn)
        {
            const __fp16* tmpptr = tmp.channel(i / packn);
            const __fp16* kptr = kernel.channel(p / 8);

            int nn = inch * maxk; // inch always > 0

            vfloat16m1_t _sum0 = vfmv_v_f_f16m1(biasptr[0], vl);
            vfloat16m1_t _sum1 = vfmv_v_f_f16m1(biasptr[1], vl);
            vfloat16m1_t _sum2 = vfmv_v_f_f16m1(biasptr[2], vl);
            vfloat16m1_t _sum3 = vfmv_v_f_f16m1(biasptr[3], vl);
            vfloat16m1_t _sum4 = vfmv_v_f_f16m1(biasptr[4], vl);
            vfloat16m1_t _sum5 = vfmv_v_f_f16m1(biasptr[5], vl);
            vfloat16m1_t _sum6 = vfmv_v_f_f16m1(biasptr[6], vl);
            vfloat16m1_t _sum7 = vfmv_v_f_f16m1(biasptr[7], vl);

            for (int q = 0; q < nn; q++)
            {
                // one input value vector, fused-multiply-accumulated against
                // the 8 interleaved kernel scalars
                vfloat16m1_t _val = vle16_v_f16m1(tmpptr, vl);
                _sum0 = vfmacc_vf_f16m1(_sum0, kptr[0], _val, vl);
                _sum1 = vfmacc_vf_f16m1(_sum1, kptr[1], _val, vl);
                _sum2 = vfmacc_vf_f16m1(_sum2, kptr[2], _val, vl);
                _sum3 = vfmacc_vf_f16m1(_sum3, kptr[3], _val, vl);
                _sum4 = vfmacc_vf_f16m1(_sum4, kptr[4], _val, vl);
                _sum5 = vfmacc_vf_f16m1(_sum5, kptr[5], _val, vl);
                _sum6 = vfmacc_vf_f16m1(_sum6, kptr[6], _val, vl);
                _sum7 = vfmacc_vf_f16m1(_sum7, kptr[7], _val, vl);

                tmpptr += packn;
                kptr += 8;
            }

            vse16_v_f16m1(outptr0, _sum0, vl);
            vse16_v_f16m1(outptr1, _sum1, vl);
            vse16_v_f16m1(outptr2, _sum2, vl);
            vse16_v_f16m1(outptr3, _sum3, vl);
            vse16_v_f16m1(outptr4, _sum4, vl);
            vse16_v_f16m1(outptr5, _sum5, vl);
            vse16_v_f16m1(outptr6, _sum6, vl);
            vse16_v_f16m1(outptr7, _sum7, vl);

            outptr0 += packn;
            outptr1 += packn;
            outptr2 += packn;
            outptr3 += packn;
            outptr4 += packn;
            outptr5 += packn;
            outptr6 += packn;
            outptr7 += packn;
        }
        // scalar tail: remaining pixels one at a time
        for (; i < size; i++)
        {
            const __fp16* tmpptr = tmp.channel(i / packn + i % packn);
            const __fp16* kptr = kernel.channel(p / 8);

            int nn = inch * maxk; // inch always > 0

            __fp16 sum0 = biasptr[0];
            __fp16 sum1 = biasptr[1];
            __fp16 sum2 = biasptr[2];
            __fp16 sum3 = biasptr[3];
            __fp16 sum4 = biasptr[4];
            __fp16 sum5 = biasptr[5];
            __fp16 sum6 = biasptr[6];
            __fp16 sum7 = biasptr[7];

            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];
                sum4 += tmpptr[0] * kptr[4];
                sum5 += tmpptr[0] * kptr[5];
                sum6 += tmpptr[0] * kptr[6];
                sum7 += tmpptr[0] * kptr[7];

                tmpptr++;
                kptr += 8;
            }

            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;
            outptr4[0] = sum4;
            outptr5[0] = sum5;
            outptr6[0] = sum6;
            outptr7[0] = sum7;

            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
            outptr4++;
            outptr5++;
            outptr6++;
            outptr7++;
        }
    }

    // groups of 4 output channels
    nn_outch = (outch - remain_outch_start) >> 2;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        __fp16* outptr0 = top_blob.channel(p);
        __fp16* outptr1 = top_blob.channel(p + 1);
        __fp16* outptr2 = top_blob.channel(p + 2);
        __fp16* outptr3 = top_blob.channel(p + 3);

        const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const __fp16* biasptr = bias ? bias + p : zeros;

        int i = 0;
        for (; i + (packn - 1) < size; i += packn)
        {
            const __fp16* tmpptr = tmp.channel(i / packn);
            // kernel channel index mirrors the 8/4/1 interleave layout
            const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4);

            int nn = inch * maxk; // inch always > 0

            vfloat16m1_t _sum0 = vfmv_v_f_f16m1(biasptr[0], vl);
            vfloat16m1_t _sum1 = vfmv_v_f_f16m1(biasptr[1], vl);
            vfloat16m1_t _sum2 = vfmv_v_f_f16m1(biasptr[2], vl);
            vfloat16m1_t _sum3 = vfmv_v_f_f16m1(biasptr[3], vl);

            for (int q = 0; q < nn; q++)
            {
                vfloat16m1_t _val = vle16_v_f16m1(tmpptr, vl);
                _sum0 = vfmacc_vf_f16m1(_sum0, kptr[0], _val, vl);
                _sum1 = vfmacc_vf_f16m1(_sum1, kptr[1], _val, vl);
                _sum2 = vfmacc_vf_f16m1(_sum2, kptr[2], _val, vl);
                _sum3 = vfmacc_vf_f16m1(_sum3, kptr[3], _val, vl);

                tmpptr += packn;
                kptr += 4;
            }

            vse16_v_f16m1(outptr0, _sum0, vl);
            vse16_v_f16m1(outptr1, _sum1, vl);
            vse16_v_f16m1(outptr2, _sum2, vl);
            vse16_v_f16m1(outptr3, _sum3, vl);

            outptr0 += packn;
            outptr1 += packn;
            outptr2 += packn;
            outptr3 += packn;
        }
        for (; i < size; i++)
        {
            const __fp16* tmpptr = tmp.channel(i / packn + i % packn);
            const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4);

            int nn = inch * maxk; // inch always > 0

            __fp16 sum0 = biasptr[0];
            __fp16 sum1 = biasptr[1];
            __fp16 sum2 = biasptr[2];
            __fp16 sum3 = biasptr[3];

            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];

                tmpptr++;
                kptr += 4;
            }

            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;

            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
        }
    }

    // remaining single output channels
    remain_outch_start += nn_outch << 2;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);

        const __fp16 bias0 = bias ? bias[p] : 0.f;

        int i = 0;
        for (; i + (packn - 1) < size; i += packn)
        {
            const __fp16* tmpptr = tmp.channel(i / packn);
            const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

            vfloat16m1_t _sum0 = vfmv_v_f_f16m1(bias0, vl);

            for (int q = 0; q < nn; q++)
            {
                _sum0 = vfmacc_vf_f16m1(_sum0, kptr[0], vle16_v_f16m1(tmpptr, vl), vl);
                tmpptr += packn;
                kptr++;
            }

            vse16_v_f16m1(outptr0, _sum0, vl);

            outptr0 += packn;
        }
        for (; i < size; i++)
        {
            const __fp16* tmpptr = tmp.channel(i / packn + i % packn);
            const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

            __fp16 sum0 = bias0;

            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                tmpptr++;
                kptr++;
            }

            outptr0[0] = sum0;

            outptr0++;
        }
    }
#else // __riscv_vector
    // plain scalar fallback when RVV is not available
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);

        const __fp16 bias0 = bias ? bias[p] : 0.f;

        for (int i = 0; i < size; i++)
        {
            const __fp16* tmpptr = tmp.channel(i);
            const __fp16* kptr = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            __fp16 sum0 = bias0;

            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                tmpptr++;
                kptr++;
            }

            outptr0[0] = sum0;

            outptr0++;
        }
    }
#endif // __riscv_vector
}
// Interleave the float32 convolution weights into the 8/4/1 output-channel
// layout consumed by im2col_sgemm_fp16sa_rvv(), converting them to fp16.
static void convolution_im2col_sgemm_transform_kernel_fp16sa_rvv(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8b-maxk-inch-outch/8b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __riscv_vector
    kernel_tm.create(8 * maxk, inch, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u);

    int q = 0;
    // groups of 8 output channels: weights stored round-robin, 8 per step
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const Mat k7 = kernel.channel(q + 7);

        __fp16* g00 = kernel_tm.channel(q / 8);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);
            const float* k40 = k4.row(p);
            const float* k50 = k5.row(p);
            const float* k60 = k6.row(p);
            const float* k70 = k7.row(p);

            for (int k = 0; k < maxk; k++)
            {
                // narrow each float weight to fp16 at pack time
                g00[0] = (__fp16)k00[k];
                g00[1] = (__fp16)k10[k];
                g00[2] = (__fp16)k20[k];
                g00[3] = (__fp16)k30[k];
                g00[4] = (__fp16)k40[k];
                g00[5] = (__fp16)k50[k];
                g00[6] = (__fp16)k60[k];
                g00[7] = (__fp16)k70[k];

                g00 += 8;
            }
        }
    }
    // groups of 4 output channels
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);

        __fp16* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = (__fp16)k00[k];
                g00[1] = (__fp16)k10[k];
                g00[2] = (__fp16)k20[k];
                g00[3] = (__fp16)k30[k];

                g00 += 4;
            }
        }
    }
    // remaining single output channels
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);

        __fp16* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = (__fp16)k00[k];

                g00 += 1;
            }
        }
    }
#else
    // no RVV: the GEMM path uses the weights as-is
    kernel_tm = kernel;
#endif // __riscv_vector
}
// Convolution via im2col + GEMM for fp16 storage/arithmetic: expand the
// input into bottom_im2col (one row of size outw*outh per kernel tap and
// input channel), then hand off to im2col_sgemm_fp16sa_rvv().
static void convolution_im2col_sgemm_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
    {
        // gap = distance from the end of one output row's samples to the
        // start of the next row's samples in the input image
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);

            // one output row per kernel tap (u,v), honoring dilation
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.