source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
matrix.h | /**
* @file matrix.h This code provide a templated matrix implementation
* @author TPOC: contact@palisade-crypto.org
*
* @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_MATRIX_H
#define LBCRYPTO_MATH_MATRIX_H
#include <iostream>
#include <functional>
#include <cmath>
#include <stdexcept>
#include "../math/backend.h"
#include "../lattice/backend.h"
#include "../math/nbtheory.h"
#include "../math/distrgen.h"
#include "../encoding/encodings.h"
#include "../utils/inttypes.h"
#include "../utils/utilities.h"
#include "../utils/memory.h"
using std::invalid_argument;
namespace lbcrypto {
template<class Element>
class Matrix : public Serializable {
public:
typedef vector<vector<Element>> data_t;
typedef vector<Element> data_row_t;
typedef std::function<Element(void)> alloc_func;
/**
* Constructor that initializes matrix values using a zero allocator
*
* @param &allocZero lambda function for zero initialization.
* @param &rows number of rows.
* @param &rows number of columns.
*/
Matrix(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
    data.resize(rows);
    for (auto row = data.begin(); row != data.end(); ++row) {
        // Reserve up front: one allocation per row instead of
        // growth-doubling reallocations while pushing cols elements.
        row->reserve(cols);
        for (size_t col = 0; col < cols; ++col) {
            row->push_back(allocZero());
        }
    }
}
//TODO: add Clear();
/**
* Constructor that initializes matrix values using a distribution generation allocator
*
* @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects)
* @param &rows number of rows.
* @param &rows number of columns.
* @param &allocGen lambda function for initialization using a distribution generator.
*/
Matrix(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);
/**
* Constructor of an empty matrix.
* SetSize must be called on this matrix to use it
* SetAlloc needs to be called if 0 passed to constructor
* This mostly exists to support deserializing
*
* @param &allocZero lambda function for zero initialization.
*/
Matrix(alloc_func allocZero = 0) : data(), rows(0), cols(0), allocZero(allocZero) {}
/**
* Set the size of a matrix, elements are zeroed out
*
* @param rows number of rows
* @param cols number of colums
*/
void SetSize(size_t rows, size_t cols) {
    // Only an empty (default-constructed or freshly deserialized)
    // matrix may be sized; resizing a populated matrix is a logic error.
    if( this->rows != 0 || this->cols != 0 )
        throw std::logic_error("You cannot SetSize on a non-empty matrix");
    this->rows = rows;
    this->cols = cols;
    data.resize(rows);
    for (auto row = data.begin(); row != data.end(); ++row) {
        // Avoid repeated reallocation while zero-filling the row.
        row->reserve(cols);
        for (size_t col = 0; col < cols; ++col) {
            row->push_back(allocZero());
        }
    }
}
/**
* SetAllocator - set the function to allocate a zero;
* basically only required for deserializer
*
* @param allocZero
*/
void SetAllocator(alloc_func allocZero) {
// Needed after deserialization: load() cannot restore the allocator lambda.
this->allocZero = allocZero;
}
/**
* Copy constructor
*
* @param &other the matrix object to be copied
*/
Matrix(const Matrix<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
// Element-by-element deep copy of the source's backing store.
deepCopyData(other.data);
}
/**
* Assignment operator
*
* @param &other the matrix object whose values are to be copied
* @return the resulting matrix
*/
Matrix<Element>& operator=(const Matrix<Element>& other);
/**
* In-place change of the current matrix to a matrix of all ones
*
* @return the resulting matrix
*/
Matrix<Element>& Ones();
// Macro for convenient definitions of class implementations of special functions
#define ONES_FOR_TYPE(T) \
template<> \
Matrix<T>& Matrix<T>::Ones() { \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
data[row][col] = 1; \
} \
} \
return *this; \
}
/**
* In-place modulo reduction
*
* @return the resulting matrix
*/
Matrix<Element>& ModEq(const Element &modulus);
/**
* modular subtraction
*
* @return the resulting matrix
*/
Matrix<Element>& ModSubEq(Matrix<Element> const& b, const Element &modulus);
/**
* Fill matrix using the same element
*
* @param &val the element the matrix is filled by
*
* @return the resulting matrix
*/
Matrix<Element>& Fill(const Element &val);
/**
* In-place change of the current matrix to Identity matrix
*
* @return the resulting matrix
*/
Matrix<Element>& Identity();
#define IDENTITY_FOR_TYPE(T) \
template<> \
Matrix<T>& Matrix<T>::Identity() { \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
if (row == col) { \
data[row][col] = 1; \
} else { \
data[row][col] = 0; \
} \
} \
} \
return *this; \
}
/**
* Sets the first row to be powers of two for when the base is two
*
* @param base is the base the digits of the matrix are represented in
* @return the resulting matrix
*/
Matrix<Element> GadgetVector(int64_t base = 2) const;
#define GADGET_FOR_TYPE(T) \
template<> \
Matrix<T> Matrix<T>::GadgetVector(int64_t base) const { \
Matrix<T> g(allocZero, rows, cols); \
auto base_matrix = allocZero(); \
size_t k = cols/rows; \
base_matrix = base; \
g(0, 0) = 1; \
for (size_t i = 1; i < k; i++) { \
g(0, i) = g(0, i-1) * base_matrix; \
} \
for (size_t row = 1; row < rows; row++) { \
for (size_t i = 0; i < k; i++) { \
g(row, i + row*k) = g(0, i); \
} \
} \
return g; \
}
#define GADGET_FOR_TYPE_DCRT(T) \
template<> \
Matrix<T> Matrix<T>::GadgetVector(int64_t base) const \
{ \
Matrix<T> g(allocZero, rows, cols); \
auto base_matrix = allocZero(); \
base_matrix = base; \
size_t bk = 1; \
\
auto params = g(0,0).GetParams()->GetParams(); \
\
uint64_t digitCount = (long)ceil(log2(params[0]->GetModulus().ConvertToDouble())/log2(base)); \
\
for (size_t k = 0; k < digitCount; k++) { \
for (size_t i = 0; i < params.size(); i++) { \
NativePoly temp(params[i]); \
temp = bk; \
g(0,k+i*digitCount).SetElementAtIndex(i,temp); \
} \
bk *= base; \
} \
\
size_t kCols = cols/rows; \
for (size_t row = 1; row < rows; row++) { \
for (size_t i = 0; i < kCols; i++) { \
g(row, i + row*kCols) = g(0, i); \
} \
} \
return g; \
}
/**
* Computes the infinity norm
*
* @return the norm in double format
*/
double Norm() const;
#define NORM_FOR_TYPE(T) \
template<> \
double Matrix<T>::Norm() const { \
double retVal = 0.0; \
double locVal = 0.0; \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
locVal = data[row][col].Norm(); \
if (locVal > retVal) { \
retVal = locVal; \
} \
} \
} \
return retVal; \
}
/**
* Matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
Matrix<Element> Mult(Matrix<Element> const& other) const;
/**
* Operator for matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
Matrix<Element> operator*(Matrix<Element> const& other) const {
return Mult(other);
}
/**
* Multiplication of matrix by a scalar
*
* @param &other the multiplier element
* @return the result of multiplication
*/
Matrix<Element> ScalarMult(Element const& other) const {
    Matrix<Element> result(*this);
    // Row-outer iteration: each inner pass walks one contiguous row
    // vector (cache-friendly) and OpenMP parallelizes over independent
    // rows. The previous col-outer order touched every row per pass.
#pragma omp parallel for
    for (size_t row = 0; row < result.rows; ++row) {
        for (size_t col = 0; col < result.cols; ++col) {
            result.data[row][col] = result.data[row][col] * other;
        }
    }
    return result;
}
/**
* Operator for scalar multiplication
*
* @param &other the multiplier element
* @return the result of multiplication
*/
Matrix<Element> operator*(Element const& other) const {
return ScalarMult(other);
}
/**
* Equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool Equal(Matrix<Element> const& other) const {
    // Differently shaped matrices can never be equal.
    if (rows != other.rows || cols != other.cols)
        return false;
    // Element-wise comparison; bail out on the first mismatch.
    for (size_t r = 0; r < rows; ++r)
        for (size_t c = 0; c < cols; ++c)
            if (data[r][c] != other.data[r][c])
                return false;
    return true;
}
/**
* Operator for equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool operator==(Matrix<Element> const& other) const {
return Equal(other);
}
/**
* Operator for non-equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool operator!=(Matrix<Element> const& other) const {
return !Equal(other);
}
/**
* Get property to access the data as a vector of vectors
*
* @return the data as vector of vectors
*/
const data_t& GetData() const {
return data;
}
/**
* Get property to access the number of rows in the matrix
*
* @return the number of rows
*/
size_t GetRows() const {
return rows;
}
/**
* Get property to access the number of columns in the matrix
*
* @return the number of columns
*/
size_t GetCols() const {
return cols;
}
/**
* Get property to access the zero allocator for the matrix
*
* @return the lambda function corresponding to the element zero allocator
*/
alloc_func GetAllocator() const {
return allocZero;
}
/**
* Sets the evaluation or coefficient representation for all ring elements that support the SetFormat method
*
* @param &format the enum value corresponding to coefficient or evaluation representation
*/
void SetFormat(Format format);
/**
* Matrix addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
Matrix<Element> Add(Matrix<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
        throw invalid_argument("Addition operands have incompatible dimensions");
    }
    Matrix<Element> result(*this);
    // Parallelize over rows so each thread accumulates into one
    // contiguous row vector; the former col-outer loop hopped across
    // every row on each inner pass (poor locality).
#pragma omp parallel for
    for (size_t i = 0; i < rows; ++i) {
        for (size_t j = 0; j < cols; ++j) {
            result.data[i][j] += other.data[i][j];
        }
    }
    return result;
}
/**
* Operator for matrix addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
Matrix<Element> operator+(Matrix<Element> const& other) const {
return this->Add(other);
}
/**
* Operator for in-place addition
*
* @param &other the matrix to be added
* @return the resulting matrix (same object)
*/
Matrix<Element>& operator+=(Matrix<Element> const& other);
/**
* Matrix substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix
*/
Matrix<Element> Sub(Matrix<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
        throw invalid_argument("Subtraction operands have incompatible dimensions");
    }
    Matrix<Element> result(allocZero, rows, other.cols);
    // Row-outer iteration for contiguous access; iterations are
    // independent, so the swap does not change the result.
#pragma omp parallel for
    for (size_t i = 0; i < rows; ++i) {
        for (size_t j = 0; j < cols; ++j) {
            result.data[i][j] = data[i][j] - other.data[i][j];
        }
    }
    return result;
}
/**
* Operator for matrix substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix
*/
Matrix<Element> operator-(Matrix<Element> const& other) const {
return this->Sub(other);
}
/**
* Operator for in-place matrix substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix (same object)
*/
Matrix<Element>& operator-=(Matrix<Element> const& other);
/**
* Matrix transposition
*
* @return the resulting matrix
*/
Matrix<Element> Transpose() const;
// YSP The signature of this method needs to be changed in the future
/**
* Matrix determinant - found using Laplace formula with complexity O(d!), where d is the dimension
*
* @param *result where the result is stored
*/
void Determinant(Element *result) const;
//Element Determinant() const;
/**
* Cofactor matrix - the matrix of determinants of the minors A_{ij} multiplied by -1^{i+j}
*
* @return the cofactor matrix for the given matrix
*/
Matrix<Element> CofactorMatrix() const;
/**
* Add rows to bottom of the matrix
*
* @param &other the matrix to be added to the bottom of current matrix
* @return the resulting matrix
*/
Matrix<Element>& VStack(Matrix<Element> const& other);
/**
* Add columns the right of the matrix
*
* @param &other the matrix to be added to the right of current matrix
* @return the resulting matrix
*/
Matrix<Element>& HStack(Matrix<Element> const& other);
/**
* Matrix indexing operator - writeable instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
Element& operator()(size_t row, size_t col) {
return data[row][col];
}
/**
* Matrix indexing operator - read-only instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
Element const& operator()(size_t row, size_t col) const {
return data[row][col];
}
/**
* Matrix row extractor
*
* @param &row row index
* @return the row at the index
*/
Matrix<Element> ExtractRow(size_t row) const {
    // Guard against out-of-range access: the old code indexed the
    // backing vector unchecked (undefined behavior on a bad index).
    if (row >= rows)
        throw invalid_argument("ExtractRow: row index out of range");
    Matrix<Element> result(this->allocZero, 1, this->cols);
    for (size_t col = 0; col < cols; ++col) {
        result(0, col) = data[row][col];
    }
    return result;
}
/**
* Matrix column extractor
*
* @param &col col index
* @return the col at the index
*/
Matrix<Element> ExtractCol(size_t col) const {
    // Guard against out-of-range access (previously unchecked UB).
    if (col >= cols)
        throw invalid_argument("ExtractCol: column index out of range");
    Matrix<Element> result(this->allocZero, this->rows, 1);
    for (size_t i = 0; i < this->rows; i++) {
        result(i, 0) = data[i][col];
    }
    return result;
}
/**
* Matrix rows extractor in a range from row_start to row_and; inclusive
*
* @param &row_start &row_end row indices
* @return the rows in the range delimited by indices inclusive
*/
inline Matrix<Element> ExtractRows(size_t row_start, size_t row_end) const {
    // Validate the inclusive range: row_end < row_start would make
    // row_end-row_start+1 wrap around (size_t underflow), and an
    // out-of-range row_end indexed the backing vector unchecked.
    if (row_start > row_end || row_end >= rows)
        throw invalid_argument("ExtractRows: invalid row range");
    Matrix<Element> result(this->allocZero, row_end - row_start + 1, this->cols);
    for (size_t row = row_start; row <= row_end; row++) {
        for (size_t col = 0; col < cols; ++col) {
            result(row - row_start, col) = data[row][col];
        }
    }
    return result;
}
// Stream a matrix as "[ [ e e ... ]\n ... ]"; row elements are
// space-separated and each row is closed on its own line. The exact
// spacing is part of the existing output format -- do not change it.
friend std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m) {
os << "[ ";
for (size_t row = 0; row < m.GetRows(); ++row) {
os << "[ ";
for (size_t col = 0; col < m.GetCols(); ++col) {
os << m(row,col) << " ";
}
os << "]\n";
}
os << " ]\n";
return os;
}
/**
* Call switch format for each (ring) element
*
*/
void SwitchFormat();
#define NOT_AN_ELEMENT_MATRIX(T) \
template<> \
void Matrix<T>::SwitchFormat() { \
PALISADE_THROW(not_available_error, "Not a matrix of Elements"); \
}
/*
* Multiply the matrix by a vector whose elements are all 1's. This causes the elements of each
* row of the matrix to be added and placed into the corresponding position in the output vector.
*/
Matrix<Element> MultByUnityVector() const;
/*
* Multiply the matrix by a vector of random 1's and 0's, which is the same as adding select
* elements in each row together.
* Return a vector that is a rows x 1 matrix.
*/
Matrix<Element> MultByRandomVector(std::vector<int> ranvec) const;
// cereal save: serializes the element data and dimensions.
// Note: allocZero (a std::function) is intentionally NOT serialized;
// callers must SetAllocator() after loading.
template <class Archive>
void save( Archive & ar, std::uint32_t const version ) const
{
ar( ::cereal::make_nvp("d", data) );
ar( ::cereal::make_nvp("r", rows) );
ar( ::cereal::make_nvp("c", cols) );
}
// cereal load: restores data/rows/cols written by save() above.
// Rejects archives produced by a newer library version.
template <class Archive>
void load( Archive & ar, std::uint32_t const version )
{
if( version > SerializedVersion() ) {
PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library");
}
ar( ::cereal::make_nvp("d", data) );
ar( ::cereal::make_nvp("r", rows) );
ar( ::cereal::make_nvp("c", cols) );
// users will need to SetAllocator for any newly deserialized matrix
}
std::string SerializedObjectName() const { return "Matrix"; }
static uint32_t SerializedVersion() { return 1; }
private:
data_t data;
size_t rows;
size_t cols;
alloc_func allocZero;
//mutable int NUM_THREADS = 1;
//deep copy of data - used for copy constructor
// Deep copy of the backing store - used by the copy constructor.
// std::vector's copy assignment already performs exactly the
// element-wise deep copy the previous hand-rolled clear/resize/
// push_back loop implemented.
void deepCopyData(data_t const& src) {
    data = src;
}
};
/**
* Operator for scalar multiplication of matrix
*
* @param &e element
* @param &M matrix
* @return the resulting matrix
*/
// Free-function form of scalar multiplication (scalar on the left);
// forwards to the member ScalarMult so e*M == M*e.
template<class Element>
Matrix<Element> operator*(Element const& e, Matrix<Element> const& M) {
return M.ScalarMult(e);
}
/**
* Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
template<typename Element>
Matrix<typename Element::Integer> Rotate(Matrix<Element> const& inMat);
/**
* Each element becomes a square matrix with columns of that element's
* rotations in coefficient form. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
template<typename Element>
Matrix<typename Element::Vector> RotateVecResult(Matrix<Element> const& inMat);
/**
* Stream output operator
*
* @param &os stream
* @param &m matrix to be outputted
* @return the chained stream
*/
template<class Element>
std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m);
/**
* Gives the Choleshky decomposition of the input matrix.
* The assumption is that covariance matrix does not have large coefficients because it is formed by
* discrete gaussians e and s; this implies int32_t can be used
* This algorithm can be further improved - see the Darmstadt paper section 4.4
* http://eprint.iacr.org/2013/297.pdf
*
* @param &input the matrix for which the Cholesky decomposition is to be computed
* @return the resulting matrix of floating-point numbers
*/
Matrix<double> Cholesky(const Matrix<int32_t> &input);
void Cholesky(const Matrix<int32_t> &input, Matrix<double> &result);
/**
* Convert a matrix of integers from BigInteger to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
Matrix<int32_t> ConvertToInt32(const Matrix<BigInteger> &input, const BigInteger& modulus);
/**
* Convert a matrix of BigVector to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
Matrix<int32_t> ConvertToInt32(const Matrix<BigVector> &input, const BigInteger& modulus);
/**
* Split a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param ¶ms Poly element params
* @return the resulting matrix of Poly
*/
template<typename Element>
Matrix<Element> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename Element::Params> params);
#define SPLIT64_FOR_TYPE(T) \
template<> \
Matrix<T> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, COEFFICIENT); \
size_t rows = other.GetRows() / n; \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int64_t> values(n); \
for (size_t i = 0; i < n; ++i) \
values[i] = other(row*n + i, 0); \
result(row, 0) = values; \
} \
return result; \
}
/**
* Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param ¶ms Poly element params
* @return the resulting matrix of Poly
*/
template<typename Element>
Matrix<Element> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<typename Element::Params> params);
#define SPLIT32ALT_FOR_TYPE(T) \
template<> \
Matrix<T> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, COEFFICIENT); \
size_t rows = other.GetRows(); \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int32_t> values(n); \
for (size_t i = 0; i < n; ++i) \
values[i] = other(row, i); \
result(row, 0) = values; \
} \
return result; \
}
/**
* Split a vector of int64_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param ¶ms Poly element params
* @return the resulting matrix of Poly
*/
template<typename Element>
Matrix<Element> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename Element::Params> params);
#define SPLIT64ALT_FOR_TYPE(T) \
template<> \
Matrix<T> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, COEFFICIENT); \
size_t rows = other.GetRows(); \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int64_t> values(n); \
for (size_t i = 0; i < n; ++i) \
values[i] = other(row, i); \
result(row, 0) = values; \
} \
return result; \
}
}
#endif // LBCRYPTO_MATH_MATRIX_H
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <mpi.h>
unsigned char * readFile(char* inFile[], unsigned char * image);
void save(char* inFile[], unsigned char* image, long fileLength);
unsigned char normalize(double value);
double convolution(int i, int j, unsigned char *image, int height, int width, int dim, const double mask[][7]);
/* Distribute a 7x7 convolution of a raw 8bpp image across MPI ranks,
 * with OpenMP parallelism inside each rank. Rank 0 reads the input,
 * broadcasts it, gathers the per-rank slices and writes the result. */
int main(int argc, char * argv[])
{
	int col, row;
	/* Validate arguments before touching argv[1]/argv[2] (was UB). */
	if (argc < 3) {
		fprintf(stderr, "Usage: %s <width> <height>\n", argv[0]);
		return 1;
	}
	int width = atoi(argv[1]);
	int height = atoi(argv[2]);
	char * dir = "../labMPI/infile";
	char * bin = "result";
	char * fileExtension = ".bin";
	char outFile[64], inFile[64];
	unsigned char *image;
	unsigned char *part;
	long size;
	int worldSize, worldRank, dataPiece;
	double start, end, time;
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &worldSize);
	MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
	size = width * height;
	/* NOTE(review): pixels beyond dataPiece*worldSize are silently left
	 * unprocessed when size is not divisible by worldSize -- confirm the
	 * inputs are always an exact multiple of the communicator size. */
	dataPiece = size / worldSize;
	image = (unsigned char *)malloc(size * sizeof(unsigned char));
	part = (unsigned char *)malloc(dataPiece * sizeof(unsigned char));
	if (worldRank == 0)
	{
		/* Build "<dir><width>_<height>.bin" and "result<width>_<height>.bin". */
		strcpy(inFile, dir);
		strcat(inFile, argv[1]);
		strcat(inFile, "_");
		strcat(inFile, argv[2]);
		strcat(inFile, fileExtension);
		strcpy(outFile, bin);
		strcat(outFile, argv[1]);
		strcat(outFile, "_");
		strcat(outFile, argv[2]);
		strcat(outFile, fileExtension);
		/* readFile() allocates its own buffer; release the one malloc'd
		 * above first so rank 0 does not leak it. */
		free(image);
		image = readFile(inFile, NULL);
	}
	MPI_Bcast(image, size, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
	if (worldRank == 0)
		start = MPI_Wtime();
	/* 7x7 Laplacian-of-Gaussian style edge-detection kernel. */
	const double mask[][7] ={{0,0,-1,-1,-1,0,0 },
	                         {0,-1,-3,-3,-3,-1,0 },
	                         { -1,-3,0,7,0,-3,-1},
	                         { -1,-3,7,24,7,-3,-1},
	                         {-1,-3,0,7,0,-3,-1},
	                         { 0,-1,-3,-3,-3,-1,0},
	                         {0,0,-1,-1,-1,0,0 }};
#pragma omp parallel for private(col, row) schedule(dynamic, 100)
	for (int i = dataPiece * worldRank; i < dataPiece * (worldRank + 1); i++)
	{
		col = i % width;
		row = i / width;
		/* The kernel above is 7x7: dim must be 7. The old call passed 5,
		 * which convolved against a mis-centered 5x5 corner of the mask. */
		part[i - dataPiece * worldRank] = normalize(convolution(col, row, image, height, width, 7, mask));
	}
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Gather(part, dataPiece, MPI_UNSIGNED_CHAR, image, dataPiece, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
	if (worldRank == 0)
	{
		end = MPI_Wtime();
		time = (double)end - start;
		printf("Time: %f \n", time);
		save(outFile, image, size);
		printf("Saved file: %s\n", outFile);
	}
	free(image);
	free(part);
	MPI_Finalize();
	return 0;
}
/* Read the entire binary file named by inFile into a freshly malloc'd
 * buffer and return it; the caller owns (and must free) the result.
 * The `image` argument is ignored and kept only for call compatibility.
 * NOTE(review): the parameter is declared char*[] but every caller
 * passes a char[] path; prototype and definition should both become
 * `const char *` in one coordinated change. */
unsigned char * readFile(char* inFile[], unsigned char * image)
{
	FILE *file = fopen((const char *)inFile, "rb");
	if (file == NULL) {
		fprintf(stderr, "readFile: cannot open input file\n");
		exit(1);
	}
	fseek(file, 0, SEEK_END);
	long fileLength = ftell(file);
	fseek(file, 0, SEEK_SET);
	image = (unsigned char *)malloc(fileLength * sizeof(unsigned char));
	if (image == NULL) {
		fclose(file);
		fprintf(stderr, "readFile: out of memory\n");
		exit(1);
	}
	if (fread(image, sizeof(unsigned char), fileLength, file) != (size_t)fileLength)
		fprintf(stderr, "readFile: short read\n"); /* proceed with what was read */
	fclose(file);
	return image;
}
/* Write fileLength bytes of image to the path named by inFile.
 * NOTE(review): same char*[] / char[] parameter mismatch as readFile. */
void save(char* inFile[], unsigned char* image, long fileLength)
{
	FILE *write = fopen((const char *)inFile, "wb");
	if (write == NULL) {
		fprintf(stderr, "save: cannot open output file\n");
		return;
	}
	/* sizeof(unsigned char) is 1 by definition; count is fileLength. */
	fwrite(image, sizeof(unsigned char), fileLength, write);
	fclose(write);
}
double convolution(int i, int j, unsigned char *image, int height, int width, int dim, const double mask[][7])
{
int x, y, middle, ii, jj;
x = y = dim;
middle = x / 2;
double tmp = 0;
for (long m = 0; m < x; ++m) {
for (long n = 0; n < y; ++n) {
ii = i + (middle - m);
jj = j + (middle - n);
if (ii >= 0 && ii < width && jj >= 0 && jj < height)
tmp += image[jj * width + ii] * mask[m][n];
}
}
return tmp;
}
/* Clamp a convolution result into the representable pixel range
 * [0, 255] and truncate it to an 8-bit value. */
unsigned char normalize(double value)
{
	if (value < 0)
		return (unsigned char)0;
	if (value > 255)
		return (unsigned char)255;
	return (unsigned char)value;
}
|
thread_variable.h | #ifndef COMPLIANT_THREAD_VARIABLE_H
#define COMPLIANT_THREAD_VARIABLE_H
#include <map>

#if defined(WIN32)
#include <omp.h>
#endif
/// Lazily provides one instance of A per OpenMP thread, keyed by the
/// thread id. Instances are heap-allocated on first access and owned
/// by this object (freed by clear()/the destructor).
template<class A>
class thread_variable {
    typedef std::map<int, A*> value_type;
    value_type value;   // thread id -> that thread's instance
    /// Id of the calling OpenMP thread (0 in non-OpenMP builds).
    static int id() {
#ifdef _OPENMP
        int res = omp_get_thread_num();
#else
        int res = 0;
#endif
        return res;
    }
    /// Return the calling thread's instance, constructing it on first
    /// use. Callers must hold the critical section in operator->;
    /// the map itself is not otherwise synchronized.
    A* get() {
        const int tid = id();
        typename value_type::iterator slot = value.find(tid);
        if (slot != value.end())
            return slot->second;
        A* fresh = new A;
        value[tid] = fresh;
        return fresh;
    }
public:
    /// Smart-pointer style access to the per-thread instance.
    A* operator->() {
        A* res = 0;
#ifdef _OPENMP
#pragma omp critical
#endif
        res = get();
        return res;
    }
    /// Destroy every per-thread instance. NOT synchronized: only call
    /// while no other thread is using this variable.
    void clear() {
        typename value_type::iterator it = value.begin();
        for (typename value_type::iterator end = value.end(); it != end; ++it)
            delete it->second;
        value.clear();
    }
    ~thread_variable() { clear(); }
};
#endif
|
o10glogon_fmt_plug.c | /*
* This software was written by JimF jfoug AT cox dot net
* in 2016. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2016 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
* This is oracle O10g-logon format. NOTE, if the hashes came from a
* Oracle 10g, and the hash data can be sniffed from network traffic
* TNS records.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_o10glogon;
#elif FMT_REGISTERS_H
john_register_one(&fmt_o10glogon);
#else
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "aes.h"
#include "md5.h"
#include "unicode.h"
#include "base64_convert.h"
#include "memdbg.h"
#define FORMAT_LABEL "o10glogon"
#define FORMAT_NAME "Oracle 10g-logon protocol"
#define FORMAT_TAG "$o10glogon$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES-AES128-MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MAX_USERNAME_LEN 30
#define SALT_SIZE (sizeof(ora10g_salt))
#define SALT_ALIGN (sizeof(unsigned int))
#define CIPHERTEXT_LENGTH 16
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+64+1+64+1+160)
//#define DEBUG_ORACLE
//
// The keys are $o10glogon$oracle-user-name$auth_sess_key$auth_sess_key_c$auth_password
// These can be found in sniffed network traffic.
static struct fmt_tests tests[] = {
{"$o10glogon$jimf$6DA8BE6D9713B7F9190DC0F87F1BB1BDFFE44EB1892E40915592980ECCE60AA3$1C08586339E5806DD45CF8E6D83CC6EA2B8CDCDE7CC9F00ADF43DA0F07309090$E2F3D778138213BF01FD743F2092FC976FD60AB2C9F4A1B1D9B08439325421B1", "JimF"},
{"$o10glogon$SESA218390$3B16F14C3DC6048C993000E2BF543BAB489DF7BD8D6061B7274CC9E1DB743E08$1695D5255EDF15CA6B1F14C5CB39C72C98E2CC2B62FB3224ECA5A6A6790511D4$F0F64E384E567F44E9DF8D7F4C029AA59770FA75094F1C26A66C45AFA9913987", "jimf"},
{"$o10glogon$TESTUSER$EEABE812530C6D4432F781DFC14A7C7F81EAE1804F340D3289732477FD351FCC$7B244D7A1DB5ABE553FB9B7325110024911FCBE95EF99E7965A754BC41CF31C0$4C5E28E66B6382117F9D41B08957A3B9E363B42760C33B44CA5D53EA90204ABE", "TESTPASS"},
{NULL}
};
typedef struct ora10g_salt_t {
int userlen, auth_pass_len;
UTF16 user[MAX_USERNAME_LEN+1];
unsigned char auth_sesskey[32];
unsigned char auth_sesskey_c[32];
unsigned char auth_pass[80];
} ora10g_salt;
static ora10g_salt *cur_salt;
static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1];
static char (*plain_key)[PLAINTEXT_LENGTH + 1];
static int *cur_key_len;
static int *cracked, any_cracked;
static DES_key_schedule desschedule1; // key 0x0123456789abcdef
/* Format setup: install the fixed DES key used by the Oracle scheme and
 * allocate the per-candidate working buffers. Under OpenMP the key
 * counts are scaled by thread count times OMP_SCALE. */
static void init(struct fmt_main *self)
{
/* 0x0123456789abcdef -- the well-known constant DES key of the scheme. */
DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1);
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
/* One slot per candidate key: UTF-16 key, plaintext copy, length, flag. */
cur_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cur_key));
plain_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*plain_key));
cur_key_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cur_key_len));
cracked = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cracked));
}
/* Release the buffers allocated by init(), in reverse order. */
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(cur_key_len);
MEM_FREE(plain_key);
MEM_FREE(cur_key);
}
/* Syntactic validation of one ciphertext line:
 *   $o10glogon$<user>$<64 hex>$<64 hex>$<hex, nonzero multiple of 16>
 * The username must convert to at most MAX_USERNAME_LEN UTF-16 units.
 * Returns 1 when the line is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *cp;
char tmp[32*5+1];
UTF16 cur_key_mixedcase[MAX_USERNAME_LEN+2];
int len, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ciphertext += FORMAT_TAG_LEN;
/* Username field runs up to the next '$'. */
cp = strchr(ciphertext, '$');
if (!cp)
return 0;
// make sure username fits in MAX_USERNAME_LEN UTF16
if (cp-ciphertext > sizeof(tmp)-1)
return 0;
memcpy(tmp, ciphertext, cp-ciphertext);
tmp[cp-ciphertext] = 0;
len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp));
/* Negative length = invalid encoding; zero for a non-empty field too. */
if (len < 0 || (len == 0 && cp-ciphertext)) {
static int error_shown = 0;
#ifdef HAVE_FUZZ
if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK))
return 0;
#endif
/* Warn only once per run about a mis-encoded input file. */
if (!error_shown)
fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label);
error_shown = 1;
return 0;
}
if (len > MAX_USERNAME_LEN)
return 0;
/* Two session-key fields, each exactly 64 hex digits. */
ciphertext = cp+1;
cp = strchr(ciphertext, '$');
if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64)
return 0;
ciphertext = cp+1;
cp = strchr(ciphertext, '$');
if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64)
return 0;
/* Final field: pure hex, nonzero, multiple of 16 (AES block aligned),
 * and nothing after it. */
ciphertext = cp+1;
len = strlen(ciphertext);
cp = strchr(ciphertext, '$');
if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra)
return 0;
return 1;
}
/* Canonicalize a hash line for comparison: everything after the format
 * tag is upper-cased (hex digits and username) into a static buffer. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[MAX_HASH_LEN*5+1];
strnzcpy(out, ciphertext, MAX_HASH_LEN+1);
enc_strupper(&out[FORMAT_TAG_LEN]);
return out;
}
/* Install the salt (parsed user + session keys + encrypted password)
 * that subsequent crypt_all calls will test against. */
static void set_salt(void *salt) {
cur_salt = (ora10g_salt *)salt;
}
/* Store candidate password `key` at slot `index`: keep the plaintext
 * for get_key(), and derive the upper-cased UTF-16BE form the Oracle
 * algorithm hashes. */
static void oracle_set_key(char *key, int index) {
UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1];
UTF16 *c;
int key_length;
strnzcpy(plain_key[index], key, sizeof(*plain_key));
// Can't use enc_to_utf16_be() because we need to do utf16_uc later
key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key));
/* Negative result means the input was truncated; recover the length. */
if (key_length < 0)
key_length = strlen16(cur_key_mixedcase);
// We convert and uppercase in one shot
key_length = utf16_uc(cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length);
// we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase,
// and we can not 'fix' our password. We simply have to 'not' properly decrypt this one, but protect ourselves.
if (key_length < 0)
key_length *= -1;
cur_key_len[index] = key_length * sizeof(UTF16);
// Now byte-swap to UTF16-BE
c = cur_key[index];
while((*c = *c << 8 | *c >> 8))
c++;
#ifdef DEBUG_ORACLE
dump_stuff_msg("cur_key ", (unsigned char*)cur_key[index], cur_key_len[index]);
#endif
}
/* Return the plaintext candidate stored by oracle_set_key() for this slot. */
static char *get_key(int index) {
return plain_key[index];
}
/*
 * Decrypt `input_len` bytes with AES-128-CBC using `aes_key_bytes` as the
 * key and an all-zero IV (the TNS protocol uses a zero IV here).
 * `input_len` must be a multiple of the AES block size; no padding is removed.
 */
static void ORACLE_TNS_Decrypt_AES128_CBC (unsigned char aes_key_bytes[16], unsigned char* input, int input_len, unsigned char* output)
{
unsigned char iv[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
AES_KEY key;
AES_set_decrypt_key(aes_key_bytes, 128, &key);
AES_cbc_encrypt(input, output, input_len, &key, iv, AES_DECRYPT);
}
/*
 * Scan a decrypted buffer of `len` bytes for the printable-ASCII password
 * that starts it, verify the remainder of the buffer is uniform padding,
 * and NUL-terminate the password in place.
 *
 * Returns the password length, or -1 when the buffer does not look like
 * "printable password + uniform padding" (i.e. the decryption failed).
 *
 * NOTE(review): with a default-signed `char`, padding bytes with the high
 * bit set compare unequal to the promoted `padding_byte` and make the
 * uniformity check return -1; this matches the original behavior.
 */
static int terminate_ascii_string (char* ascii_string_not_terminated, int len)
{
int pos;
// Find the first non-printable byte; it marks the start of the padding.
// Bound the scan by len: the previous unbounded loop could read past the
// end of the buffer whenever every byte happened to be printable.
for (pos = 0; pos < len; pos++) {
if ((ascii_string_not_terminated[pos] < 32) ||
    (ascii_string_not_terminated[pos] > 126))
break;
}
// No padding byte found within the buffer: not a valid decryption.
if (pos == len)
return -1;
{
int ascii_len = pos;
unsigned char padding_byte = ascii_string_not_terminated[pos];
// Every trailing byte must repeat the same padding byte.
for (; pos < len; pos++) {
if (ascii_string_not_terminated[pos] != padding_byte)
return -1;
}
ascii_string_not_terminated[ascii_len] = 0;
return ascii_len;
}
}
/*
 * Derive the shared 10g session key: XOR the server and client session-key
 * halves byte by byte, then MD5 the 16-byte result into `output`.
 */
static void ORACLE_TNS_Combine_SessKeys (unsigned char server_sesskey[16], unsigned char client_sesskey[16], unsigned char* output)
{
unsigned char xored[16];
MD5_CTX md5;
int idx = 0;
while (idx < 16) {
xored[idx] = server_sesskey[idx] ^ client_sesskey[idx];
idx++;
}
MD5_Init (&md5);
MD5_Update (&md5, xored, 16);
MD5_Final (output, &md5);
}
/*
 * Recover the plaintext password from a captured 10g TNS logon exchange,
 * given the candidate's Oracle hash (used as the AES key seed).
 * Returns the password length, or -1 if the decryption does not yield
 * "printable password + uniform padding". On success the password is
 * copied (without a guaranteed NUL) into `decrypted`; the caller compares
 * with a length-bounded strncmp.
 */
static int ORACLE_TNS_Decrypt_Password_10g (unsigned char OracleHash[8], unsigned char *auth_sesskey, unsigned char *auth_sesskey_c, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted)
{
int passlen = 0;
unsigned char aes_key_bytes[32];
unsigned char decrypted_server_sesskey[32];
unsigned char decrypted_client_sesskey[32];
unsigned char combined_sesskeys[16];
char decrypted_password[64];
// AES key = 8-byte Oracle hash, zero padded.
memset (aes_key_bytes,0,sizeof(aes_key_bytes));
memcpy (aes_key_bytes,OracleHash,8);
// Decrypt server and client session keys
ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey, 32, decrypted_server_sesskey);
ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey_c, 32, decrypted_client_sesskey);
// Combine server and client session keys (upper 16 bytes of each)
ORACLE_TNS_Combine_SessKeys (&decrypted_server_sesskey[16], &decrypted_client_sesskey[16], combined_sesskeys);
// Decrypt auth password with combined session key
ORACLE_TNS_Decrypt_AES128_CBC (combined_sesskeys, auth_password, auth_passwordlen, (unsigned char*) decrypted_password);
// terminate decrypted password with NULL; the first 16 decrypted bytes are
// skipped (protocol prefix), the password starts at offset 16.
passlen = terminate_ascii_string (&decrypted_password[16], auth_passwordlen-16);
if (passlen != -1)
strncpy ((char*)decrypted, &decrypted_password[16], passlen);
return passlen;
}
/*
 * For each candidate: compute the classic Oracle 10g DES-based hash of
 * username+password, use it as the key seed to decrypt the captured TNS
 * password blob, and mark the slot cracked when the decrypted password
 * matches the candidate plaintext.
 *
 * NOTE(review): when built without _OPENMP, the `for` statement is compiled
 * out and the block body runs exactly once (idx == 0) — this is only correct
 * if MAX_KEYS_PER_CRYPT is 1 in non-OpenMP builds; verify against the
 * format's constants.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int idx = 0;
if (any_cracked) {
memset(cracked, 0, sizeof(*cracked) * count);
any_cracked = 0;
}
#ifdef DEBUG_ORACLE
// NOTE(review): `buf` and `key_length` are not in scope here; this debug
// block looks stale and would not compile with DEBUG_ORACLE defined.
dump_stuff_msg("cur_salt ", buf, cur_salt->userlen+key_length);
#endif
#ifdef _OPENMP
#pragma omp parallel for
for (idx = 0; idx < count; idx++)
#endif
{
unsigned char buf[256], buf1[256];
unsigned int l;
uint32_t iv[2];
DES_key_schedule desschedule2;
// Hash input is UTF-16BE username followed by UTF-16BE upper-cased key.
l = cur_salt->userlen + cur_key_len[idx];
memcpy(buf, cur_salt->user, cur_salt->userlen);
memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]);
// First DES-CBC pass with the fixed schedule; the final CBC IV becomes
// the key for the second pass (Oracle's legacy hash construction).
iv[0] = iv[1] = 0;
DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT);
DES_set_key((DES_cblock *)iv, &desschedule2);
iv[0] = iv[1] = 0;
DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT);
#ifdef DEBUG_ORACLE
dump_stuff_msg("  iv (the hash key) ", (unsigned char*)&iv[0], 8);
#endif
// The 8-byte final IV is the Oracle hash; try it against the sniffed
// TNS exchange. `buf` is reused as the decrypted-password output.
ORACLE_TNS_Decrypt_Password_10g ((unsigned char*)iv, cur_salt->auth_sesskey, cur_salt->auth_sesskey_c, cur_salt->auth_pass, cur_salt->auth_pass_len, buf);
if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx])))
{
cracked[idx] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
return count;
}
/*
 * Parse a canonical ciphertext ("$tag$user$sesskey$sesskey_c$authpass")
 * into the static ora10g_salt: UTF-16BE username plus the three hex blobs
 * decoded to raw bytes. Returns a pointer to static storage (JtR convention).
 * NOTE(review): the strchr() results are used unchecked — this relies on
 * valid() having already vetted the field layout.
 */
static void *get_salt(char *ciphertext)
{
static ora10g_salt salt;
UTF8 tmp[MAX_USERNAME_LEN*5+1];
char *cp;
memset(&salt, 0, sizeof(salt));
ciphertext += FORMAT_TAG_LEN;
cp = strchr(ciphertext, '$');
strncpy((char*)tmp, ciphertext, cp-ciphertext);
tmp[cp-ciphertext] = 0;
salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext);
// Negative return means truncation; recover the converted length.
if (salt.userlen < 0)
salt.userlen = strlen16(salt.user);
// Store the byte length (UTF-16 units * 2), as consumed by crypt_all().
salt.userlen *= 2;
base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey,e_b64_raw,32,0,0);
cp = strchr(cp+1, '$');
base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey_c,e_b64_raw,32,0,0);
cp = strchr(cp+1, '$') + 1;
salt.auth_pass_len = strlen(cp)/2;
base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0);
return &salt;
}
// Public domain hash function by DJ Bernstein (salt is a username)
static int salt_hash(void *salt)
{
// djb2 xor variant over the UTF-16 units of the salt, skipping the first
// element (matches the original's `((UTF16*)salt) + 1` starting point).
unsigned int h = 5381;
UTF16 *u;
for (u = ((UTF16*)salt) + 1; *u; u++)
h = h * 33 ^ *u;
return h & (SALT_HASH_SIZE - 1);
}
/* True if crypt_all() cracked any candidate in the current batch. */
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
/* Per-candidate check; the parameter named `count` is actually the index
 * (JtR calls cmp_one() with a candidate index in this position). */
static int cmp_one(void *binary, int count)
{
return cracked[count];
}
/* Full comparison already happened in crypt_all(); nothing left to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* John the Ripper format descriptor for the Oracle 10g TNS logon format. */
struct fmt_main fmt_o10glogon = {
{
// fmt_params: identity, limits and behavior flags.
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
// fmt_methods: lifecycle, parsing, hashing and comparison hooks.
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
oracle_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__abs_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_fp32_fc32)
// op(A') function: GB (_unop_tran__abs_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cabsf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cabsf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = cabsf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = cabsf(aij) elementwise: Cx [p] = |Ax [p]| for a single-complex
// input and float output. `Ab` selects entries in the bitmap case; work is
// split statically across `nthreads` OpenMP threads.
GrB_Info GB (_unop_apply__abs_fp32_fc32)
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cabsf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cabsf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = |A'| : transpose A and apply cabsf; the generic transpose skeleton in
// GB_unop_transpose.c is specialized via the GB_* macros defined above.
GrB_Info GB (_unop_tran__abs_fp32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
reduce.h |
#ifndef REDUCE_H
#define REDUCE_H
#include <dll.h>
//#include <string>
#include <helpers/sharedmem.h>
#include <stdio.h>
#include <helpers/shape.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <templatemath.h>
#include <helper_cuda.h>
#include <nd4jmalloc.h>
#include <pairwise_util.h>
#include <ops/ops.h>
#include <ops/special_accumulation_ops.h>
#include <op_boilerplate.h>
#pragma once
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif
#ifndef _OPENMP
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif
#include "legacy_ops.h"
//an op for the kernel
namespace functions {
namespace reduce {
/**
* A reduce function
* reduces a vector down to
* a subset of itself
* via aggregating member
* elements.
*/
template<typename T>
class ReduceFunction {
public:
#ifdef __CUDACC__
// Reduce each TAD (tensor-along-dimension) of dx to one scalar in `result`,
// one TAD per thread block; specialized for the 1D/ews>=1 fast path.
template<typename OpType>
static inline __device__ void transformCuda1D(T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
// Ops such as variance implement their own accumulation; delegate to it.
if (OpType::requiresSpecialAccumulation) {
OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets);
return;
}
//shared memory space for storing intermediate results
__shared__ T *sPartials;// = (T *)manager->getSharedReductionBuffer();
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = (T *) shmem;
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[r];
T *rX = dx + tadOffsetForBlock;
sPartials[threadIdx.x] = OpType::startingValue(rX);
if (tadEWS >= 1) {
// contiguous TAD: strided access with elementwise stride
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(rX[i * tadEWS], extraParams), extraParams);
}
} else {
// non-contiguous TAD: compute each offset from coordinates
__shared__ int tadRank;
__shared__ int *tadShape;
__shared__ int *tadStride;
int xCoord[MAX_RANK];
if (threadIdx.x == 0) {
tadRank = shape::rank(tadOnlyShapeInfo);
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
}
__syncthreads();
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank, tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams);
}
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams);
}
}
}
// Reduce the whole array to a single scalar in result[0]; when more than
// one block runs, the last block to finish combines all per-block partials.
template<typename OpType>
static inline __device__ void execScalarCuda(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
T *reductionBuffer,
UnifiedSharedMemory *manager,
int *tadOnlyShapeInfo) {
int elementWiseStride = shape::elementWiseStride(xShapeInfo);
int n = shape::length(xShapeInfo);
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory space for storing intermediate results
T *sPartials = (T *)manager->getSharedReductionBuffer();
sPartials[threadIdx.x] = OpType::startingValue(dx);
if (elementWiseStride >= 1) {
for (int i = tid; i < n; i += (blockDim.x * gridDim.x)) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[i * elementWiseStride], extraParams), extraParams);
}
}
else {
// no usable elementwise stride: translate linear index to offset
__shared__ int rank;
__shared__ int *xShape;
__shared__ int *xStride;
if (threadIdx.x == 0) {
rank = shape::rank(xShapeInfo);
xShape = shape::shapeOf(xShapeInfo);
xStride = shape::stride(xShapeInfo);
}
__syncthreads();
int ind2sub[MAX_RANK];
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
shape::ind2subC(rank, xShape, i, ind2sub);
Nd4jIndex offset = shape::getOffset(0, xShape, xStride, ind2sub, rank);
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[offset], extraParams), extraParams);
}
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), extraParams);
__syncthreads();
if (gridDim.x > 1) {
// Multi-block: publish this block's partial, then let exactly one
// (the last-arriving) block combine all partials into result[0].
// NOTE(review): slot 16384 of the reduction buffer is used as the
// cross-block ticket counter — assumes reductionBuffer is allocated
// large enough for this; confirm against the launch code.
unsigned int *tc = (unsigned int *)reductionBuffer;
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0) {
reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],n,extraParams);
}
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(dx);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[0] = OpType::postProcess(sPartials[0], n, extraParams);
}
}
}
else {
// Single block: thread 0 writes the final value directly.
if (threadIdx.x == 0) {
unsigned int *tc = (unsigned *)reductionBuffer;
tc[16384] = 0;
result[0] = OpType::postProcess(sPartials[0], n, extraParams);
}
}
}
/**
* Kernel invocation for reduce
* @param n the length of the buffer
* @param dx the input
* @param xShapeInfo the shape information for the input
* @param extraParams extra parameters (starting value,..)
* @param result the result buffer
* @param resultShapeInfo the shapeinformation for the result buffer
* @param gpuInformation the gpu information (shared memory allocated,..)
* @param dimension the dimension to do reduce along long
* @param dimensionLength the length of the dimension buffer
* @param postProcessOrNot whether to reduce or not
*/
template<typename OpType>
static inline __device__ void transformCuda3D(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
if (OpType::requiresSpecialAccumulation) {
OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets);
return;
}
//shared memory space for storing intermediate results
__shared__ T *sPartials; // = (T *)manager->getSharedReductionBuffer();
__shared__ int tadLength;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ int *tadShape;
__shared__ int *tadStride;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = (T *) shmem;
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
}
__syncthreads();
// coordinate buffer fixed at rank 3 for this specialization
int xCoord[3];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock);
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank, tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams);
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0)
result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams);
}
}
// General N-dimensional TAD reduction (same scheme as transformCuda3D but
// with a MAX_RANK coordinate buffer).
template<typename OpType>
static inline __device__ void transformCudaXD(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer,
UnifiedSharedMemory *manager,
int *tadOnlyShapeInfo,
Nd4jIndex *tadOffsets) {
if (OpType::requiresSpecialAccumulation) {
OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets);
return;
}
//shared memory space for storing intermediate results
__shared__ T *sPartials;
// __shared__ shape::TAD *tad;
__shared__ int tadLength;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ int *tadShape;
__shared__ int *tadStride;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = (T *) shmem;
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
}
__syncthreads();
int xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock);
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank, tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams);
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0)
result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams);
}
}
/**
* Tree-reduce the first numItems entries of sPartials into sPartials[0].
* @param sPartialsRef
* @param tid
* @param extraParams
*/
template<typename OpType>
__device__ static inline void aggregatePartials(T *sPartials, int tid, int numItems, T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
int floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems) {
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
}
__syncthreads();
}
}
#endif
/**
* Reduce down to 1 number
* @param x the input
* @param xShapeInfo the shape information
* for the input
* @param extraParams the extra params
* @return
*/
template<typename OpType>
static _CUDA_H T execScalar(T *x, int *xShapeInfo, T *extraParams) {
const Nd4jIndex length = shape::length(xShapeInfo);
int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if (xElementWiseStride >= 1) {
// contiguous fast path
return execScalar<OpType>(x, xElementWiseStride, length, extraParams);
}
else {
// strided/permuted layout: walk the array with the raw-iterator macros
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int xStridesIter[MAX_RANK];
int *xShape = shape::shapeOf(xShapeInfo);
int *xStride = shape::stride(xShapeInfo);
T start = OpType::startingValue(x);
int rank = shape::rank(xShapeInfo);
if (PrepareOneRawArrayIter<T>(rank,
xShape,
x,
xStride,
&rank,
shapeIter,
&x,
xStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
/* Process the innermost dimension */
const T *xIter = x;
start = OpType::update(start, OpType::op(xIter[0], extraParams), extraParams);
}
ND4J_RAW_ITER_ONE_NEXT(dim,
rank,
coord,
shapeIter,
x,
xStridesIter);
start = OpType::postProcess(start, shape::length(xShapeInfo), extraParams);
}
else {
printf("Unable to prepare array\n");
}
return start;
}
}
// Runtime-dispatch entry point: select the OpType by opNum.
static T execScalar(const int opNum, T *x, int *xShapeInfo, T *extraParams) {
RETURNING_DISPATCH_BY_OPNUM(execScalar, PARAMS(x, xShapeInfo, extraParams), REDUCE_OPS);
}
// Runtime-dispatch entry point for dimensional reduce.
static void exec(const int opNum,
T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *tadShapeInfo,
Nd4jIndex *tadOffset) {
DISPATCH_BY_OPNUM(exec, PARAMS(x,
xShapeInfo,
extraParams,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
tadShapeInfo,
tadOffset),
REDUCE_OPS);
}
/**
* Execute on the cpu
* @param x the input data
* @param xShapeInfo the shape information for x
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfoBuffer the shape information
* @param dimension the dimension to perform
* the reduce along long
* @param dimensionLength the length of the dimension buffer
*/
template<typename OpType>
static void _CUDA_H exec(T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *tadShapeInfo,
Nd4jIndex *tadOffset) {
int resultLength = shape::length(resultShapeInfoBuffer);
//pre squeezed: this is for keeping the pointer to the original
//shape information for tad offset
//the squeezed information doesn't render the right strides for
//tad offset
// || tad.wholeThing
if (resultLength == 1 || dimension == nullptr || dimensionLength == shape::rank(xShapeInfo)) {
// degenerate case: reducing over everything yields a scalar
result[0] = execScalar<OpType>(x, xShapeInfo, extraParams);
return;
}
if (OpType::requiresSpecialAccumulation) {
OpType::execSpecial(x, xShapeInfo, extraParams, result, resultShapeInfoBuffer, dimension, dimensionLength, tadShapeInfo, tadOffset);
return;
}
// Use caller-supplied TAD metadata when available; otherwise build it here.
int *tadOnlyShapeInfo = tadShapeInfo;
Nd4jIndex *tadOffsets = tadOffset;
shape::TAD *tad = nullptr;
if (tadOnlyShapeInfo == nullptr || tadOffsets == nullptr) {
tad = new shape::TAD(xShapeInfo, dimension, dimensionLength);
tad->createTadOnlyShapeInfo();
tad->createOffsets();
if (tad->dimensionLength < 1) {
delete tad;
return;
}
tadOnlyShapeInfo = tad->tadOnlyShapeInfo;
tadOffsets = tad->tadOffsets;
}
const int tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
int numTads = shape::length(xShapeInfo) / tadLength;
int tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
// scale thread count with the number of TADs per thread
int tadsPerThread = resultLength / TAD_THRESHOLD;
int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
if (tadEWS > 0 && (numTads == 1 || shape::isVector(tadOnlyShapeInfo) || shape::isScalar(tadOnlyShapeInfo))) {
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
for (int i = 0; i < resultLength; i++) {
T *iter = x + tadOffsets[i];
T start = OpType::startingValue(iter);
if (tadEWS == 1) {
// FIXME: proper reduction should be used here
for (int j = 0; j < tadLength; j++) {
start = OpType::update(start, OpType::op(iter[j], extraParams), extraParams);
}
}
else {
// FIXME: proper reduction to be used here
for (int j = 0; j < tadLength; j++) {
start = OpType::update(start, OpType::op(iter[j * tadEWS], extraParams), extraParams);
}
}
result[i] = OpType::postProcess(start, tadLength, extraParams);
}
}
else {
// general case: per-element coordinate-to-offset translation
int *tadShape = shape::shapeOf(tadOnlyShapeInfo);
int *tadStride = shape::stride(tadOnlyShapeInfo);
int tadRank = shape::rank(tadOnlyShapeInfo);
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
for (int i = 0; i < resultLength; i++) {
Nd4jIndex offset = tadOffsets[i];
int xCoord[MAX_RANK];
T start = OpType::startingValue(x + offset);
for (int j = 0; j < tadLength; j++) {
shape::ind2subC(tadRank, tadShape, j, xCoord);
Nd4jIndex xOffset = shape::getOffset(offset, tadShape, tadStride, xCoord, tadRank);
start = OpType::update(start, OpType::op(x[xOffset], extraParams), extraParams);
}
result[i] = OpType::postProcess(start, tadLength, extraParams);;
}
}
if (tad != nullptr)
delete tad;
}
/**
* CPU implementation
* @param x the input data
* @param xShapeInfo the shape information for
* the input data
* @param extraParams the extra parameters for the problem
* @param result the result buffer
* @param resultShapeInfo the shape information
*/
template<typename OpType>
static void _CUDA_H exec(T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo) {
// NOTE(review): the scalar result is returned, not written to `result`;
// callers appear to rely on the return value of execScalar.
return execScalar<OpType>(x, xShapeInfo, extraParams);
}
/**
* Reduce down to 1 number
* @param x the input
* @param xShapeInfo the shape information
* for the input
* @param extraParams the extra params
* @return
*/
template<typename OpType>
static T _CUDA_H execScalar(const T *x, int xElementWiseStride, Nd4jIndex length, T *extraParams) {
T startingVal = OpType::startingValue(x);
if (xElementWiseStride == 1) {
if (length < ELEMENT_THRESHOLD) {
// small input: plain serial loop
T local = OpType::startingValue(x);
// FIXME: proper reduction to be used here
for (Nd4jIndex i = 0; i < length; i++) {
T curr = OpType::op(x[i], extraParams);
local = OpType::update(local, curr, extraParams);
}
local = OpType::postProcess(local, length, extraParams);
return local;
}
else {
// large input: chunk the array, one partial per OpenMP thread,
// then combine the partials serially.
T finalVal = startingVal;
BlockInformation info(length, ELEMENT_THRESHOLD);
T *blocks = new T[info.threads];
#pragma omp parallel num_threads(info.threads) if (info.threads > 1) proc_bind(AFFINITY) default(shared)
{
T local = OpType::startingValue(x);
for (int i = omp_get_thread_num(); i < info.chunks; i += info.threads) {
Nd4jIndex newOffset = (i * info.items);
const T *chunk = x + newOffset;
Nd4jIndex itemsToLoop = info.items;
if (i * info.items >= length) {
break;
}
//handle modulo case
if (newOffset + info.items >= length) {
itemsToLoop = length - newOffset;
}
// FIXME: proper reduction should be used here
for (Nd4jIndex j = 0; j < itemsToLoop && i * info.items + j < length; j++) {
T curr = OpType::op(chunk[j], extraParams);
local = OpType::update(local, curr, extraParams);
}
}
blocks[omp_get_thread_num()] = local;
}
// FIXME: proper reduction should be used here
for (int i = 0; i < info.threads; i++) {
finalVal = OpType::update(finalVal, blocks[i], extraParams);
}
finalVal = OpType::postProcess(finalVal, length, extraParams);
delete[] blocks;
return finalVal;
}
}
else {
// strided variant of the same serial/parallel split
if (length < ELEMENT_THRESHOLD) {
T local = OpType::startingValue(x);
// FIXME: proper reduction should be used here
for (Nd4jIndex i = 0; i < length; i++) {
T curr = OpType::op(x[i * xElementWiseStride], extraParams);
local = OpType::update(local, curr, extraParams);
}
local = OpType::postProcess(local, length, extraParams);
return local;
}
T finalVal = startingVal;
BlockInformation info(length, ELEMENT_THRESHOLD);
T *blocks = new T[info.threads];
#pragma omp parallel num_threads(info.threads) if (info.threads > 1) proc_bind(AFFINITY) default(shared)
{
T local = OpType::startingValue(x);
for (int i = omp_get_thread_num(); i < info.chunks; i += info.threads) {
Nd4jIndex newOffset = (i * info.items) * xElementWiseStride;
const T *chunk = x + newOffset;
Nd4jIndex itemsToLoop = info.items;
if (i * info.items >= length)
break;
// FIXME: proper reduction should be used here
for (Nd4jIndex j = 0; j < itemsToLoop && i * info.items + j < length; j++) {
T curr = OpType::op(chunk[j * xElementWiseStride], extraParams);
local = OpType::update(local, curr, extraParams);
}
}
blocks[omp_get_thread_num()] = local;
}
// FIXME: proper reduction should be used here
for (int i = 0; i < info.threads; i++) {
finalVal = OpType::update(finalVal, blocks[i], extraParams);
}
finalVal = OpType::postProcess(finalVal, length, extraParams);
delete[] blocks;
return finalVal;
}
}
};
#ifdef __CUDACC__
/**
*
* @param extraParams
* @param sPartials
* @param sMemSize
*/
template<typename T>
// Fill the shared reduction buffer with extraParams[0] (the op's seed value);
// sMemSize is in bytes, so the element count is sMemSize / sizeof(T).
__device__ void initializeShared(T *extraParams, T **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(T);
T *sPartialsDeref = (T *) *sPartials;
for (int i = 0; i < sPartialsLength; i++) {
sPartialsDeref[i] = extraParams[0];
}
}
#endif
}
}
#ifdef __CUDACC__
/**
* Interface for the c and driver api
* @param op the operation number
* @param n the length of the problem
* @param dx the input information
* @param xShapeInfo the shape information
* @param extraParams the extra parameters
* @param result the result data
* @param resultShapeInfo the result shape information
* @param gpuInformation the gpu information
* @param dimension the dimension to do reduce along long
* @param dimensionLength the length of the dimension buffer
* @param postProcessOrNot whether to pre process or not
*/
template <typename T, typename OpClass>
// Kernel-side wrapper: set up the UnifiedSharedMemory manager in shared
// memory (thread 0 only), then run the general N-D TAD reduction.
__device__ void reduceSimpleGeneric(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
// placement-new the manager at the start of dynamic shared memory
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), shape::rank(xShapeInfo));
}
__syncthreads();
functions::reduce::ReduceFunction<T>::template transformCudaXD<OpClass>(
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
reductionBuffer,
manager,
tadOnlyShapeInfo,
tadOffsets);
}
template <typename T, typename OpClass>
// Kernel-side wrapper for the 1D TAD reduction; transformCuda1D allocates
// its own shared-memory partials, so no manager is passed (nullptr).
__device__ void reduceSimpleGeneric1D(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer,
int *tadOnlyShapeInfo,
Nd4jIndex *tadOffsets) {
functions::reduce::ReduceFunction<T>::template transformCuda1D<OpClass>(
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
reductionBuffer, nullptr, tadOnlyShapeInfo, tadOffsets);
}
template <typename T, typename OpClass>
// Kernel-side wrapper for the rank-3 TAD reduction; like the 1D wrapper it
// passes no shared-memory manager (transformCuda3D uses the raw shmem block).
__device__ void reduceSimpleGeneric3D(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer,
int *tadOnlyShapeInfo,
Nd4jIndex *tadOffsets) {
functions::reduce::ReduceFunction<T>::template transformCuda3D<OpClass>(
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
reductionBuffer,
nullptr,
tadOnlyShapeInfo,
tadOffsets);
}
template <typename T, typename OpClass>
// Kernel-side wrapper for the whole-array-to-scalar reduction; `dimension`
// and `dimensionLength` are accepted for a uniform signature but unused by
// execScalarCuda.
__device__ void reduceScalarGeneric(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer, int *tadOnlyShapeInfo) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), 0);
}
__syncthreads();
functions::reduce::ReduceFunction<T>::template execScalarCuda<OpClass>(
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
reductionBuffer,
manager,
tadOnlyShapeInfo);
};
/*
 * Kernel instantiations: DISPATCH_KERNEL_SIMPLE stamps out one concrete
 * __global__ kernel per (reduction op, element type) pair — for float,
 * double and float16 — each forwarding to the matching __device__ wrapper
 * defined above.  OPS_A(REDUCE_OPS) expands to the full list of reduce ops.
 */
// reduceScalar
DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float, INPUT(float *x, int *xShapeInfo, float *extraParams, float *z, int *zShapeInfo, int *dimension, int dimensionLength, float *reductionBuffer, int *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, double, INPUT(double *x, int *xShapeInfo, double *extraParams, double *z, int *zShapeInfo, int *dimension, int dimensionLength, double *reductionBuffer, int *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float16, INPUT(float16 *x, int *xShapeInfo, float16 *extraParams, float16 *z, int *zShapeInfo, int *dimension, int dimensionLength, float16 *reductionBuffer, int *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS))
// reduce1D
DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float, INPUT(float *x, int *xShape, float *extraParams, float *z, int *zShape, int *dimension, int dimensionLength, float *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, double, INPUT(double *x, int *xShape, double *extraParams, double *z, int *zShape, int *dimension, int dimensionLength, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float16, INPUT(float16 *x, int *xShape, float16 *extraParams, float16 *z, int *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
// reduce3D
DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float, INPUT(float *x, int *xShape, float *extraParams, float *z, int *zShape, int *dimension, int dimensionLength, float *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, double, INPUT(double *x, int *xShape, double *extraParams, double *z, int *zShape, int *dimension, int dimensionLength, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float16, INPUT(float16 *x, int *xShape, float16 *extraParams, float16 *z, int *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
// reduceXD
DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float, INPUT(float *x, int *xShape, float *extraParams, float *z, int *zShape, int *dimension, int dimensionLength, float *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, double, INPUT(double *x, int *xShape, double *extraParams, double *z, int *zShape, int *dimension, int dimensionLength, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float16, INPUT(float16 *x, int *xShape, float16 *extraParams, float16 *z, int *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS))
#endif
#endif
|
GB_unop__identity_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_fc32)
// op(A') function: GB (_unop_tran__identity_bool_fc32)
// C type: bool
// A type: GxB_FC32_t
// cast: bool cij = (crealf (aij) != 0) || (cimagf (aij) != 0)
// unaryop: cij = aij

// A (input) type
#define GB_ATYPE \
    GxB_FC32_t

// C (output) type
#define GB_CTYPE \
    bool

// declare and read one entry: aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// address one entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: z gets x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting: a complex value maps to true iff its real or imaginary part is
// nonzero
#define GB_CAST(z, aij) \
    bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;

// cij = op (aij): fused load + cast + store used by the transpose kernel
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast each GxB_FC32_t entry of Ax to bool (true iff the real or imaginary
// part is nonzero) and store it in Cx, in parallel over `nthreads` threads.
// If Ab is non-NULL, A is in bitmap form and entries with Ab[p] == 0 are
// skipped.  Returns GrB_NO_VALUE when the operator is compile-time disabled.
GrB_Info GB (_unop_apply__identity_bool_fc32)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap slots) in A
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // non-bitmap case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose + cast for this operator.  The actual loop lives in the
// shared template GB_unop_transpose.c, which expands the GB_* macros defined
// above.  Returns GrB_NO_VALUE when the operator is compile-time disabled.
GrB_Info GB (_unop_tran__identity_bool_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace scratch (see template)
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
solver_cvodes.c | /**
* \file
* \brief The integration driver for the CVODE solver
*
* \author Nicholas Curtis
* \date 03/10/2015
*
*/
#include "header.h"
#include "solver.h"
/* CVODES INCLUDES */
#include "sundials/sundials_types.h"
#include "sundials/sundials_math.h"
#include "sundials/sundials_nvector.h"
#include "nvector/nvector_serial.h"
#include "cvodes/cvodes.h"
#include "cvodes/cvodes_lapack.h"
extern N_Vector *y_locals;
extern double* y_local_vectors;
extern void** integrators;
#ifdef GENERATE_DOCS
namespace cvode {
#endif
/**
 * \brief Integration driver for the CPU integrators
 * \param[in] NUM the number of IVPs to solve
 * \param[in] t the current IVP time
 * \param[in] t_end the time to integrate each IVP to
 * \param[in] pr_global the pressure values for the IVPs (one per IVP)
 * \param[in, out] y_global the state vectors, stored planar:
 *                 y_global[tid + i * NUM] is species i of IVP tid
 *
 * The integration driver for the CVODEs solver.  IVPs are distributed over
 * OpenMP threads; each thread reuses the CVODE integrator and N_Vector
 * scratch slot that were pre-allocated for it (both indexed by
 * omp_get_thread_num()).  On any CVODE error the whole process exits with
 * the failing return code.
 */
void intDriver (const int NUM, const double t, const double t_end,
                const double *pr_global, double *y_global)
{
    int tid;
    double t_next;
    #pragma omp parallel for shared(y_global, pr_global, integrators, y_locals) private(tid, t_next)
    for (tid = 0; tid < NUM; ++tid) {
        // per-thread workspace index (NOT the IVP index `tid`)
        int index = omp_get_thread_num();

        // local array with initial values
        N_Vector fill = y_locals[index];
        double pr_local = pr_global[tid];

        // load local array with initial values from global array
        // (de-interleave the planar global layout into a contiguous vector)
        double* y_local = NV_DATA_S(fill);
        for (int i = 0; i < NSP; i++)
        {
            y_local[i] = y_global[tid + i * NUM];
        }

        //reinit this integrator for time t, w/ updated state
        int flag = CVodeReInit(integrators[index], t, fill);
        if (flag != CV_SUCCESS)
        {
            printf("Error reinitializing integrator for thread %d, code: %d\n", tid, flag);
            exit(flag);
        }

        //set user data to Pr
        // NOTE(review): &pr_local points at a stack variable of this loop
        // iteration — assumes the RHS function only reads it during the
        // CVode() call below, which holds for this per-iteration usage.
        flag = CVodeSetUserData(integrators[index], &pr_local);
        if (flag != CV_SUCCESS)
        {
            printf("Error setting user data for thread %d, code: %d\n", tid, flag);
            exit(flag);
        }

        //set end time
        flag = CVodeSetStopTime(integrators[index], t_end);
        if (flag != CV_SUCCESS)
        {
            printf("Error setting end time for thread %d, code: %d\n", tid, flag);
            exit(flag);
        }

        // call integrator for one time step
        // CV_TSTOP_RETURN is also success: integration stopped exactly at
        // the stop time set above.
        flag = CVode(integrators[index], t_end, fill, &t_next, CV_NORMAL);
        if ((flag != CV_SUCCESS && flag != CV_TSTOP_RETURN) || t_next != t_end)
        {
            printf("Error on integration step for thread %d, code %d\n", tid, flag);
            exit(flag);
        }

        // update global array with integrated values
        for (int i = 0; i < NSP; i++)
        {
            y_global[tid + i * NUM] = y_local[i];
        }
    } // end tid loop
} // end intDriver
#ifdef GENERATE_DOCS
}
#endif
|
valid.mob9.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_1024_7_7_1024_3_3.h"
#include "gen_ukr_A1B2gemm_1_1024_7_7_1024_3_3.h"
// Auto-generated ("push button") CNN convolution driver for the
// 1x1024x7x7 * 1024x3x3 problem: first repacks the filter tensor oriB into
// the blocked layout expected by the micro-kernels (B), then runs a deeply
// tiled loop nest that dispatches AVX micro-kernels.  Tile sizes (Tc1, Txy3,
// Tf2) and problem dims (uNf, uNc, uNw, uNh) come from the generated headers.
// Intended to be called from inside an OpenMP parallel region (uses
// omp_get_thread_num() and an omp barrier).
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
// output spatial size (7x7) and kernel size (3x3); kept for readability of
// the generated constants below
int Nx = 7;
int Ny = 7;
int Nh = 3;
// per-row element strides fed to the scatter micro-kernels; temporarily
// adjusted for rows that wrap around the image edge
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
// Repack filters: transpose 8x8 blocks of oriB into the 16-wide packed
// layout of B (two 8-column halves per 16-filter pack).
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
// all threads must finish packing before the GEMM-like loop nest reads B
#pragma omp barrier// begin push button generated block
// Generated tiling hierarchy over channels (c*), output pixels (xy*), and
// filters (f*); inner indices decompose the flat pixel index into (x1, y1)
// and the filter index into pack/offset parts.
for(int c5=0;c5<1024+0;c5+=1024)
{
for(int xy5=0;xy5<49+0;xy5+=49)
{
for(int f5=0;f5<1024+0;f5+=1024)
{
for(int c4=c5;c4<min(1024, 1024+c5);c4+=320)
{
for(int xy4=xy5;xy4<min(49, 49+xy5);xy4+=49)
{
for(int f4=f5;f4<min(1024, 1024+f5);f4+=1024)
{
for(int c3=c4;c3<min(1024, 320+c4);c3+=Tc1)
{
for(int xy3=xy4;xy3<min(49, 49+xy4);xy3+=Txy3)
{
for(int f3=f4;f3<min(1024, 1024+f4);f3+=Tf2)
{
for(int xy2=xy3;xy2<min(49, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(1024, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(1024, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(1024, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(49, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(1024, 16+f2);f1+=16)
{
int ctile=min(Tc1, 1024-c1);
int x1=xy1/7;
int y1=xy1%7/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
// flat offsets into the packed A, B and C buffers for this micro-tile
int offsetA=0+b1*82944+c1_1*81+1*x1*9+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*147456+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*50176+of1_1*49+x1*7+y1*1+of1_2*1;
// Case 1: all 6 output pixels of this micro-tile fit in the current row.
if(7-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
// Case 2: the tile wraps to the next row — bump the strides of the
// wrapped rows (+2 skips the boundary), run the kernel, then restore.
else if(7*7-xy1>=6){
for(int sti=7-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=7-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
// Case 3: tail of the image — fall back to the 1-pixel kernel.
else{
cnn_ukr_float_scatter_1x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
}
pomp_test.c | #include <stdio.h>
/*
 * Exercise POMP instrumentation pragmas around OpenMP worksharing
 * constructs: a user-defined phase around a parallel for, followed by a
 * parallel sections region.  Output markers (+++ ...) are checked by the
 * instrumentation test harness.  Returns 0 on success.
 */
int main() {
  int i;
  #pragma pomp inst init
  printf("+++ sequential1\n");

  #pragma pomp inst begin(phase1)
  #pragma omp parallel for
  for (i=0; i<4; ++i) { printf("+++ pdo %d\n", i); }
  #pragma pomp inst end(phase1)

  printf("+++ sequential2\n");

  /* BUG FIX: was "#pragma omp parallelsections" — not a valid OpenMP
   * directive, so compilers ignored it as an unknown pragma and the
   * sections ran sequentially (and orphaned "omp section" pragmas could
   * even fail to compile).  The directive is "parallel sections". */
  #pragma omp parallel sections
  {
    #pragma omp section
    printf("+++ psection 1\n");
    #pragma omp section
    printf("+++ psection 2\n");
  }

  printf("+++ sequential3\n");
  return 0;
}
|
FillInLinearSystemImpl.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include "open3d/core/linalg/kernel/SVD3x3.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/pipelines/kernel/FillInLinearSystem.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
// Accumulate the point-to-plane alignment term between fragment i and
// fragment j into the global linear system (AtA, Atb) and the scalar
// residual.  Correspondences are given as already-transformed point pairs
// (Ti_ps, Tj_qs) with rotated normals (Ri_normal_ps); pairs whose residual
// exceeds `threshold` are rejected as outliers.  This translation unit is
// compiled twice: as the CUDA variant under __CUDACC__ and as the CPU
// variant otherwise.
#if defined(__CUDACC__)
void FillInRigidAlignmentTermCUDA
#else
void FillInRigidAlignmentTermCPU
#endif
        (core::Tensor &AtA,
         core::Tensor &Atb,
         core::Tensor &residual,
         const core::Tensor &Ti_ps,
         const core::Tensor &Tj_qs,
         const core::Tensor &Ri_normal_ps,
         int i,
         int j,
         float threshold) {
    core::Device device = AtA.GetDevice();
    int64_t n = Ti_ps.GetLength();
    if (Tj_qs.GetLength() != n || Ri_normal_ps.GetLength() != n) {
        utility::LogError(
                "Unable to setup linear system: input length mismatch.");
    }

    // First fill in a small 12 x 12 linear system (6 pose parameters for
    // each of the two fragments), then scatter it into the global system.
    core::Tensor AtA_local =
            core::Tensor::Zeros({12, 12}, core::Float32, device);
    core::Tensor Atb_local = core::Tensor::Zeros({12}, core::Float32, device);

    float *AtA_local_ptr = static_cast<float *>(AtA_local.GetDataPtr());
    float *Atb_local_ptr = static_cast<float *>(Atb_local.GetDataPtr());
    float *residual_ptr = static_cast<float *>(residual.GetDataPtr());

    const float *Ti_ps_ptr = static_cast<const float *>(Ti_ps.GetDataPtr());
    const float *Tj_qs_ptr = static_cast<const float *>(Tj_qs.GetDataPtr());
    const float *Ri_normal_ps_ptr =
            static_cast<const float *>(Ri_normal_ps.GetDataPtr());

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
#endif

    // The lambda captures the raw pointers by value ([=]); the owning
    // tensors stay alive in this scope for the duration of ParallelFor.
    launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
        // One correspondence per workload; 3 floats per point/normal.
        const float *p_prime = Ti_ps_ptr + 3 * workload_idx;
        const float *q_prime = Tj_qs_ptr + 3 * workload_idx;
        const float *normal_p_prime = Ri_normal_ps_ptr + 3 * workload_idx;

        // Point-to-plane residual r = (p' - q') . n'.
        float r = (p_prime[0] - q_prime[0]) * normal_p_prime[0] +
                  (p_prime[1] - q_prime[1]) * normal_p_prime[1] +
                  (p_prime[2] - q_prime[2]) * normal_p_prime[2];
        // Outlier rejection.
        if (abs(r) > threshold) return;

        // Jacobian for the two 6-DoF poses: entries 0-2 are the rotational
        // part (q' x n'), 3-5 the translational part (n'); entries 6-11
        // (pose j) are the negation of entries 0-5 (pose i).
        float J_ij[12];
        J_ij[0] = -q_prime[2] * normal_p_prime[1] +
                  q_prime[1] * normal_p_prime[2];
        J_ij[1] =
                q_prime[2] * normal_p_prime[0] - q_prime[0] * normal_p_prime[2];
        J_ij[2] = -q_prime[1] * normal_p_prime[0] +
                  q_prime[0] * normal_p_prime[1];
        J_ij[3] = normal_p_prime[0];
        J_ij[4] = normal_p_prime[1];
        J_ij[5] = normal_p_prime[2];
        for (int k = 0; k < 6; ++k) {
            J_ij[k + 6] = -J_ij[k];
        }

        // Accumulate J^T J, J^T r and r^2 into the local system.
        // Not optimized; Switch to reduction if necessary.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
        for (int i_local = 0; i_local < 12; ++i_local) {
            for (int j_local = 0; j_local < 12; ++j_local) {
                atomicAdd(&AtA_local_ptr[i_local * 12 + j_local],
                          J_ij[i_local] * J_ij[j_local]);
            }
            atomicAdd(&Atb_local_ptr[i_local], J_ij[i_local] * r);
        }
        atomicAdd(residual_ptr, r * r);
#else
        // CPU path: serialize the whole update in a named critical section.
#pragma omp critical(FillInRigidAlignmentTermCPU)
        {
            for (int i_local = 0; i_local < 12; ++i_local) {
                for (int j_local = 0; j_local < 12; ++j_local) {
                    AtA_local_ptr[i_local * 12 + j_local]
                            += J_ij[i_local] * J_ij[j_local];
                }
                Atb_local_ptr[i_local] += J_ij[i_local] * r;
            }
            *residual_ptr += r * r;
        }
#endif
    });

    // Then fill-in the large linear system: the 12 local unknowns map to
    // global rows/cols 6i..6i+5 and 6j..6j+5.
    std::vector<int64_t> indices_vec(12);
    for (int k = 0; k < 6; ++k) {
        indices_vec[k] = i * 6 + k;
        indices_vec[k + 6] = j * 6 + k;
    }

    std::vector<int64_t> indices_i_vec;
    std::vector<int64_t> indices_j_vec;
    for (int local_i = 0; local_i < 12; ++local_i) {
        for (int local_j = 0; local_j < 12; ++local_j) {
            indices_i_vec.push_back(indices_vec[local_i]);
            indices_j_vec.push_back(indices_vec[local_j]);
        }
    }
    core::Tensor indices(indices_vec, {12}, core::Int64, device);
    core::Tensor indices_i(indices_i_vec, {12 * 12}, core::Int64, device);
    core::Tensor indices_j(indices_j_vec, {12 * 12}, core::Int64, device);

    // Read-modify-write scatter: AtA[idx_i, idx_j] += AtA_local, etc.
    core::Tensor AtA_sub = AtA.IndexGet({indices_i, indices_j});
    AtA.IndexSet({indices_i, indices_j}, AtA_sub + AtA_local.View({12 * 12}));

    core::Tensor Atb_sub = Atb.IndexGet({indices});
    Atb.IndexSet({indices}, Atb_sub + Atb_local.View({12, 1}));
}
// Accumulate the SLAC alignment term between fragment i and fragment j
// directly into the global system (AtA, Atb, residual).  Each accepted
// correspondence touches 60 unknowns: 2 x 6 pose parameters plus
// 2 x 8 control-grid corners x 3 coordinates.  Correspondences whose
// point-to-plane residual exceeds `threshold` are rejected.  Compiled as
// the CUDA variant under __CUDACC__, otherwise as the CPU variant.
#if defined(__CUDACC__)
void FillInSLACAlignmentTermCUDA
#else
void FillInSLACAlignmentTermCPU
#endif
        (core::Tensor &AtA,
         core::Tensor &Atb,
         core::Tensor &residual,
         const core::Tensor &Ti_Cps,
         const core::Tensor &Tj_Cqs,
         const core::Tensor &Cnormal_ps,
         const core::Tensor &Ri_Cnormal_ps,
         const core::Tensor &RjT_Ri_Cnormal_ps,
         const core::Tensor &cgrid_idx_ps,
         const core::Tensor &cgrid_idx_qs,
         const core::Tensor &cgrid_ratio_qs,
         const core::Tensor &cgrid_ratio_ps,
         int i,
         int j,
         int n_frags,
         float threshold) {
    int64_t n = Ti_Cps.GetLength();
    if (Tj_Cqs.GetLength() != n || Cnormal_ps.GetLength() != n ||
        Ri_Cnormal_ps.GetLength() != n || RjT_Ri_Cnormal_ps.GetLength() != n ||
        cgrid_idx_ps.GetLength() != n || cgrid_ratio_ps.GetLength() != n ||
        cgrid_idx_qs.GetLength() != n || cgrid_ratio_qs.GetLength() != n) {
        utility::LogError(
                "Unable to setup linear system: input length mismatch.");
    }

    // Total number of unknowns in the global system (poses + control grid).
    int n_vars = Atb.GetLength();
    float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr());
    float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr());
    float *residual_ptr = static_cast<float *>(residual.GetDataPtr());

    // Geometric properties
    const float *Ti_Cps_ptr = static_cast<const float *>(Ti_Cps.GetDataPtr());
    const float *Tj_Cqs_ptr = static_cast<const float *>(Tj_Cqs.GetDataPtr());
    const float *Cnormal_ps_ptr =
            static_cast<const float *>(Cnormal_ps.GetDataPtr());
    const float *Ri_Cnormal_ps_ptr =
            static_cast<const float *>(Ri_Cnormal_ps.GetDataPtr());
    const float *RjT_Ri_Cnormal_ps_ptr =
            static_cast<const float *>(RjT_Ri_Cnormal_ps.GetDataPtr());

    // Association properties (8 control-grid corners per point)
    const int *cgrid_idx_ps_ptr =
            static_cast<const int *>(cgrid_idx_ps.GetDataPtr());
    const int *cgrid_idx_qs_ptr =
            static_cast<const int *>(cgrid_idx_qs.GetDataPtr());
    const float *cgrid_ratio_ps_ptr =
            static_cast<const float *>(cgrid_ratio_ps.GetDataPtr());
    const float *cgrid_ratio_qs_ptr =
            static_cast<const float *>(cgrid_ratio_qs.GetDataPtr());

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
#endif

    // Pointers are captured by value; the owning tensors outlive the launch.
    launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
        // Per-correspondence slices: 3 floats per point/normal, 8 ints /
        // 8 floats for the control-grid association.
        const float *Ti_Cp = Ti_Cps_ptr + 3 * workload_idx;
        const float *Tj_Cq = Tj_Cqs_ptr + 3 * workload_idx;
        const float *Cnormal_p = Cnormal_ps_ptr + 3 * workload_idx;
        const float *Ri_Cnormal_p = Ri_Cnormal_ps_ptr + 3 * workload_idx;
        const float *RjTRi_Cnormal_p = RjT_Ri_Cnormal_ps_ptr + 3 * workload_idx;

        const int *cgrid_idx_p = cgrid_idx_ps_ptr + 8 * workload_idx;
        const int *cgrid_idx_q = cgrid_idx_qs_ptr + 8 * workload_idx;
        const float *cgrid_ratio_p = cgrid_ratio_ps_ptr + 8 * workload_idx;
        const float *cgrid_ratio_q = cgrid_ratio_qs_ptr + 8 * workload_idx;

        // Point-to-plane residual r = (Ti Cp - Tj Cq) . (Ri Cnormal_p).
        float r = (Ti_Cp[0] - Tj_Cq[0]) * Ri_Cnormal_p[0] +
                  (Ti_Cp[1] - Tj_Cq[1]) * Ri_Cnormal_p[1] +
                  (Ti_Cp[2] - Tj_Cq[2]) * Ri_Cnormal_p[2];
        // Outlier rejection.
        if (abs(r) > threshold) return;

        // Now we fill in a 60 x 60 sub-matrix: 2 x (6 + 8 x 3)
        // J holds the Jacobian entries, idx the matching global unknowns.
        float J[60];
        int idx[60];

        // Jacobian w.r.t. Ti: 0-6 (rotation part = Tj_Cq x n, then n)
        J[0] = -Tj_Cq[2] * Ri_Cnormal_p[1] + Tj_Cq[1] * Ri_Cnormal_p[2];
        J[1] = Tj_Cq[2] * Ri_Cnormal_p[0] - Tj_Cq[0] * Ri_Cnormal_p[2];
        J[2] = -Tj_Cq[1] * Ri_Cnormal_p[0] + Tj_Cq[0] * Ri_Cnormal_p[1];
        J[3] = Ri_Cnormal_p[0];
        J[4] = Ri_Cnormal_p[1];
        J[5] = Ri_Cnormal_p[2];

        // Jacobian w.r.t. Tj: 6-12 (negated pose-i entries)
        for (int k = 0; k < 6; ++k) {
            J[k + 6] = -J[k];
            idx[k + 0] = 6 * i + k;
            idx[k + 6] = 6 * j + k;
        }

        // Jacobian w.r.t. C over p: 12-36 (trilinear weights x normal)
        for (int k = 0; k < 8; ++k) {
            J[12 + k * 3 + 0] = cgrid_ratio_p[k] * Cnormal_p[0];
            J[12 + k * 3 + 1] = cgrid_ratio_p[k] * Cnormal_p[1];
            J[12 + k * 3 + 2] = cgrid_ratio_p[k] * Cnormal_p[2];
            idx[12 + k * 3 + 0] = 6 * n_frags + cgrid_idx_p[k] * 3 + 0;
            idx[12 + k * 3 + 1] = 6 * n_frags + cgrid_idx_p[k] * 3 + 1;
            idx[12 + k * 3 + 2] = 6 * n_frags + cgrid_idx_p[k] * 3 + 2;
        }

        // Jacobian w.r.t. C over q: 36-60 (negated, normal in j's frame)
        for (int k = 0; k < 8; ++k) {
            J[36 + k * 3 + 0] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[0];
            J[36 + k * 3 + 1] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[1];
            J[36 + k * 3 + 2] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[2];
            idx[36 + k * 3 + 0] = 6 * n_frags + cgrid_idx_q[k] * 3 + 0;
            idx[36 + k * 3 + 1] = 6 * n_frags + cgrid_idx_q[k] * 3 + 1;
            idx[36 + k * 3 + 2] = 6 * n_frags + cgrid_idx_q[k] * 3 + 2;
        }

        // Scatter J^T J, J^T r and r^2 into the global system.
        // Not optimized; Switch to reduction if necessary.
#if defined(__CUDACC__)
        for (int ki = 0; ki < 60; ++ki) {
            for (int kj = 0; kj < 60; ++kj) {
                float AtA_ij = J[ki] * J[kj];
                int ij = idx[ki] * n_vars + idx[kj];
                atomicAdd(AtA_ptr + ij, AtA_ij);
            }
            float Atb_i = J[ki] * r;
            atomicAdd(Atb_ptr + idx[ki], Atb_i);
        }
        atomicAdd(residual_ptr, r * r);
#else
        // CPU path: serialize the whole update in a named critical section.
#pragma omp critical(FillInSLACAlignmentTermCPU)
        {
            for (int ki = 0; ki < 60; ++ki) {
                for (int kj = 0; kj < 60; ++kj) {
                    AtA_ptr[idx[ki] * n_vars + idx[kj]]
                            += J[ki] * J[kj];
                }
                Atb_ptr[idx[ki]] += J[ki] * r;
            }
            *residual_ptr += r * r;
        }
#endif
    });
}
// Accumulate the SLAC control-grid regularizer into the global system.
// For every grid vertex, a local rotation R is estimated from the current
// vs. initial offsets to its (up to 6) valid neighbors via an SVD-based
// Procrustes fit; the term then penalizes deviation of each neighbor edge
// from that locally-rigid prediction ("as-rigid-as-possible").  The vertex
// `anchor_idx` is pinned by forcing R to identity.  Compiled as the CUDA
// variant under __CUDACC__, otherwise as the CPU variant.
#if defined(__CUDACC__)
void FillInSLACRegularizerTermCUDA
#else
void FillInSLACRegularizerTermCPU
#endif
        (core::Tensor &AtA,
         core::Tensor &Atb,
         core::Tensor &residual,
         const core::Tensor &grid_idx,
         const core::Tensor &grid_nbs_idx,
         const core::Tensor &grid_nbs_mask,
         const core::Tensor &positions_init,
         const core::Tensor &positions_curr,
         float weight,
         int n_frags,
         int anchor_idx) {
    int64_t n = grid_idx.GetLength();
    int64_t n_vars = Atb.GetLength();

    float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr());
    float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr());
    float *residual_ptr = static_cast<float *>(residual.GetDataPtr());

    const int *grid_idx_ptr = static_cast<const int *>(grid_idx.GetDataPtr());
    const int *grid_nbs_idx_ptr =
            static_cast<const int *>(grid_nbs_idx.GetDataPtr());
    const bool *grid_nbs_mask_ptr =
            static_cast<const bool *>(grid_nbs_mask.GetDataPtr());

    const float *positions_init_ptr =
            static_cast<const float *>(positions_init.GetDataPtr());
    const float *positions_curr_ptr =
            static_cast<const float *>(positions_curr.GetDataPtr());

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
#endif

    launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
        // Enumerate 6 neighbors of this grid vertex; mask marks valid ones.
        int idx_i = grid_idx_ptr[workload_idx];
        const int *idx_nbs = grid_nbs_idx_ptr + 6 * workload_idx;
        const bool *mask_nbs = grid_nbs_mask_ptr + 6 * workload_idx;

        // Build a 3x3 linear system to compute the local R
        float cov[3][3] = {{0}};
        float U[3][3], V[3][3], S[3];
        int cnt = 0;
        for (int k = 0; k < 6; ++k) {
            bool mask_k = mask_nbs[k];
            if (!mask_k) continue;

            int idx_k = idx_nbs[k];

            // Edge vectors to neighbor k in the initial and current grids.
            float diff_ik_init[3] = {positions_init_ptr[idx_i * 3 + 0] -
                                             positions_init_ptr[idx_k * 3 + 0],
                                     positions_init_ptr[idx_i * 3 + 1] -
                                             positions_init_ptr[idx_k * 3 + 1],
                                     positions_init_ptr[idx_i * 3 + 2] -
                                             positions_init_ptr[idx_k * 3 + 2]};
            float diff_ik_curr[3] = {positions_curr_ptr[idx_i * 3 + 0] -
                                             positions_curr_ptr[idx_k * 3 + 0],
                                     positions_curr_ptr[idx_i * 3 + 1] -
                                             positions_curr_ptr[idx_k * 3 + 1],
                                     positions_curr_ptr[idx_i * 3 + 2] -
                                             positions_curr_ptr[idx_k * 3 + 2]};

            // Build linear system by computing XY^T when formulating Y = RX
            // Y: curr
            // X: init
            for (int i = 0; i < 3; ++i) {
                for (int j = 0; j < 3; ++j) {
                    cov[i][j] += diff_ik_init[i] * diff_ik_curr[j];
                }
            }
            ++cnt;
        }

        // Too few valid neighbors to determine a rotation robustly.
        if (cnt < 3) {
            return;
        }

        // Procrustes: R = V U^T from the SVD of the covariance.
        core::linalg::kernel::svd3x3(*cov, *U, S, *V);

        float R[3][3];
        core::linalg::kernel::transpose3x3_(*U);
        core::linalg::kernel::matmul3x3_3x3(*V, *U, *R);

        // Reject a reflection (det < 0) by flipping the last row of U^T.
        float d = core::linalg::kernel::det3x3(*R);
        if (d < 0) {
            U[2][0] = -U[2][0];
            U[2][1] = -U[2][1];
            U[2][2] = -U[2][2];
            core::linalg::kernel::matmul3x3_3x3(*V, *U, *R);
        }

        // Now we have R, we build Hessian and residuals
        // But first, we need to anchor a point: force R = I at the anchor.
        if (idx_i == anchor_idx) {
            R[0][0] = R[1][1] = R[2][2] = 1;
            R[0][1] = R[0][2] = R[1][0] = R[1][2] = R[2][0] = R[2][1] = 0;
        }

        for (int k = 0; k < 6; ++k) {
            bool mask_k = mask_nbs[k];

            if (mask_k) {
                int idx_k = idx_nbs[k];

                float diff_ik_init[3] = {
                        positions_init_ptr[idx_i * 3 + 0] -
                                positions_init_ptr[idx_k * 3 + 0],
                        positions_init_ptr[idx_i * 3 + 1] -
                                positions_init_ptr[idx_k * 3 + 1],
                        positions_init_ptr[idx_i * 3 + 2] -
                                positions_init_ptr[idx_k * 3 + 2]};
                float diff_ik_curr[3] = {
                        positions_curr_ptr[idx_i * 3 + 0] -
                                positions_curr_ptr[idx_k * 3 + 0],
                        positions_curr_ptr[idx_i * 3 + 1] -
                                positions_curr_ptr[idx_k * 3 + 1],
                        positions_curr_ptr[idx_i * 3 + 2] -
                                positions_curr_ptr[idx_k * 3 + 2]};

                // Residual of edge (i,k): current edge minus rotated
                // initial edge.
                float R_diff_ik_curr[3];
                core::linalg::kernel::matmul3x3_3x1(*R, diff_ik_init,
                                                    R_diff_ik_curr);

                float local_r[3];
                local_r[0] = diff_ik_curr[0] - R_diff_ik_curr[0];
                local_r[1] = diff_ik_curr[1] - R_diff_ik_curr[1];
                local_r[2] = diff_ik_curr[2] - R_diff_ik_curr[2];

                // Grid unknowns live after the 6*n_frags pose unknowns.
                int offset_idx_i = 3 * idx_i + 6 * n_frags;
                int offset_idx_k = 3 * idx_k + 6 * n_frags;

#if defined(__CUDACC__)
                // Update residual
                atomicAdd(residual_ptr, weight * (local_r[0] * local_r[0] +
                                                  local_r[1] * local_r[1] +
                                                  local_r[2] * local_r[2]));
                for (int axis = 0; axis < 3; ++axis) {
                    // Update AtA: 2x2
                    atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars +
                                       offset_idx_i + axis],
                              weight);
                    atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars +
                                       offset_idx_k + axis],
                              weight);
                    atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars +
                                       offset_idx_k + axis],
                              -weight);
                    atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars +
                                       offset_idx_i + axis],
                              -weight);
                    // Update Atb: 2x1
                    atomicAdd(&Atb_ptr[offset_idx_i + axis],
                              +weight * local_r[axis]);
                    atomicAdd(&Atb_ptr[offset_idx_k + axis],
                              -weight * local_r[axis]);
                }
#else
                // CPU path: named critical section serializes the update.
#pragma omp critical(FillInSLACRegularizerTermCPU)
                {
                    // Update residual
                    *residual_ptr += weight * (local_r[0] * local_r[0] +
                                               local_r[1] * local_r[1] +
                                               local_r[2] * local_r[2]);
                    for (int axis = 0; axis < 3; ++axis) {
                        // Update AtA: 2x2
                        AtA_ptr[(offset_idx_i + axis) * n_vars +
                                offset_idx_i + axis] += weight;
                        AtA_ptr[(offset_idx_k + axis) * n_vars +
                                offset_idx_k + axis] += weight;
                        AtA_ptr[(offset_idx_i + axis) * n_vars +
                                offset_idx_k + axis] -= weight;
                        AtA_ptr[(offset_idx_k + axis) * n_vars +
                                offset_idx_i + axis] -= weight;

                        // Update Atb: 2x1
                        Atb_ptr[offset_idx_i + axis] += weight * local_r[axis];
                        Atb_ptr[offset_idx_k + axis] -= weight * local_r[axis];
                    }
                }
#endif
            }
        }
    });
}
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
|
key_recovery.c | /********************************************************************
* Practical key-recovery attack against FlexAEAD-64
*
* Written in 2020 by Gaëtan Leurent <gaetan.leurent@inria.fr>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to
* the public domain worldwide. This software is distributed without
* any warranty.
*
* http://creativecommons.org/publicdomain/zero/1.0/
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <x86intrin.h>
/*
 * getrandom() compatibility shims, selected at compile time:
 *  - kernel >= 3.17 and glibc >= 2.25: use the libc getrandom(2) wrapper;
 *  - kernel >= 3.17, older glibc: call the raw syscall;
 *  - otherwise: best-effort fallback reading /dev/urandom.
 * All variants return the number of bytes obtained, or -1 on failure.
 */
# include <linux/version.h>
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
#if (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 25)
/* glibc exposes getrandom() directly. */
#include <sys/random.h>
#else
/* Kernel has the syscall but glibc predates the wrapper. */
#include <linux/random.h>
#include <sys/syscall.h>
ssize_t getrandom(void *buf, size_t buflen, unsigned int flags) {
  return syscall(SYS_getrandom, buf, buflen, flags);
}
#endif
#else
/* No getrandom syscall: fall back to /dev/urandom (flags are ignored). */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
ssize_t getrandom(void *buf, size_t buflen, unsigned int flags) {
  int fd = open("/dev/urandom", O_RDONLY);
  /* BUG FIX: previously read() was called even when open() failed
   * (fd == -1); now report the failure to the caller instead. */
  if (fd < 0)
    return -1;
  ssize_t ret = read(fd, buf, buflen);
  close(fd);
  return ret;
}
#endif
#include "encrypt.h"
int crypto_aead_encrypt(
unsigned char *c,unsigned long long *clen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
);
/* Working state of the FlexAEADv1 primitive, mirroring the reference
 * implementation's structure (field meanings inferred from their names —
 * confirm against the reference code in encrypt.h). */
struct FlexAEADv1 {
  unsigned char subkeys[BLOCKSIZE * 8];  /* expanded key material: 8 blocks */
  unsigned char counter[BLOCKSIZE];      /* nonce-derived counter block */
  unsigned char checksum[BLOCKSIZE];     /* running checksum block */
  unsigned char state[BLOCKSIZE];        /* internal cipher state */
  unsigned char sn[BLOCKSIZE];           /* sequence-number block */
  unsigned long long nRounds;            /* number of rounds */
  unsigned long long nBytes;             /* bytes processed so far */
};
void FlexAEADv1_init(struct FlexAEADv1 * self, unsigned char *key );
// Master key under attack (filled elsewhere before the attack runs).
uint8_t Master_K[KEYSIZE];

// Number of chosen queries for phase 1: 2^29.
#define DATA (1ULL<<29)
// ~2^23.5 queries: 2^23 scaled by sqrt(2) in integer arithmetic.  SQRT2
// deliberately expands to "362/256" WITHOUT parentheses so the expansion
// evaluates as ((1<<23)*362)/256; parenthesizing it would truncate
// 362/256 to 1 and break the scaling.
#define DATA2 ((1ULL<<23)*SQRT2)
#define SQRT2 362/256
/* One collected sample: 64-bit ciphertext block C plus a 31-bit nonce
 * index N and a 1-bit tag t, packed to keep the sample tables compact. */
typedef struct {
  uint64_t C;
  uint32_t N:31, t:1;
} __attribute__((packed)) data_t;

/* void print_diff_pair (data_t a, data_t b); */
int test_K2A (uint64_t K);
int test_K2B3A (uint64_t K, uint64_t S0, uint64_t S1);
int filter_diff_phase1(uint64_t delta);

typedef int (*callback_t)(uint64_t);
uint64_t recover_state (uint64_t S0, uint64_t S1, callback_t filter_diff, callback_t test_state);

/* qsort() comparator ordering data_t records by ciphertext value C.
 * Returns -1 / 0 / 1 via a branchless three-way comparison. */
int compare_data (const void *a, const void *b) {
  const uint64_t ca = ((const data_t *) a)->C;
  const uint64_t cb = ((const data_t *) b)->C;
  return (ca > cb) - (ca < cb);
}
void sort_data (const data_t *d, uint64_t N);
void sort_data_mask (const data_t *d, uint64_t N, uint64_t mask);
/* Encode the 32-bit counter n into the 8-byte nonce buffer N: the i-th
 * 4-bit nibble of n is stored in the HIGH nibble of byte N[i] (low nibble
 * of every byte stays zero). */
void make_nonce(uint8_t N[BLOCKSIZE], uint32_t n) {
  for (int i = 0; i < 8; i++) {
    uint8_t nibble = (uint8_t) ((n >> (4 * i)) & 0xfu);
    N[i] = (uint8_t) (nibble << 4);
  }
}
/* Standard AES forward S-box (FIPS-197), used as the byte substitution
 * inside FlexAEAD's round function. */
static uint8_t AES_SBOX[] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
/* Single-byte forward S-box lookup. */
#define Sbox(x) AES_SBOX[x]
/* Apply the AES forward S-box to each of the four bytes of a word. */
uint32_t SBOX(uint32_t x) {
    uint32_t out = 0;
    for (int shift = 0; shift < 32; shift += 8)
        out |= (uint32_t)Sbox((x >> shift) & 0xff) << shift;
    return out;
}
/* Standard AES inverse S-box (FIPS-197), inverse of AES_SBOX above. */
static uint8_t AES_SBOXI[] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
};
/* Single-byte inverse S-box lookup. */
#define SboxI(x) AES_SBOXI[x]
/* Apply the AES inverse S-box to each of the four bytes of a word. */
uint32_t SBOXI(uint32_t x) {
    uint32_t out = 0;
    for (int shift = 0; shift < 32; shift += 8)
        out |= (uint32_t)SboxI((x >> shift) & 0xff) << shift;
    return out;
}
/* One FlexAEAD round on a 64-bit state (shuffle + three S-box layers)
 * and its inverse; definitions follow main(). */
uint64_t round_function(uint64_t x);
uint64_t inverse_round_function(uint64_t x);
// Partial keys recovered
// Zero-initialized globals; each phase's test_* callback stores the
// recovered subkey here, and main() treats a still-zero value as
// "recovery failed" (see the `if (!K2A)` style checks).
uint64_t K2A;
uint64_t K2B3A;
uint64_t K0B;
uint64_t K0A;
uint64_t K2B;
uint64_t K3B;
uint64_t K3A;
uint64_t K1B;
uint64_t K1A;
// Driver for a full key-recovery attack on FlexAEADv1 (64-bit block).
// The eight subkeys K0A..K3B are recovered in phases: each phase
// encrypts a structured set of nonces/messages, looks for ciphertext
// collisions, and feeds colliding pairs to recover_state() with
// phase-specific filter/test callbacks (GCC nested functions that
// capture the surrounding locals).
int main() {
uint64_t C[(2*BLOCKSIZE+TAGSIZE)/8];
unsigned long long clen;
uint8_t N[BLOCKSIZE] = {0};
// Draw a random master key for this experiment run.
int r = getrandom(Master_K, sizeof(Master_K), 0);
assert(r == sizeof(Master_K));
// Compute subkeys
struct FlexAEADv1 flexAEAD;
FlexAEADv1_init(&flexAEAD, Master_K);
/* printf ("K3A: "); */
/* for (int i=0; i<BLOCKSIZE; i++) */
/* printf ("%02x", flexAEAD.subkeys[6*BLOCKSIZE+i]); */
/* printf ("\n"); */
// Print the TRUE subkeys (ground truth from the keyed context) so the
// recovered values below can be checked; subkeys[] holds, in order,
// K0A,K0B,K1A,K1B,K2A,K2B,K3A,K3B (one BLOCKSIZE chunk each).
printf ("K2A: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[4*BLOCKSIZE+i]);
printf ("\n");
printf ("K2B3A: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[5*BLOCKSIZE+i]^flexAEAD.subkeys[6*BLOCKSIZE+i]);
printf ("\n");
printf ("K0B: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[1*BLOCKSIZE+i]);
printf ("\n");
printf ("K0A: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[0*BLOCKSIZE+i]);
printf ("\n");
printf ("K2B: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[5*BLOCKSIZE+i]);
printf ("\n");
printf ("K3B: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[7*BLOCKSIZE+i]);
printf ("\n");
printf ("K1B: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[3*BLOCKSIZE+i]);
printf ("\n");
printf ("K1A: ");
for (int i=0; i<BLOCKSIZE; i++)
printf ("%02x", flexAEAD.subkeys[2*BLOCKSIZE+i]);
printf ("\n");
// Hash table
// Collision table: phase 1 stores two entries per nonce (one per
// ciphertext block), hence 2*DATA slots; later phases reuse it.
data_t *data = malloc(2*DATA*sizeof(data_t));
assert(data);
// ---- Phase 1: recover K2A from cross-block collisions ----
printf ("Generate phase 1 data...");
fflush(stdout);
// Encrypt zero message with structure of nonces
#pragma omp parallel for private(C) private(clen) firstprivate(N)
for (unsigned n=0; n<DATA; n++) {
make_nonce(N, n);
uint8_t M[2*BLOCKSIZE] = {0};
crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, Master_K);
assert(clen <= sizeof(C));
// Record both ciphertext blocks; t distinguishes which block the
// word came from (GNU obsolete `field:` designated-init syntax).
data[2*n ] = (data_t){C: C[0], N: n, t: 0};
data[2*n+1] = (data_t){C: C[1], N: n, t: 1};
}
printf ("Done\n");
fflush(stdout);
// Look for collisions
// qsort(data, 2*DATA, sizeof(data_t), compare_data);
sort_data(data, 2*DATA);
int n=0;
for (unsigned i=1; i<2*DATA; i++)
if (data[i].C == data[i-1].C) n++;
printf ("Found %i collisions\n", n);
fflush(stdout);
for (unsigned i=1; i<2*DATA; i++) {
if (data[i].C == data[i-1].C) {
// Only cross-block collisions are expected after sorting.
assert(data[i].t != data[i-1].t);
// print_diff_pair(data[i-1], data[i]);
uint8_t N0[BLOCKSIZE];
uint8_t N1[BLOCKSIZE];
make_nonce(N0, data[i-1].N);
make_nonce(N1, data[i ].N);
// Pack the two colliding nonces into 64-bit words (big-endian).
uint64_t S0 = 0;
uint64_t S1 = 0;
for (int i=0; i<BLOCKSIZE; i++) {
S0 = (S0<<8)^N0[i];
S1 = (S1<<8)^N1[i];
}
// Nested function (GCC extension): candidate state -> candidate
// K2A, verified by test_K2A with extra chosen queries.
int test_key(uint64_t S) {
return test_K2A(S^S0);
}
if (recover_state(S0, S1, filter_diff_phase1, test_key))
break;
}
}
if (!K2A) {
printf ("Failed to recover K2A\n");
exit(0);
} else {
printf ("Recovered K2A = %016llx\n", (unsigned long long)K2A);
fflush(stdout);
}
// ---- Phase 2: recover K2B^K3A using K2A to control the state ----
printf ("Generate phase 2 data...");
fflush(stdout);
// Generate structure of nonces
#pragma omp parallel for private(C) private(clen) firstprivate(N)
for (unsigned n=0; n<DATA*SQRT2; n++) {
// Spread the counter's nibbles into the high nibbles of S, then
// invert 5 rounds and XOR K2A so the cipher's internal state after
// the first PFK2 layer equals the chosen S.
uint64_t S = _pdep_u64(n, 0xf0f0f0f0f0f0f0f0LL);
uint8_t M[BLOCKSIZE] = {n, 0, 0, 0, n};
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K2A;
uint8_t N[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
N[i] = S>>(56-8*i);
}
crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, Master_K);
assert(clen <= sizeof(C));
data[n] = (data_t){C: C[1], N: n, t: 0};
}
printf ("Done\n");
fflush(stdout);
// Look for collisions
// qsort(data, 2*DATA, sizeof(data_t), compare_data);
sort_data(data, DATA*SQRT2);
n=0;
for (unsigned i=1; i<DATA*SQRT2; i++)
if (data[i].C == data[i-1].C) n++;
printf ("Found %i collisions\n", n);
fflush(stdout);
for (unsigned i=1; i<DATA*SQRT2; i++) {
if (data[i].C == data[i-1].C) {
/* for (int z=0; z<2; z++) { */
/* uint64_t S = _pdep_u64(data[i-z].N, 0xf0f0f0f0f0f0f0f0LL); */
/* uint8_t M[BLOCKSIZE] = {data[i-z].N}; */
/* for (int i=0; i<5; i++) { */
/* S = inverse_round_function(S); */
/* } */
/* S ^= K2A; */
/* uint8_t N[BLOCKSIZE]; */
/* for (int i=0; i<BLOCKSIZE; i++) { */
/* N[i] = S>>(56-8*i); */
/* } */
/* flexAEAD_dbg = 1; */
/* crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, Master_K); */
/* flexAEAD_dbg = 0; */
/* } */
// Reconstruct the two chosen states from the stored indices.
uint64_t S0 = _pdep_u64(data[i-1].N, 0xf0f0f0f0f0f0f0f0LL);
uint64_t S1 = _pdep_u64(data[i ].N, 0xf0f0f0f0f0f0f0f0LL);
// Expected output difference: zero outside bytes 3 and 7, with the
// known input nibble-difference replicated into those bytes.
int filter_diff(uint64_t delta) {
if ((delta & 0x00ffffff00ffffffLL) == 0) {
uint64_t d = _pext_u64(S0^S1, 0xf0f0);
d = (d<<56) | (d<<24);
if (delta == d)
return 1;
}
return 0;
}
int test_state(uint64_t S) {
return test_K2B3A(S^S0, S0, S1);
}
if (recover_state(S0, S1, filter_diff, test_state))
break;
}
}
if (!K2B3A) {
printf ("Failed to recover K2B3A\n");
exit(0);
} else {
printf ("Recovered K2B3A = %016llx\n", (unsigned long long)K2B3A);
fflush(stdout);
}
// ---- Phase 3: recover K0B from collisions in the first ciphertext
// block, now that K2A and K2B3A let us choose deeper states ----
printf ("Generate phase 3 data...");
fflush(stdout);
// Generate structure of Si
#pragma omp parallel for private(C) private(clen) firstprivate(N)
for (unsigned n=0; n<DATA2; n++) {
// 0xdeadbeef multiplier decorrelates successive indices.
uint64_t S = _pdep_u64(n*0xdeadbeef, 0xf0f0f0f0f0f0f0f0LL);
uint8_t M[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
M[i] = S>>(56-8*i);
}
// Peel back two 5-round layers (through K2B3A with the +1 counter
// constant, then K2A) to get the nonce that produces state S.
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K2B3A^0x0100000001000000LL;
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K2A;
uint8_t N[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
N[i] = S>>(56-8*i);
}
crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, Master_K);
assert(clen <= sizeof(C));
// Byte-swap so the stored word matches the big-endian state order.
data[n] = (data_t){C: __builtin_bswap64(C[0]), N: n, t: 0};
}
printf ("Done\n");
fflush(stdout);
// Look for collisions
// qsort(data, DATA2, sizeof(data_t), compare_data);
// Partial collisions only: compare outside bytes 3 and 7.
sort_data_mask(data, DATA2, 0x00ffffff00ffffffLL);
n=0;
for (unsigned i=1; i<DATA2; i++)
if ((data[i].C&0x00ffffff00ffffffLL) == (data[i-1].C&0x00ffffff00ffffffLL)) n++;
printf ("Found %i collisions\n", n);
fflush(stdout);
for (unsigned i=1; i<DATA2; i++) {
if ((data[i].C&0x00ffffff00ffffffLL) == (data[i-1].C&0x00ffffff00ffffffLL)) {
uint64_t S0 = _pdep_u64(data[i-1].N*0xdeadbeef, 0xf0f0f0f0f0f0f0f0LL);
uint64_t S1 = _pdep_u64(data[i ].N*0xdeadbeef, 0xf0f0f0f0f0f0f0f0LL);
/* // DEBUG */
/* { */
/* printf ("%016llx -> %016llx\n", S0, data[i-1].C); */
/* printf ("%016llx -> %016llx\n", S1, data[i ].C); */
/* uint8_t M0[BLOCKSIZE]; */
/* uint8_t M1[BLOCKSIZE]; */
/* for (int i=0; i<BLOCKSIZE; i++) { */
/* M0[i] = S0>>(56-8*i); */
/* M1[i] = S1>>(56-8*i); */
/* } */
/* for (int i=0; i<5; i++) { */
/* S0 = inverse_round_function(S0); */
/* S1 = inverse_round_function(S1); */
/* } */
/* S0 ^= K2B3A^0x0100000001000000LL; */
/* S1 ^= K2B3A^0x0100000001000000LL; */
/* for (int i=0; i<5; i++) { */
/* S0 = inverse_round_function(S0); */
/* S1 = inverse_round_function(S1); */
/* } */
/* S0 ^= K2A; */
/* S1 ^= K2A; */
/* uint8_t N0[BLOCKSIZE]; */
/* uint8_t N1[BLOCKSIZE]; */
/* for (int i=0; i<BLOCKSIZE; i++) { */
/* N0[i] = S0>>(56-8*i); */
/* N1[i] = S1>>(56-8*i); */
/* } */
/* flexAEAD_dbg = 1; */
/* crypto_aead_encrypt((uint8_t*)C, &clen, M0, sizeof(M0), NULL, 0, NULL, N0, Master_K); */
/* crypto_aead_encrypt((uint8_t*)C, &clen, M1, sizeof(M1), NULL, 0, NULL, N1, Master_K); */
/* flexAEAD_dbg = 0; */
/* S0 = _pdep_u64(data[i-1].N, 0xf0f0f0f0f0f0f0f0LL); */
/* S1 = _pdep_u64(data[i ].N, 0xf0f0f0f0f0f0f0f0LL); */
/* } */
// Here the full ciphertext difference is observable directly.
int filter_diff(uint64_t delta) {
return delta == (data[i-1].C^data[i].C);
}
// Candidate input state X -> candidate key K = PF(X)^C; accept if
// K also decrypts other collected ciphertexts consistently
// (low nibbles of T^X vanish by construction of the structure).
int test_state(uint64_t X) {
uint64_t S = X;
for (int i=0; i<5; i++)
S = round_function(S);
uint64_t K = S^data[i-1].C;
// Check known data
for (int z=0; z<16; z++) {
uint64_t T = data[z].C;
T ^= K;
for (int i=0; i<5; i++)
T = inverse_round_function(T);
/* printf ("Candidate: %016llx [%016llx->%016llx] (%016llx)\n", */
/* K, X, S, T^X); */
if (((T^X)&0x0f0f0f0f0f0f0f0fLL) == 0) {
K0B = K;
return 1;
}
}
return 0;
}
if (recover_state(S0, S1, filter_diff, test_state))
break;
}
}
if (!K0B) {
printf ("Failed to recover K0B\n");
exit(0);
} else {
printf ("Recovered K0B = %016llx\n", (unsigned long long)K0B);
fflush(stdout);
}
// Recover K0A
// ---- Phase 4: K0A follows directly from one empty-message query:
// peel K0B and 5 rounds off the first ciphertext word and strip the
// 0xAA.. padding constant. ----
{
uint64_t C[TAGSIZE/8];
unsigned long long clen;
uint8_t N[BLOCKSIZE] = {0};
// Zero-length message (GNU zero-sized array).
uint8_t M[0];
crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, Master_K);
uint64_t S = __builtin_bswap64(C[0]);
S ^= K0B;
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= 0xAAAAAAAAAAAAAAAALL;
K0A = S;
printf ("Recovered K0A = %016llx\n", (unsigned long long)K0A);
fflush(stdout);
}
// ---- Phase 5: recover K2B and K3B (K3A then follows from K2B3A) ----
printf ("Generate phase 5 data...");
fflush(stdout);
// Generate structure of Si
#pragma omp parallel for private(C) private(clen) firstprivate(N)
for (unsigned n=0; n<DATA2; n++) {
// |1 keeps the low bit fixed across the structure.
uint64_t S = _pdep_u64(n*0xbadc0fee, 0xf0f0f0f0f0f0f0f0LL)|1;
uint8_t M[BLOCKSIZE] = {0};
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K2B3A^0x0100000001000000LL;
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K2A;
uint8_t N[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
N[i] = S>>(56-8*i);
}
crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, Master_K);
assert(clen <= sizeof(C));
// Strip the known K0B/K0A layer off the second ciphertext word so
// the stored value is the state right after the unknown K2B/K3B
// layers.
uint64_t T = __builtin_bswap64(C[1]);
T ^= K0B;
for (int i=0; i<5; i++) {
T = inverse_round_function(T);
}
T ^= K0A;
T ^= 0xAAAAAAAAAAAAAAAALL;
data[n] = (data_t){C: T, N: n, t: 0};
}
printf ("Done\n");
fflush(stdout);
// Look for collisions
// qsort(data, DATA2, sizeof(data_t), compare_data);
sort_data_mask(data, DATA2, 0x00ffffff00ffffffLL);
n=0;
for (unsigned i=1; i<DATA2; i++)
if ((data[i].C&0x00ffffff00ffffffLL) == (data[i-1].C&0x00ffffff00ffffffLL)) n++;
printf ("Found %i collisions\n", n);
fflush(stdout);
for (unsigned i=1; i<DATA2; i++) {
if ((data[i].C&0x00ffffff00ffffffLL) == (data[i-1].C&0x00ffffff00ffffffLL)) {
uint64_t S0 = _pdep_u64(data[i-1].N*0xbadc0fee, 0xf0f0f0f0f0f0f0f0LL)|1;
uint64_t S1 = _pdep_u64(data[i ].N*0xbadc0fee, 0xf0f0f0f0f0f0f0f0LL)|1;
int filter_diff(uint64_t delta) {
return delta == (data[i-1].C^data[i].C);
}
// Same candidate-key scheme as phase 3, but here the recovered
// pair is (K2B, K3B): K = output key, X^S0^K2A = input key.
int test_state(uint64_t X) {
uint64_t S = X;
for (int i=0; i<5; i++)
S = round_function(S);
uint64_t K = S^data[i-1].C;
// Check known data
for (int z=0; z<16; z++) {
uint64_t T = data[z].C;
T ^= K;
for (int i=0; i<5; i++)
T = inverse_round_function(T);
/* printf ("Candidate: %016llx [%016llx->%016llx] (%016llx)\n", */
/* K, X, S, T^X); */
if (((T^X)&0x0f0f0f0f0f0f0f0fLL) == 0) {
K2B = K;
K3B = X ^ S0 ^ K2A;
return 1;
}
}
/* uint64_t Z = _pdep_u64(data[0].N, 0xf0f0f0f0f0f0f0f0LL)|1; */
/* // Recompute S from nonce with correct carries */
/* for (int i=0; i<5; i++) { */
/* Z = inverse_round_function(Z); */
/* } */
/* Z ^= K3A^0x0100000001000000LL; */
/* Z += 0x0100000001000000LL; */
/* Z ^= K3A; */
/* for (int i=0; i<5; i++) { */
/* Z = round_function(Z); */
/* } */
/* Z ^= X ^ S0 ^ K2A; */
/* // PF_K2 */
/* Z ^= K2A; */
/* for (int i=0; i<5; i++) { */
/* Z = round_function(Z); */
/* } */
/* Z ^= K; */
/* if (data[0].C == Z) { */
/* K2B = K; */
/* K3B = X ^ S0 ^ K2A; */
/* return 1; */
/* } */
return 0;
}
if (recover_state(S0, S1, filter_diff, test_state))
break;
}
}
if (!K2B) {
printf ("Failed to recover K2B\n");
exit(0);
} else {
printf ("Recovered K2B = %016llx\n", (unsigned long long)K2B);
printf ("Recovered K3B = %016llx\n", (unsigned long long)K3B);
// K2B3A = K2B ^ K3A, so K3A is now determined.
K3A = K2B3A^K2B;
fflush(stdout);
}
// ---- Phase 6: recover K1A/K1B, then verify all eight subkeys ----
printf ("Generate phase 6 data...");
fflush(stdout);
uint64_t ZZ = K2B^K3B; // Random constant
// Generate structure of Si
#pragma omp parallel for private(C) private(clen) firstprivate(N)
for (unsigned n=0; n<DATA2; n++) {
uint64_t S = _pdep_u64(n*0x0D15EA5E, 0xf0f0f0f0f0f0f0f0LL) ^ ZZ;
uint8_t M[BLOCKSIZE] = {0};
// Walk the chosen state backwards through all now-known layers
// (PF_K2^-1, PF_K3^-1, INC^-1, PF_K2^-1) to derive the nonce.
// PF_K2^-1
S ^= K2B;
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K2A;
uint64_t S0 = S;
// PF_K3^-1
S ^= K3B;
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K3A;
// INC^-1
S -= 0x0100000001000000LL;
// PF_K2^-1
S ^= K2B;
for (int i=0; i<5; i++) {
S = inverse_round_function(S);
}
S ^= K2A;
uint8_t N[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
N[i] = S>>(56-8*i);
}
crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, Master_K);
assert(clen <= sizeof(C));
// Strip the known K0 layer off the first ciphertext word, then XOR
// the intermediate S0 so only the unknown K1 layer remains.
uint64_t X = __builtin_bswap64(C[0]);
X ^= K0B;
for (int i=0; i<5; i++) {
X = inverse_round_function(X);
}
X ^= K0A;
X ^= S0;
data[n] = (data_t){C: X, N: n, t: 0};
}
printf ("Done\n");
fflush(stdout);
// Look for collisions
// qsort(data, DATA2, sizeof(data_t), compare_data);
sort_data_mask(data, DATA2, 0x00ffffff00ffffffLL);
n=0;
for (unsigned i=1; i<DATA2; i++)
if ((data[i].C&0x00ffffff00ffffffLL) == (data[i-1].C&0x00ffffff00ffffffLL)) n++;
printf ("Found %i collisions\n", n);
fflush(stdout);
for (unsigned i=1; i<DATA2; i++) {
if ((data[i].C&0x00ffffff00ffffffLL) == (data[i-1].C&0x00ffffff00ffffffLL)) {
uint64_t S0 = _pdep_u64(data[i-1].N*0x0D15EA5E, 0xf0f0f0f0f0f0f0f0LL) ^ ZZ;
uint64_t S1 = _pdep_u64(data[i ].N*0x0D15EA5E, 0xf0f0f0f0f0f0f0f0LL) ^ ZZ;
int filter_diff(uint64_t delta) {
return delta == (data[i-1].C^data[i].C);
}
// Candidate (K1A, K1B) pair; accept when it reproduces another
// known input/output pair exactly.
int test_state(uint64_t X) {
uint64_t KA = X^S0;
uint64_t S = X;
for (int i=0; i<5; i++)
S = round_function(S);
uint64_t KB = S^data[i-1].C;
// Check known data
uint64_t T = _pdep_u64(data[0].N*0x0D15EA5E, 0xf0f0f0f0f0f0f0f0LL) ^ ZZ;
T ^= KA;
for (int i=0; i<5; i++)
T = round_function(T);
T ^= KB;
if (T == data[0].C) {
K1B = KB;
K1A = KA;
return 1;
}
return 0;
}
if (recover_state(S0, S1, filter_diff, test_state))
break;
}
}
if (!K1B) {
printf ("Failed to recover K1B\n");
exit(0);
} else {
printf ("Recovered K1A = %016llx\n", (unsigned long long)K1A);
printf ("Recovered K1B = %016llx\n", (unsigned long long)K1B);
// Final check: byte-swap the recovered words back to the cipher's
// byte order and compare against the true subkey schedule.
uint64_t Keys[8] = {
__builtin_bswap64(K0A), __builtin_bswap64(K0B),
__builtin_bswap64(K1A), __builtin_bswap64(K1B),
__builtin_bswap64(K2A), __builtin_bswap64(K2B),
__builtin_bswap64(K3A), __builtin_bswap64(K3B)
};
if (memcmp(Keys, flexAEAD.subkeys, sizeof(Keys)) == 0) {
printf ("SUCCESS!\n");
} else {
printf ("WRONG KEY?\n");
}
fflush(stdout);
}
}
/* One FlexAEAD round on a 64-bit state: interleave the nibbles of the
 * two 32-bit halves (PDEP shuffle), then three alternating byte-wise
 * AES S-box / XOR mixing steps. */
uint64_t round_function(uint64_t x) {
    uint64_t shuffled = _pdep_u64(x >> 32, 0xf0f0f0f0f0f0f0f0) |
                        _pdep_u64(x, 0x0f0f0f0f0f0f0f0f);
    uint32_t left  = (uint32_t)(shuffled >> 32);
    uint32_t right = (uint32_t)shuffled;
    right = SBOX(right);
    left ^= right;
    left = SBOX(left);
    right ^= left;
    right = SBOX(right);
    return ((uint64_t)left << 32) | right;
}
/* Inverse of round_function: undo the three S-box/XOR steps in reverse
 * order, then de-interleave the nibbles back into the two halves with
 * PEXT. */
uint64_t inverse_round_function(uint64_t x) {
    uint32_t right = (uint32_t)x;
    uint32_t left  = (uint32_t)(x >> 32);
    right = SBOXI(right);
    right ^= left;
    left = SBOXI(left);
    left ^= right;
    right = SBOXI(right);
    uint64_t merged = ((uint64_t)left << 32) | right;
    return _pext_u64(merged, 0x0f0f0f0f0f0f0f0f) |
           (_pext_u64(merged, 0xf0f0f0f0f0f0f0f0) << 32);
}
/* Phase-1 difference filter: accept delta only if it is zero outside
 * bytes 3 and 7, has the low bit of both of those bytes set, and adding
 * one to each of the two active bytes leaves exactly two bits set in
 * total (i.e. each active byte is a solid run of low ones — the carry
 * pattern the differential path predicts). */
int filter_diff_phase1(uint64_t delta) {
    if ((delta & 0x00ffffff00ffffffLL) != 0)
        return 0;
    if ((delta & 0x0100000001000000LL) != 0x0100000001000000LL)
        return 0;
    return __builtin_popcountll((delta >> 24) + 0x0100000001) == 2;
}
/* Verify a candidate K2A with two chosen-nonce queries. For n in {0,1}
 * we build a nonce pair whose internal states (after the first key
 * layer, assuming the candidate K) differ by exactly the counter
 * constant 0x0100000001000000; if the candidate is right, encrypting
 * the same zero message must produce a cross-block ciphertext
 * collision. On success stores the key in the global K2A and returns 1;
 * returns 0 otherwise. */
int test_K2A (uint64_t K) {
for (int n=0; n<2; n++) {
// Build pair that should collide
uint64_t S0 = 0x0000000000000000LL ^ (n<<24);
uint64_t S1 = 0x0100000001000000LL ^ (n<<24);
// Peel 5 rounds and XOR the candidate key to turn the target
// internal states into nonces.
for (int i=0; i<5; i++) {
S0 = inverse_round_function(S0);
S1 = inverse_round_function(S1);
}
S0 ^= K;
S1 ^= K;
uint8_t N0[BLOCKSIZE];
uint8_t N1[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
N0[i] = S0>>(56-8*i);
N1[i] = S1>>(56-8*i);
}
uint8_t M[2*BLOCKSIZE] = {0};
uint64_t C0[(2*BLOCKSIZE+TAGSIZE)/8];
uint64_t C1[(2*BLOCKSIZE+TAGSIZE)/8];
unsigned long long clen;
crypto_aead_encrypt((uint8_t*)C0, &clen, M, sizeof(M), NULL, 0, NULL, N0, Master_K);
assert(clen <= sizeof(C0));
crypto_aead_encrypt((uint8_t*)C1, &clen, M, sizeof(M), NULL, 0, NULL, N1, Master_K);
assert(clen <= sizeof(C1));
// Cross-block collision (block 0 of one query equals block 1 of
// the other) confirms the candidate.
if (C0[0] == C1[1] || C0[1] == C1[0]) {
K2A = K;
return 1;
}
}
return 0;
}
/* Helper for test_K2B3A: one chosen-plaintext/chosen-nonce query pair.
 * X0/X1 are turned into nonces (5 inverse rounds, then XOR the already
 * recovered K2A); Y0/Y1 are pushed 5 rounds forward to become the
 * one-block messages. Returns 1 iff the two encryptions collide on
 * ciphertext word 1. */
int test_pair_K2B3A (uint64_t X0, uint64_t X1, uint64_t Y0, uint64_t Y1) {
// Build pair that should collide
// Note the swap: query 0 uses X1 and query 1 uses X0.
uint64_t S0 = X1;
uint64_t S1 = X0;
for (int i=0; i<5; i++) {
S0 = inverse_round_function(S0);
S1 = inverse_round_function(S1);
}
S0 ^= K2A;
S1 ^= K2A;
uint8_t N0[BLOCKSIZE];
uint8_t N1[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
N0[i] = S0>>(56-8*i);
N1[i] = S1>>(56-8*i);
}
S0 = Y0;
S1 = Y1;
for (int i=0; i<5; i++) {
S0 = round_function(S0);
S1 = round_function(S1);
}
uint8_t M0[BLOCKSIZE];
uint8_t M1[BLOCKSIZE];
for (int i=0; i<BLOCKSIZE; i++) {
M0[i] = S0>>(56-8*i);
M1[i] = S1>>(56-8*i);
}
uint64_t C0[(BLOCKSIZE+TAGSIZE)/8];
uint64_t C1[(BLOCKSIZE+TAGSIZE)/8];
unsigned long long clen;
crypto_aead_encrypt((uint8_t*)C0, &clen, M0, sizeof(M0), NULL, 0, NULL, N0, Master_K);
assert(clen <= sizeof(C0));
crypto_aead_encrypt((uint8_t*)C1, &clen, M1, sizeof(M1), NULL, 0, NULL, N1, Master_K);
assert(clen <= sizeof(C1));
if (C0[1] == C1[1]) {
// printf ("## %08x %08x\n", C0[1], C1[1]);
return 1;
}
return 0;
}
/* Verify and clean up a candidate K2B3A (= K2B^K3A). First a sanity
 * collision test with an arbitrary difference d; then, because the
 * counter increment makes the key bytes at bit offsets 24 and 56
 * ambiguous up to a carry chain, probe those two bytes with extra
 * query pairs and accumulate the correction mask KD. On success stores
 * K^KD in the global K2B3A and returns 1. */
int test_K2B3A (uint64_t K, uint64_t X0, uint64_t X1) {
// Debug counter: how many candidates reached this test.
static int n = 0;
n++;
uint64_t KD = 0;
uint64_t d = 0x0012345600789abcLL;
if (test_pair_K2B3A(X0, X1^d, X0^K, X1^K^d)) {
/* printf ("Candidate key: %016llx\n", K); */
// Clean up carries from plus-one
// Byte at bit 24: either no carry (correction is just bit 24) ...
if (test_pair_K2B3A(X0, X0^(2ULL<<24), X0^K, X0^K^(2ULL<<24))) {
// Ok, no carry
KD ^= 1ULL<<24;
} else {
// ... or find the carry-run mask 0b11, 0b111, ... that matches.
for (uint64_t mask=3; mask<0x100; mask=2*mask+1) {
if (test_pair_K2B3A(X0, X0^(1ULL<<24), X0^K, X0^K^(mask<<24))) {
KD ^= mask<<24;
break;
}
}
}
// Same carry cleanup for the byte at bit 56.
if (test_pair_K2B3A(X0, X0^(2ULL<<56), X0^K, X0^K^(2ULL<<56))) {
// Ok, no carry
KD ^= 1ULL<<56;
} else {
for (uint64_t mask=3; mask<0x100; mask=2*mask+1) {
if (test_pair_K2B3A(X0, X0^(1ULL<<56), X0^K, X0^K^(mask<<56))) {
KD ^= mask<<56;
break;
}
}
}
// Both active bytes must have received a correction; otherwise the
// candidate was a false positive.
if ((KD&0xff00000000000000LL) == 0 || (KD&0x00000000ff000000LL) == 0) {
return 0;
}
K2B3A = K^KD;
/* printf ("Cleaned-up : %016llx [n=%i]\n", (unsigned long long)(K^KD), n); */
return 1;
} else {
return 0;
}
}
// Recover internal state from difference,
// assuming differential path is followed
// Callbacks:
// - filter_diff to test output difference
// - test_state is called on each candidate
//
// Structure: a guess-and-filter search over the unknown 64-bit input
// state. Two 16-bit "superbox" nibble groups (k1, k2) are guessed and
// filtered by the nibble pattern of the difference after one and two
// rounds; the remaining 32 bits are guessed as two more 16-bit chunks
// (Y, Z), the pair is pushed to 5 rounds, filter_diff screens the
// 5-round difference, and surviving candidates are unwound 5 rounds
// back to the input state and handed to test_state. The outer k1 loop
// is OpenMP-parallel; a hit sets ret under a critical section (the
// loop cannot break early, so all k1 values are always scanned).
uint64_t recover_state (uint64_t S0, uint64_t S1, callback_t filter_diff, callback_t test_state) {
printf("Trying to recover key from pair (%016llx %016llx)\n", (unsigned long long)S0, (unsigned long long)S1);
int ret = 0;
// first superbox
#pragma omp parallel for
for (uint32_t k1=0; k1 < 0x10000; k1++) {
uint64_t T0 = S0;
uint64_t T1 = S1;
// Place the 16 guessed bits into the high nibble of every other byte.
T0 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
T1 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
T0 = round_function(T0);
T1 = round_function(T1);
// After one round the difference must vanish in one of the two
// nibble groups of byte 3/7 for the path to hold.
if ( (((T0^T1) & 0x0f0000000f000000LL) == 0) ||
(((T0^T1) & 0xf0000000f0000000LL) == 0) ) {
// second superbox
for (uint32_t k2=0; k2 < 0x10000; k2++) {
T0 = S0;
T1 = S1;
T0 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
T1 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
T0 ^= _pdep_u64(k2, 0x00f000f000f000f0LL);
T1 ^= _pdep_u64(k2, 0x00f000f000f000f0LL);
T0 = round_function(T0);
T1 = round_function(T1);
// One-round filter with both superboxes in place.
if ( (((T0^T1) & 0x0fff0fff0fff0fffLL) == 0) ||
(((T0^T1) & 0xf0fff0fff0fff0ffLL) == 0) ) {
T0 = round_function(T0);
T1 = round_function(T1);
// Two-round filter selects which nibble positions remain
// unknown (mask marks the bits already determined).
uint64_t mask = 0;
if ( (((T0^T1) & 0x0fffffff0fffffffLL) == 0) ||
(((T0^T1) & 0xff0fffffff0fffffLL) == 0) )
mask = 0xffff0f0fffff0f0fLL;
if ( (((T0^T1) & 0xf0fffffff0ffffffLL) == 0) ||
(((T0^T1) & 0xfff0fffffff0ffffLL) == 0) )
mask = 0xfffff0f0fffff0f0LL;
if (mask) {
int n = 0;
// printf ("### %04x %04x\n", k1, k2);
// Guess additional bytes
for (uint32_t Y=0; Y<0x10000; Y++) {
uint64_t U0 = T0 & mask;
uint64_t U1 = T1 & mask;
U0 |= _pdep_u64(Y, ~mask);
U1 |= _pdep_u64(Y, ~mask);
U0 = round_function(U0);
U1 = round_function(U1);
U0 = round_function(U0);
U1 = round_function(U1);
// Four-round filter on the Y guess.
if ( (((U0^U1) & 0x0fffffff0fffffffLL) == 0) ||
(((U0^U1) & 0xffff0fffffff0fffLL) == 0) ) {
// Guess final bytes
for (uint32_t Z=0; Z<0x10000; Z++) {
n++;
// Restart from the 2-round state with both Y and Z
// guesses filled in.
U0 = T0 & 0xffff0000ffff0000LL;
U1 = T1 & 0xffff0000ffff0000LL;
U0 |= _pdep_u64(Y, ~mask);
U1 |= _pdep_u64(Y, ~mask);
U0 |= _pdep_u64(Z, (~mask)^0x0000ffff0000ffffLL);
U1 |= _pdep_u64(Z, (~mask)^0x0000ffff0000ffffLL);
U0 = round_function(U0);
U1 = round_function(U1);
U0 = round_function(U0);
U1 = round_function(U1);
U0 = round_function(U0);
U1 = round_function(U1);
uint64_t delta = U0^U1;
if (filter_diff(delta)) {
// Unwind the full 5 rounds to get the candidate
// input state.
U0 = inverse_round_function(U0);
U1 = inverse_round_function(U1);
U0 = inverse_round_function(U0);
U1 = inverse_round_function(U1);
U0 = inverse_round_function(U0);
U1 = inverse_round_function(U1);
U0 = inverse_round_function(U0);
U1 = inverse_round_function(U1);
U0 = inverse_round_function(U0);
U1 = inverse_round_function(U1);
// Both inputs must imply the same key difference.
assert((S0^U0) == (S1^U1));
/* printf ("Candidate key: %016llx [delta:%016llx] [%04x %04x]\n", */
/* (unsigned long long)(S0^U0), (unsigned long long)delta, k1, k2); */
if (test_state(U0)) {
// printf ("Recovered key? %016llx [delta:%016llx]\n", (unsigned long long)(S0^U0), (unsigned long long)delta);
#pragma omp critical
{
ret=1;
}
}
}
}
}
}
}
}
}
}
}
return ret;
}
/* Dump the XOR difference of two states, one byte per column, printing
 * the LOW nibble first and then the high nibble of each byte. */
void print_diff_state (uint8_t S0[BLOCKSIZE], uint8_t S1[BLOCKSIZE]) {
    for (int i = 0; i < BLOCKSIZE; i++) {
        uint8_t d = S0[i] ^ S1[i];
        printf(" %01x%01x", d & 0xf, d >> 4);
    }
    printf ("\n");
}
/* FlexAEAD direct shuffle layer: output byte 2i takes the high nibbles
 * of block[i] and block[i+blocklen/2]; byte 2i+1 takes their low
 * nibbles. The result is written through `state` (scratch, blocklen
 * bytes) and copied back into `block`. */
inline void dirShuffleLayer( unsigned char * block, unsigned long long blocklen, unsigned char * state )
{
    const unsigned long long half = blocklen / 2;
    for (unsigned long long i = 0; i < half; i++) {
        unsigned char a = block[i];
        unsigned char b = block[i + half];
        state[2*i]     = (unsigned char)((a & 0xf0) | ((b & 0xf0) >> 4));
        state[2*i + 1] = (unsigned char)(((a & 0x0f) << 4) | (b & 0x0f));
    }
    memcpy( block, state, blocklen);
}
/* void print_diff_pair (data_t a, data_t b) { */
/* uint8_t N0[BLOCKSIZE] = {0}; */
/* uint8_t N1[BLOCKSIZE] = {0}; */
/* uint8_t M[2*BLOCKSIZE] = {0}; */
/* uint64_t C[(2*BLOCKSIZE+TAGSIZE)/8]; */
/* unsigned long long clen; */
/* make_nonce(N0, a.N); */
/* make_nonce(N1, b.N); */
/* flexAEAD_dbg = 1; */
/* crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N0, Master_K); */
/* crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N1, Master_K); */
/* flexAEAD_dbg = 0; */
/* } */
|
constitute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
% MagickCore Methods to Constitute an Image %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * Build a new image of size columns x rows from a caller-supplied pixel
 * buffer. `map` names the channel order (e.g. "RGB", "RGBA", "CMYK",
 * "I"); `storage` gives the element type of `pixels`. Returns a new
 * Image, or NULL on failure (errors reported via `exception`).
 */
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
const char *map,const StorageType storage,const void *pixels,
ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status;
ssize_t
i;
size_t
length;
/*
Allocate image structure.
*/
assert(map != (const char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
assert(pixels != (void *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage((ImageInfo *) NULL,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
/* Derive the image depth from the storage element size. Note that
LongPixel uses sizeof(unsigned long), which is platform-dependent. */
switch (storage)
{
case CharPixel: image->depth=8*sizeof(unsigned char); break;
case DoublePixel: image->depth=8*sizeof(double); break;
case FloatPixel: image->depth=8*sizeof(float); break;
case LongPixel: image->depth=8*sizeof(unsigned long); break;
case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
case ShortPixel: image->depth=8*sizeof(unsigned short); break;
default: break;
}
/* Scan the channel map to configure alpha trait and colorspace before
the pixels are imported. */
length=strlen(map);
for (i=0; i < (ssize_t) length; i++)
{
switch (map[i])
{
case 'a':
case 'A':
case 'O':
case 'o':
{
/* Alpha or opacity channel present. */
image->alpha_trait=BlendPixelTrait;
break;
}
case 'C':
case 'c':
case 'm':
case 'M':
case 'Y':
case 'y':
case 'K':
case 'k':
{
/* Any CMYK component switches the whole image to CMYK. */
image->colorspace=CMYKColorspace;
break;
}
case 'I':
case 'i':
{
image->colorspace=GRAYColorspace;
break;
}
default:
{
/* A single unrecognized channel (e.g. "R" alone) is treated as
grayscale. */
if (length == 1)
image->colorspace=GRAYColorspace;
break;
}
}
}
status=SetImageExtent(image,columns,rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/* Copy the caller's buffer into the pixel cache; on failure the image
is destroyed and NULL is returned. */
status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  PingStream() is the stream handler installed by PingImage(): it discards
  the pixel data and reports the row as fully consumed by returning
  `columns`, so the coder runs its full parse without pixel storage.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  /* silence unused-parameter warnings; this sink never touches the pixels */
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *clone_info;

  /*
    Read the image properties (not the pixels): clone the caller's info,
    raise the ping flag, and stream the file through the discarding
    PingStream handler.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  clone_info=CloneImageInfo(image_info);
  clone_info->ping=MagickTrue;
  image=ReadStream(clone_info,&PingStream,exception);
  if (image == (Image *) NULL)
    {
      clone_info=DestroyImageInfo(clone_info);
      return((Image *) NULL);
    }
  /* a ping should not count toward the image's elapsed-time accounting */
  ResetTimer(&image->timer);
  if (clone_info->verbose != MagickFalse)
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  clone_info=DestroyImageInfo(clone_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImages method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /*
    Expand any printf-style scene template (e.g. image-%d.png) for the
    current scene; if the expansion differs from the literal filename, the
    name is a template and each scene is pinged individually below.
  */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      /* probe with a throwaway exception so a failed probe is not reported */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          /* no [m-n] scene range given: fall back to a single ping */
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      /* ping each scene in turn; unreadable scenes are silently skipped */
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsCoderAuthorized() checks the security policy: returns MagickTrue when
  the named coder is permitted the requested rights, otherwise records a
  PolicyError ("NotAuthorized") in `exception`, sets errno to EPERM, and
  returns MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) != MagickFalse)
    return(MagickTrue);
  errno=EPERM;
  (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
    "NotAuthorized","`%s'",coder);
  return(MagickFalse);
}
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  /* remember the pre-SetImageInfo name; it is restored onto the images later */
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  /* only policy errors from the lookup are propagated to the caller */
  if (sans_exception->severity == PolicyError)
    InheritException(exception,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* raw coders default to the host byte order, probed at run time */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /*
        The coder needs to seek: open the blob now and, if the source is not
        seekable (e.g. a pipe), spool it to a temporary file first.
      */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          /* mark the spool file for deletion once the read completes */
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* no direct decoder: unless a delegate exists, re-derive the format
         from the original filename and try the lookup again */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      /* serialize non-thread-safe coders on the coder's semaphore */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      /* delegate writes its output to a fresh temporary path */
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      /* the placeholder image only carried the filename into the delegate */
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      /* now decode whatever format the delegate produced */
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* discard the spool/delegate temporary and restore the real filename */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      /* a scene selector (e.g. file[2-4]) was given: keep only those frames */
      Image
        *clones;

      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /*
    Post-process every frame: restore filenames, fold EXIF/TIFF metadata
    into image attributes, apply -extract/-delay/-dispose options, and
    stamp file timestamps.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
    {
      char
        magick_path[MagickPathExtent],
        *property,
        timestamp[MagickTimeExtent];

      const char
        *option;

      const StringInfo
        *profile;

      ssize_t
        option_type;

      static const char
        *source_date_epoch = (const char *) NULL;

      static MagickBooleanType
        epoch_initalized = MagickFalse;

      next->taint=MagickFalse;
      GetPathComponent(magick_filename,MagickPath,magick_path);
      if ((*magick_path == '\0') && (*next->magick == '\0'))
        (void) CopyMagickString(next->magick,magick,MagickPathExtent);
      (void) CopyMagickString(next->magick_filename,magick_filename,
        MagickPathExtent);
      if (IsBlobTemporary(image) != MagickFalse)
        (void) CopyMagickString(next->filename,filename,MagickPathExtent);
      if (next->magick_columns == 0)
        next->magick_columns=next->columns;
      if (next->magick_rows == 0)
        next->magick_rows=next->rows;
      /* force eager parsing of the embedded metadata profiles */
      (void) GetImageProperty(next,"exif:*",exception);
      (void) GetImageProperty(next,"icc:*",exception);
      (void) GetImageProperty(next,"iptc:*",exception);
      (void) GetImageProperty(next,"xmp:*",exception);
      value=GetImageProperty(next,"exif:Orientation",exception);
      if (value == (char *) NULL)
        value=GetImageProperty(next,"tiff:Orientation",exception);
      if (value != (char *) NULL)
        {
          /* promote the profile orientation to the image, then drop the
             property so it is not written back redundantly */
          next->orientation=(OrientationType) StringToLong(value);
          (void) DeleteImageProperty(next,"tiff:Orientation");
          (void) DeleteImageProperty(next,"exif:Orientation");
        }
      value=GetImageProperty(next,"exif:XResolution",exception);
      if (value != (char *) NULL)
        {
          geometry_info.rho=next->resolution.x;
          geometry_info.sigma=1.0;
          flags=ParseGeometry(value,&geometry_info);
          if (geometry_info.sigma != 0)
            next->resolution.x=geometry_info.rho/geometry_info.sigma;
          /* comma form "num,den" is treated as rho + sigma/1000 */
          if (strchr(value,',') != (char *) NULL)
            next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
          (void) DeleteImageProperty(next,"exif:XResolution");
        }
      value=GetImageProperty(next,"exif:YResolution",exception);
      if (value != (char *) NULL)
        {
          geometry_info.rho=next->resolution.y;
          geometry_info.sigma=1.0;
          flags=ParseGeometry(value,&geometry_info);
          if (geometry_info.sigma != 0)
            next->resolution.y=geometry_info.rho/geometry_info.sigma;
          if (strchr(value,',') != (char *) NULL)
            next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
          (void) DeleteImageProperty(next,"exif:YResolution");
        }
      value=GetImageProperty(next,"exif:ResolutionUnit",exception);
      if (value == (char *) NULL)
        value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
      if (value != (char *) NULL)
        {
          option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
            value);
          if (option_type >= 0)
            next->units=(ResolutionType) option_type;
          (void) DeleteImageProperty(next,"exif:ResolutionUnit");
          (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
        }
      if (next->page.width == 0)
        next->page.width=next->columns;
      if (next->page.height == 0)
        next->page.height=next->rows;
      option=GetImageOption(read_info,"caption");
      if (option != (const char *) NULL)
        {
          property=InterpretImageProperties(read_info,next,option,exception);
          (void) SetImageProperty(next,"caption",property,exception);
          property=DestroyString(property);
        }
      option=GetImageOption(read_info,"comment");
      if (option != (const char *) NULL)
        {
          property=InterpretImageProperties(read_info,next,option,exception);
          (void) SetImageProperty(next,"comment",property,exception);
          property=DestroyString(property);
        }
      option=GetImageOption(read_info,"label");
      if (option != (const char *) NULL)
        {
          property=InterpretImageProperties(read_info,next,option,exception);
          (void) SetImageProperty(next,"label",property,exception);
          property=DestroyString(property);
        }
      if (LocaleCompare(next->magick,"TEXT") == 0)
        (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
      if ((read_info->extract != (char *) NULL) &&
          (read_info->stream == (StreamHandler) NULL))
        {
          /*
            Honor -extract: crop when an offset was given, otherwise resize
            to the requested width/height.
          */
          RectangleInfo
            geometry;

          SetGeometry(next,&geometry);
          flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
          if ((next->columns != geometry.width) ||
              (next->rows != geometry.height))
            {
              if (((flags & XValue) != 0) || ((flags & YValue) != 0))
                {
                  Image
                    *crop_image;

                  crop_image=CropImage(next,&geometry,exception);
                  if (crop_image != (Image *) NULL)
                    ReplaceImageInList(&next,crop_image);
                }
              else
                if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                  {
                    Image
                      *size_image;

                    flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                      exception);
                    size_image=ResizeImage(next,geometry.width,geometry.height,
                      next->filter,exception);
                    if (size_image != (Image *) NULL)
                      ReplaceImageInList(&next,size_image);
                  }
            }
        }
      /* NOTE(review): the icc/icm lookup result is immediately overwritten
         by the iptc lookup, and `profile` is never read afterwards — these
         calls look like they only exist for their side effects (profile
         materialization); confirm against GetImageProfile's behavior */
      profile=GetImageProfile(next,"icc");
      if (profile == (const StringInfo *) NULL)
        profile=GetImageProfile(next,"icm");
      profile=GetImageProfile(next,"iptc");
      if (profile == (const StringInfo *) NULL)
        profile=GetImageProfile(next,"8bim");
      /* cache getenv() once per process (static); SOURCE_DATE_EPOCH set
         means reproducible builds: skip the filesystem timestamps below */
      if (epoch_initalized == MagickFalse)
        {
          source_date_epoch=getenv("SOURCE_DATE_EPOCH");
          epoch_initalized=MagickTrue;
        }
      if (source_date_epoch == (const char *) NULL)
        {
          (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
            sizeof(timestamp),timestamp);
          (void) SetImageProperty(next,"date:modify",timestamp,exception);
          (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
            sizeof(timestamp),timestamp);
          (void) SetImageProperty(next,"date:create",timestamp,exception);
        }
      option=GetImageOption(image_info,"delay");
      if (option != (const char *) NULL)
        {
          /* geometry form: delay[xticks-per-second], with '>'/'<' meaning
             clamp-down / conditional update respectively */
          flags=ParseGeometry(option,&geometry_info);
          if ((flags & GreaterValue) != 0)
            {
              if (next->delay > (size_t) floor(geometry_info.rho+0.5))
                next->delay=(size_t) floor(geometry_info.rho+0.5);
            }
          else
            if ((flags & LessValue) != 0)
              {
                if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                  next->ticks_per_second=CastDoubleToLong(floor(
                    geometry_info.sigma+0.5));
              }
            else
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          if ((flags & SigmaValue) != 0)
            next->ticks_per_second=CastDoubleToLong(floor(
              geometry_info.sigma+0.5));
        }
      option=GetImageOption(image_info,"dispose");
      if (option != (const char *) NULL)
        {
          option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
            option);
          if (option_type >= 0)
            next->dispose=(DisposeType) option_type;
        }
      if (read_info->verbose != MagickFalse)
        (void) IdentifyImage(next,stderr,MagickFalse,exception);
      image=next;
    }
  read_info=DestroyImageInfo(read_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnableToReadImageData");
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImages method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /*
    Expand any printf-style scene template (e.g. image-%d.png); a difference
    from the literal filename means each scene must be read individually.
  */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      /* probe with a throwaway exception so a failed probe is not reported */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /* read each scene; unreadable scenes are silently skipped */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            /* NOTE(review): filename interpretation uses image_info here
               while the rest of this branch uses read_info — confirm this
               asymmetry is intentional */
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  /* `image` must be in scope for the ThrowReaderException macro below */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  /* decode everything after the ',' separator */
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  /* locate the subtype in "data:image/gif;..." to hint the decoder */
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      /* strip an "x-" experimental-type prefix (e.g. image/x-png) */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;
      /* synthesize "data.<subtype>" so BlobToImage picks the right coder;
         the i < MagickPathExtent-6 bound leaves room for "data." + NUL */
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file on disk, the name is defined by the filename member
% of the image structure. WriteImage() returns MagickFalse if there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* remember the caller's filename; restored on several exit paths */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  /* retry with the real exception so a policy error reaches the caller */
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(write_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* raw coders default to the host byte order, probed at run time */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      /* bi-modal shortcut: a single, unmodified image whose original file
         is still accessible can be converted directly by a delegate */
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /* probe the destination: if it is not seekable (e.g. a pipe), write
         to a unique temporary file and copy it to the destination later */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      /* serialize non-thread-safe coders on the coder's semaphore */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder and no delegate: fall back through a chain of format
            guesses — the image's own format, the filename extension, and
            finally the format the image was read in.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          if (sans_exception->severity == PolicyError)
            magick_info=GetMagickInfo(write_info->magick,exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowWriterException(FileOpenError,"UnableToWriteFile");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag  "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  Image
    *p;

  /*
    Validate arguments (debug builds); log when tracing is enabled.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Work on a private clone of the image info; clear its magick member so
    SetImageInfo() below can deduce the output format from the filename.
  */
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /*
    An explicit filename argument overrides the filename stored in every
    frame of the sequence.
  */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  /*
    Deduce the encoder from the filename.  Any exception raised here is
    deliberately discarded (sans_exception); if no format was deduced,
    fall back to the first image's own format.
  */
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  /*
    Scan the list once; as soon as a non-increasing scene number is found,
    renumber the whole sequence consecutively from the first image's scene.
  */
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /*
      Suppress the per-image monitor while writing; sequence-level
      progress is reported per frame further below.
    */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /*
      With adjoin set, WriteImage() has already written the entire
      sequence into a single file; stop after the first iteration.
    */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  /*
    status accumulated MagickFalse (0) on any failed frame.
  */
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
* Graph.h
*
* Created on: 01.06.2014
* Author: Christian Staudt (christian.staudt@kit.edu), Klara Reichard (klara.reichard@gmail.com), Marvin Ritter (marvin.ritter@gmail.com)
*/
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <vector>
#include <stack>
#include <queue>
#include <utility>
#include <stdexcept>
#include <functional>
#include "../Globals.h"
#include "Coordinates.h"
#include "../viz/Point.h"
#include "../auxiliary/Random.h"
#include "../auxiliary/FunctionTraits.h"
#include "../auxiliary/Log.h"
namespace NetworKit {
/**
* @ingroup graph
* A graph (with optional weights) and parallel iterator methods.
*/
class Graph final {
	friend class ParallelPartitionCoarsening;
	friend class GraphBuilder;

private:
	// graph attributes
	count id; //!< unique graph id, starts at 0
	std::string name; //!< name of the graph, initially G#ID

	// scalars
	count n; //!< current number of nodes
	count m; //!< current number of edges
	count storedNumberOfSelfLoops; //!< current number of self loops, edges which have the same origin and target
	node z; //!< current upper bound of node ids, z will be the id of the next node
	edgeid omega; //!< current upper bound of edge ids, will be the id of the next edge
	count t; //!< current time step
	bool weighted; //!< true if the graph is weighted, false otherwise
	bool directed; //!< true if the graph is directed, false otherwise
	bool edgesIndexed; //!< true if edge ids have been assigned

	// per node data
	std::vector<bool> exists; //!< exists[v] is true if node v has not been removed from the graph
	Coordinates<float> coordinates; //!< coordinates of nodes (if present)

	std::vector<count> inDeg; //!< only used for directed graphs, number of edges incoming per node
	std::vector<count> outDeg; //!< degree of every node, zero if node was removed. For directed graphs only outgoing edges count

	std::vector< std::vector<node> > inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes u that have an edge (u, v)
	std::vector< std::vector<node> > outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in outEdges[u] and for undirected also u in outEdges[v]

	std::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges
	std::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges

	std::vector< std::vector<edgeid> > inEdgeIds; //!< only used for directed graphs, same schema as inEdges
	std::vector< std::vector<edgeid> > outEdgeIds; //!< same schema (and same order!) as outEdges

	/**
	 * Returns the next unique graph id.
	 */
	count getNextGraphId();

	/**
	 * Returns the index of node u in the array of incoming edges of node v. (for directed graphs inEdges is searched, while for undirected outEdges is searched, which gives the same result as indexInOutEdgeArray).
	 */
	index indexInInEdgeArray(node v, node u) const;

	/**
	 * Returns the index of node v in the array of outgoing edges of node u.
	 */
	index indexInOutEdgeArray(node u, node v) const;

	/**
	 * Returns the edge weight of the outgoing edge of index i in the outgoing edges of node u
	 * @param u The node
	 * @param i The index
	 * @return The weight of the outgoing edge or defaultEdgeWeight if the graph is unweighted
	 */
	template<bool hasWeights>
	inline edgeweight getOutEdgeWeight(node u, index i) const;

	/**
	 * Returns the edge weight of the incoming edge of index i in the incoming edges of node u
	 *
	 * @param u The node
	 * @param i The index in the incoming edge array
	 * @return The weight of the incoming edge
	 */
	template<bool hasWeights>
	inline edgeweight getInEdgeWeight(node u, index i) const;

	/**
	 * Returns the edge id of the edge of index i in the outgoing edges of node u
	 *
	 * @param u The node
	 * @param i The index in the outgoing edges
	 * @return The edge id
	 */
	template<bool graphHasEdgeIds>
	inline edgeid getOutEdgeId(node u, index i) const;

	/**
	 * Returns the edge id of the edge of index i in the incoming edges of node u
	 *
	 * @param u The node
	 * @param i The index in the incoming edges of u
	 * @return The edge id
	 */
	template<bool graphHasEdgeIds>
	inline edgeid getInEdgeId(node u, index i) const;

	/**
	 * @brief Returns if the edge (u, v) shall be used in the iteration of all edges
	 *
	 * @param u The source node of the edge
	 * @param v The target node of the edge
	 * @return If the edge shall be used, i.e. if v is not none and in the undirected case if u >= v
	 */
	template<bool graphIsDirected>
	inline bool useEdgeInIteration(node u, node v) const;

	/**
	 * @brief Implementation of the for loop for outgoing edges of u
	 *
	 * Note: If all (valid) outgoing edges shall be considered, graphIsDirected needs to be set to true
	 *
	 * @param u The node
	 * @param handle The handle that shall be executed for each edge
	 * @return void
	 */
	template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
	inline void forOutEdgesOfImpl(node u, L handle) const;

	/**
	 * @brief Implementation of the for loop for incoming edges of u
	 *
	 * For undirected graphs, this is the same as forOutEdgesOfImpl but u and v are changed in the handle
	 *
	 * @param u The node
	 * @param handle The handle that shall be executed for each edge
	 * @return void
	 */
	template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
	inline void forInEdgesOfImpl(node u, L handle) const;

	/**
	 * @brief Implementation of the for loop for all edges, @see forEdges
	 *
	 * @param handle The handle that shall be executed for all edges
	 * @return void
	 */
	template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
	inline void forEdgeImpl(L handle) const;

	/**
	 * @brief Parallel implementation of the for loop for all edges, @see parallelForEdges
	 *
	 * @param handle The handle that shall be executed for all edges
	 * @return void
	 */
	template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
	inline void parallelForEdgesImpl(L handle) const;

	/**
	 * @brief Summation variant of the parallel for loop for all edges, @see parallelSumForEdges
	 *
	 * @param handle The handle that shall be executed for all edges
	 * @return void
	 */
	template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
	inline double parallelSumForEdgesImpl(L handle) const;

	/*
	 * In the following definitions, Aux::FunctionTraits is used in order to only execute lambda functions
	 * with the appropriate parameters. The decltype-return type is used for determining the return type of
	 * the lambda (needed for summation) but also determines if the lambda accepts the correct number of parameters.
	 * Otherwise the return type declaration fails and the function is excluded from overload resolution.
	 * Then there are multiple possible lambdas with three (third parameter id or weight) and two (second parameter
	 * can be second node id or edge weight for neighbor iterators). This is checked using Aux::FunctionTraits and
	 * std::enable_if. std::enable_if only defines the type member when the given bool is true, this bool comes from
	 * std::is_same which compares two types. The function traits give either the parameter type or if it is out of bounds
	 * they define type as void.
	 */

	/**
	 * Triggers a static assert error when no other method is chosen. Because of the use of "..." as arguments, the priority
	 * of this method is lower than the priority of the other methods. This method avoids ugly and unreadable template substitution
	 * error messages from the other declarations.
	 */
	template<class F, void* = (void*)0>
	typename Aux::FunctionTraits<F>::result_type edgeLambda(F&f, ...) const {
		// the strange condition is used in order to delay the evaluation of the static assert to the moment when this function is actually used
		static_assert(! std::is_same<F, F>::value, "Your lambda does not support the required parameters or the parameters have the wrong type.");
		return std::declval<typename Aux::FunctionTraits<F>::result_type>(); // use the correct return type (this won't compile)
	}

	/**
	 * Calls the given function f if its fourth argument is of the type edgeid and third of type edgeweight
	 * Note that the decltype check is not enough as edgeweight can be cast to node and we want to assure the exact parameter types.
	 */
	template < class F,
	           typename std::enable_if <
	           (Aux::FunctionTraits<F>::arity >= 3) &&
	           std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&
	           std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<3>::type>::value
	           >::type * = (void*)0 >
	auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew, id)) {
		return f(u, v, ew, id);
	}

	/**
	 * Calls the given function f if its third argument is of the type edgeid, discards the edge weight
	 * Note that the decltype check is not enough as edgeweight can be cast to node.
	 */
	template<class F,
	         typename std::enable_if<
	             (Aux::FunctionTraits<F>::arity >= 2) &&
	             std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&
	             std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value /* prevent f(v, weight, eid) */
	             >::type* = (void*)0>
	auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, id)) {
		return f(u, v, id);
	}

	/**
	 * Calls the given function f if its third argument is of type edgeweight, discards the edge id
	 * Note that the decltype check is not enough as node can be cast to edgeweight.
	 */
	template<class F,
	         typename std::enable_if<
	             (Aux::FunctionTraits<F>::arity >= 2) &&
	             std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value
	             >::type* = (void*)0>
	auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew)) {
		return f(u, v, ew);
	}

	/**
	 * Calls the given function f if it has only two arguments and the second argument is of type node,
	 * discards edge weight and id
	 * Note that the decltype check is not enough as edgeweight can be cast to node.
	 */
	template<class F,
	         typename std::enable_if<
	             (Aux::FunctionTraits<F>::arity >= 1) &&
	             std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value
	             >::type* = (void*)0>
	auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v)) {
		return f(u, v);
	}

	/**
	 * Calls the given function f if it has only two arguments and the second argument is of type edgeweight,
	 * discards the first node and the edge id
	 * Note that the decltype check is not enough as edgeweight can be cast to node.
	 */
	template<class F,
	         typename std::enable_if<
	             (Aux::FunctionTraits<F>::arity >= 1) &&
	             std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<1>::type>::value
	             >::type* = (void*)0>
	auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(v, ew)) {
		return f(v, ew);
	}

	/**
	 * Calls the given function f if it has only one argument, discards the first
	 * node id, the edge weight and the edge id
	 */
	template<class F,
	         void* = (void*)0>
	auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(v)) {
		return f(v);
	}

	/**
	 * Calls the given BFS handle with distance parameter
	 */
	template <class F>
	auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) {
		return f(u, dist);
	}

	/**
	 * Calls the given BFS handle without distance parameter
	 */
	template <class F>
	auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u)) {
		return f(u);
	}

public:

	/**
	 * Create a graph of @a n nodes. The graph has assignable edge weights if @a weighted is set to <code>true</code>.
	 * If @a weighted is set to <code>false</code> each edge has edge weight 1.0 and any other weight assignment will
	 * be ignored.
	 * @param n Number of nodes.
	 * @param weighted If set to <code>true</code>, the graph has edge weights.
	 * @param directed If set to @c true, the graph will be directed.
	 */
	Graph(count n = 0, bool weighted = false, bool directed = false);

	Graph(const Graph& G, bool weighted, bool directed);

	/**
	 * Create a graph as copy of @a other.
	 * @param other The graph to copy.
	 */
	Graph(const Graph& other) = default;

	/** Default move constructor */
	Graph(Graph&& other) = default;

	/** Default destructor */
	~Graph() = default;

	/** Default move assignment operator */
	Graph& operator=(Graph&& other) = default;

	/** Default copy assignment operator */
	Graph& operator=(const Graph& other) = default;

	/** EDGE IDS **/

	/**
	 * Initially assign integer edge identifiers.
	 *
	 * @param force Force re-indexing of edges even if they have already been indexed
	 */
	void indexEdges(bool force = false);

	/**
	 * Checks if edges have been indexed
	 *
	 * @return bool if edges have been indexed
	 */
	bool hasEdgeIds() const { return edgesIndexed; }

	/**
	 * Get the id of the given edge.
	 */
	edgeid edgeId(node u, node v) const;

	/**
	 * Get an upper bound for the edge ids in the graph.
	 * @return An upper bound for the edge ids.
	 */
	index upperEdgeIdBound() const { return omega; }

	/** GRAPH INFORMATION **/

	/**
	 * Get the ID of this graph. The ID is a unique unsigned integer given to
	 * every graph on construction.
	 */
	count getId() const { return id; }

	/**
	 * Return the type of the graph.
	 * 		Graph: not weighted, undirected
	 * 		WeightedGraph: weighted, undirected
	 * 		DirectedGraph: not weighted, directed
	 * 		WeightedDirectedGraph: weighted, directed
	 */
	std::string typ() const;

	/**
	 * Try to save some memory by shrinking internal data structures of the graph. Only run this
	 * once you finished editing the graph. Otherwise it will cause unnecessary reallocation of
	 * memory.
	 */
	void shrinkToFit();

	/**
	 * Compacts the adjacency arrays by re-using no longer needed slots from deleted edges.
	 */
	void compactEdges();

	/**
	 * Sorts the adjacency arrays by node id. While the running time is linear this
	 * temporarily duplicates the memory.
	 */
	void sortEdges();

	/**
	 * Set name of graph to @a name.
	 * @param name The name.
	 */
	void setName(std::string name) { this->name = name; }

	/*
	 * Returns the name of the graph.
	 * @return The name of the graph.
	 */
	std::string getName() const { return name; }

	/**
	 * Returns a string representation of the graph.
	 * @return A string representation.
	 */
	std::string toString() const;

	/* COPYING */

	/*
	 * Copies all nodes to a new graph
	 * @return graph with the same nodes.
	 */
	Graph copyNodes() const;

	/* NODE MODIFIERS */

	/**
	 * Add a new node to the graph and return it.
	 * @return The new node.
	 */
	node addNode();

	/**
	 * DEPRECATED: Coordinates should be handled outside the Graph class
	 * like general node attributes.
	 *
	 * Add a new node to the graph with coordinates @a x and @a y and return it.
	 */
	// TODO: remove method
	// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
	node addNode(float x, float y);

	/**
	 * Remove an isolated node @a v from the graph.
	 *
	 * @param v Node.
	 * @note Although it would be convenient to remove all incident edges at the same time,
	 * this causes complications for dynamic applications. Therefore, removeNode is an
	 * atomic event. All incident edges need to be removed first and an exception is thrown
	 * otherwise.
	 */
	void removeNode(node v);

	/**
	 * Check if node @a v exists in the graph.
	 *
	 * @param v Node.
	 * @return @c true if @a v exists, @c false otherwise.
	 */
	bool hasNode(node v) const { return (v < z) && this->exists[v]; }

	/**
	 * Restores a previously deleted node @a v with its previous id in the graph.
	 *
	 * @param v Node.
	 *
	 */
	void restoreNode(node v);

	/** NODE PROPERTIES **/

	/**
	 * Returns the number of outgoing neighbors of @a v.
	 *
	 * @param v Node.
	 * @return The number of outgoing neighbors.
	 */
	count degree(node v) const { return outDeg[v]; }

	/**
	 * Get the number of incoming neighbors of @a v.
	 *
	 * @param v Node.
	 * @return The number of incoming neighbors.
	 * @note If the graph is not directed, the outgoing degree is returned.
	 */
	count degreeIn(node v) const { return directed ? inDeg[v] : outDeg[v]; }

	/**
	 * Get the number of outgoing neighbors of @a v.
	 *
	 * @param v Node.
	 * @return The number of outgoing neighbors.
	 */
	count degreeOut(node v) const { return outDeg[v]; }

	/**
	 * Check whether @a v is isolated, i.e. degree is 0.
	 * @param v Node.
	 * @return @c true if the node is isolated (= degree is 0)
	 */
	bool isIsolated(node v) const { return outDeg[v] == 0 && (!directed || inDeg[v] == 0); }

	/**
	 * Returns the weighted degree of @a v.
	 *
	 * @param v Node.
	 * @return Weighted degree of @a v.
	 * @note For directed graphs this is the sum of weights of all outgoing edges of @a v.
	 */
	edgeweight weightedDegree(node v) const;

	/**
	 * Returns the volume of the @a v, which is the weighted degree with self-loops counted twice.
	 *
	 * @param v Node.
	 * @return The volume of the @a v.
	 */
	edgeweight volume(node v) const;

	/**
	 * Returns a random node of the graph.
	 * @return A random node.
	 */
	node randomNode() const;

	/**
	 * Returns a random neighbor of @a u and @c none if degree is zero.
	 *
	 * @param u Node.
	 * @return A random neighbor of @a u.
	 */
	node randomNeighbor(node u) const;

	/* EDGE MODIFIERS */

	/**
	 * Insert an edge between the nodes @a u and @a v. If the graph is weighted you can optionally
	 * set a weight for this edge. The default weight is 1.0.
	 * Note: Multi-edges are not supported and will NOT be handled consistently by the graph data
	 * structure.
	 * @param u Endpoint of edge.
	 * @param v Endpoint of edge.
	 * @param weight Optional edge weight.
	 */
	void addEdge(node u, node v, edgeweight ew = defaultEdgeWeight);

	/**
	 * Removes the undirected edge {@a u,@a v}.
	 * @param u Endpoint of edge.
	 * @param v Endpoint of edge.
	 */
	void removeEdge(node u, node v);

	/**
	 * Removes all self-loops in the graph.
	 */
	void removeSelfLoops();

	/**
	 * Changes the edges {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2, @a t2} into {@a s2, @a t1}.
	 *
	 * If there are edge weights or edge ids, they are preserved. Note that no check is performed if the swap is actually possible, i.e. does not generate duplicate edges.
	 *
	 * @param s1 The first source
	 * @param t1 The first target
	 * @param s2 The second source
	 * @param t2 The second target
	 */
	void swapEdge(NetworKit::node s1, NetworKit::node t1, NetworKit::node s2, NetworKit::node t2);

	/**
	 * Checks if undirected edge {@a u,@a v} exists in the graph.
	 * @param u Endpoint of edge.
	 * @param v Endpoint of edge.
	 * @return <code>true</code> if the edge exists, <code>false</code> otherwise.
	 */
	bool hasEdge(node u, node v) const;

	/**
	 * Returns a random edge. By default a random node u is chosen and then some random neighbor v. So the probability of choosing (u, v) highly
	 * depends on the degree of u.
	 * Setting uniformDistribution to true, will give you a real uniform distributed edge, but will be very slow. So only use uniformDistribution
	 * for single calls outside of any loops.
	 */
	std::pair<node, node> randomEdge(bool uniformDistribution = false) const;

	/**
	 * Returns a vector with nr random edges. The edges are chosen uniform random.
	 */
	std::vector< std::pair<node, node> > randomEdges(count nr) const;

	/* GLOBAL PROPERTIES */

	/**
	 * Returns <code>true</code> if this graph supports edge weights other than 1.0.
	 * @return <code>true</code> if this graph supports edge weights other than 1.0.
	 */
	bool isWeighted() const { return weighted; }

	/**
	 * Return @c true if this graph supports directed edges.
	 * @return @c true if this graph supports directed edges.
	 */
	bool isDirected() const { return directed; }

	/**
	 * Return <code>true</code> if graph contains no nodes.
	 * @return <code>true</code> if graph contains no nodes.
	 */
	bool isEmpty() const { return n == 0; }

	/**
	 * Return the number of nodes in the graph.
	 * @return The number of nodes.
	 */
	count numberOfNodes() const { return n; }

	/**
	 * Return the number of edges in the graph.
	 * @return The number of edges.
	 */
	count numberOfEdges() const { return m; }

	/**
	 * @return a pair (n, m) where n is the number of nodes and m is the number of edges
	 */
	// note: was a non-const member returning a const value; now a proper const accessor
	std::pair<count, count> size() const { return {n, m}; }

	/**
	 * @return the density of the graph (self-loops excluded)
	 * @note Undefined (division by zero) for graphs with fewer than two nodes.
	 */
	double density() const {
		count n = numberOfNodes();
		count m = numberOfEdges();
		count loops = numberOfSelfLoops();
		m -= loops;
		double d;
		if (isDirected()) {
			d = m / (double) (n * (n-1));
		} else {
			d = (2 * m) / (double) (n * (n-1));
		}
		return d;
	}

	/**
	 * Return the number of loops {v,v} in the graph.
	 * @return The number of loops.
	 * @note This involves calculation, so store result if needed multiple times.
	 */
	count numberOfSelfLoops() const;

	/**
	 * Get an upper bound for the node ids in the graph.
	 * @return An upper bound for the node ids.
	 */
	index upperNodeIdBound() const { return z; }

	/**
	 * Check for invalid graph states, such as multi-edges.
	 * @return False if the graph is in invalid state.
	 */
	bool checkConsistency() const;

	/* DYNAMICS */

	/**
	 * Trigger a time step - increments counter.
	 */
	void timeStep() { t++; }

	/**
	 * Get time step counter.
	 * @return Time step counter.
	 */
	count time() const { return t; }

	/* COORDINATES */

	/**
	 * DEPRECATED: Coordinates should be handled outside the Graph class
	 * like general node attributes.
	 *
	 * Sets the coordinate of @a v to @a value.
	 *
	 * @param v Node.
	 * @param value The coordinate of @a v.
	 */
	// TODO: remove method
	// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
	void setCoordinate(node v, Point<float> value) { coordinates.setCoordinate(v, value); }

	/**
	 * DEPRECATED: Coordinates should be handled outside the Graph class
	 * like general node attributes.
	 *
	 * Get the coordinate of @a v.
	 * @param v Node.
	 * @return The coordinate of @a v.
	 */
	// TODO: remove method
	// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
	Point<float>& getCoordinate(node v) { return coordinates.getCoordinate(v); }

	/**
	 * DEPRECATED: Coordinates should be handled outside the Graph class
	 * like general node attributes.
	 *
	 * Get minimum coordinate of all coordinates with respect to dimension @a dim.
	 * @param dim The dimension to search for minimum.
	 * @return The minimum coordinate in dimension @a dim.
	 */
	// TODO: remove method
	// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
	float minCoordinate(count dim) { return coordinates.minCoordinate(dim); }

	/**
	 * DEPRECATED: Coordinates should be handled outside the Graph class
	 * like general node attributes.
	 *
	 * Get maximum coordinate of all coordinates with respect to dimension @a dim.
	 * @param dim The dimension to search for maximum.
	 * @return The maximum coordinate in dimension @a dim.
	 */
	// TODO: remove method
	// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
	float maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); }

	/**
	 * DEPRECATED: Coordinates should be handled outside the Graph class
	 * like general node attributes.
	 *
	 * Initializes the coordinates for the nodes in graph.
	 * @note This has to be called once and before you set coordinates. Call this method again if new nodes have
	 * been added.
	 */
	// TODO: remove method
	// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
	void initCoordinates() { coordinates.init(z); }

	/* EDGE ATTRIBUTES */

	/**
	 * Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist.
	 * BEWARE: Running time is \Theta(deg(u))!
	 *
	 * @param u Endpoint of edge.
	 * @param v Endpoint of edge.
	 * @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist.
	 */
	edgeweight weight(node u, node v) const;

	/**
	 * Set the weight of an edge. If the edge does not exist,
	 * it will be inserted.
	 *
	 * @param[in]	u	endpoint of edge
	 * @param[in]	v	endpoint of edge
	 * @param[in]	weight	edge weight
	 */
	void setWeight(node u, node v, edgeweight ew);

	/**
	 * Increase the weight of an edge. If the edge does not exist,
	 * it will be inserted.
	 *
	 * @param[in]	u	endpoint of edge
	 * @param[in]	v	endpoint of edge
	 * @param[in]	weight	edge weight
	 */
	void increaseWeight(node u, node v, edgeweight ew);

	/* SUMS */

	/**
	 * Returns the sum of all edge weights.
	 * @return The sum of all edge weights.
	 */
	edgeweight totalEdgeWeight() const;

	/* Collections */

	/**
	 * Get list of all nodes.
	 * @return List of all nodes.
	 */
	std::vector<node> nodes() const;

	/**
	 * Get list of edges as node pairs.
	 * @return List of edges as node pairs.
	 */
	std::vector<std::pair<node, node> > edges() const;

	/**
	 * Get list of neighbors of @a u.
	 *
	 * @param u Node.
	 * @return List of neighbors of @a u.
	 */
	std::vector<node> neighbors(node u) const;

	/* Derivative Graphs */

	/**
	 * Return an undirected version of this graph.
	 *
	 * @return undirected graph.
	 */
	Graph toUndirected() const;

	/**
	 * Return the transpose of this graph. The graph must be directed.
	 *
	 * @return transpose of the graph.
	 */
	Graph transpose() const;

	/* NODE ITERATORS */

	/**
	 * Iterate over all nodes of the graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void forNodes(L handle) const;

	/**
	 * Iterate randomly over all nodes of the graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void parallelForNodes(L handle) const;

	/** Iterate over all nodes of the graph and call @a handle (lambda closure) as long as @a condition remains true.
	 * This allows for breaking from a node loop.
	 *
	 * @param condition Returning <code>false</code> breaks the loop.
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename C, typename L> void forNodesWhile(C condition, L handle) const;

	/**
	 * Iterate randomly over all nodes of the graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void forNodesInRandomOrder(L handle) const;

	/**
	 * Iterate in parallel over all nodes of the graph and call handler (lambda closure).
	 * Using schedule(guided) to remedy load-imbalances due to e.g. unequal degree distribution.
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void balancedParallelForNodes(L handle) const;

	/**
	 * Iterate over all undirected pairs of nodes and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code>.
	 */
	template<typename L> void forNodePairs(L handle) const;

	/**
	 * Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code>.
	 */
	template<typename L> void parallelForNodePairs(L handle) const;

	/* EDGE ITERATORS */

	/**
	 * Iterate over all edges of the const graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.
	 */
	template<typename L> void forEdges(L handle) const;

	/**
	 * Iterate in parallel over all edges of the const graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code> or <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.
	 */
	template<typename L> void parallelForEdges(L handle) const;

	/* NEIGHBORHOOD ITERATORS */

	/**
	 * Iterate over all neighbors of a node and call @a handle (lambda closure).
	 *
	 * @param u Node.
	 * @param handle Takes parameter <code>(node)</code> or <code>(node, edgeweight)</code> which is a neighbor of @a u.
	 * @note For directed graphs only outgoing edges from @a u are considered.
	 * A node is its own neighbor if there is a self-loop.
	 *
	 */
	template<typename L> void forNeighborsOf(node u, L handle) const;

	/**
	 * Iterate over all incident edges of a node and call @a handle (lambda closure).
	 *
	 * @param u Node.
	 * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code> where the first node is @a u and the second is a neighbor of @a u.
	 * @note For undirected graphs all edges incident to @a u are also outgoing edges.
	 */
	template<typename L> void forEdgesOf(node u, L handle) const;

	/**
	 * Iterate over all neighbors of a node and call handler (lambda closure).
	 * For directed graphs only incoming edges from u are considered.
	 */
	template<typename L> void forInNeighborsOf(node u, L handle) const;

	/**
	 * Iterate over all incoming edges of a node and call handler (lambda closure).
	 * @note For undirected graphs all edges incident to u are also incoming edges.
	 *
	 * Handle takes parameters (u, v) or (u, v, w) where w is the edge weight.
	 */
	template<typename L> void forInEdgesOf(node u, L handle) const;

	/* REDUCTION ITERATORS */

	/**
	 * Iterate in parallel over all nodes and sum (reduce +) the values returned by the handler
	 */
	template<typename L> double parallelSumForNodes(L handle) const;

	/**
	 * Iterate in parallel over all edges and sum (reduce +) the values returned by the handler
	 */
	template<typename L> double parallelSumForEdges(L handle) const;

	/* GRAPH SEARCHES */

	/**
	 * Iterate over nodes in breadth-first search order starting from r until connected component
	 * of r has been visited.
	 *
	 * @param r Node.
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void BFSfrom(node r, L handle) const;

	template<typename L> void BFSfrom(const std::vector<node> &startNodes, L handle) const;

	template<typename L> void BFSEdgesFrom(node r, L handle) const;

	/**
	 * Iterate over nodes in depth-first search order starting from r until connected component
	 * of r has been visited.
	 *
	 * @param r Node.
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void DFSfrom(node r, L handle) const;

	template<typename L> void DFSEdgesFrom(node r, L handle) const;
};
/* NODE ITERATORS */
/**
 * Sequentially invoke @a handle on every node that currently exists.
 */
template<typename L>
void Graph::forNodes(L handle) const {
	for (node v = 0; v < z; ++v) {
		if (!exists[v])
			continue;
		handle(v);
	}
}
// Parallel version of forNodes(): applies handle(v) to every existing node.
// The handler must be safe to invoke concurrently from multiple OpenMP threads.
template<typename L>
void Graph::parallelForNodes(L handle) const {
#pragma omp parallel for
    for (node v = 0; v < z; ++v) {
        // z is the upper bound of the node id range; deleted ids are skipped
        if (exists[v]) {
            handle(v);
        }
    }
}
template<typename C, typename L>
void Graph::forNodesWhile(C condition, L handle) const {
    // Visit existing nodes in ascending id order, stopping as soon as the
    // user-supplied condition turns false. The condition is evaluated once
    // per existing node, before the handler runs for that node.
    for (node v = 0; v < z; ++v) {
        if (!exists[v]) {
            continue;
        }
        if (!condition()) {
            break;
        }
        handle(v);
    }
}
template<typename L>
void Graph::forNodesInRandomOrder(L handle) const {
std::vector<node> randVec = nodes();
std::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG());
for (node v : randVec) {
handle(v);
}
}
// Parallel node iteration with guided scheduling, intended for handlers
// whose per-node cost varies (e.g. degree-dependent work). Handler must be
// thread-safe.
template<typename L>
void Graph::balancedParallelForNodes(L handle) const {
#pragma omp parallel for schedule(guided) // TODO: define min block size (and test it!)
    for (node v = 0; v < z; ++v) {
        if (exists[v]) {
            handle(v);
        }
    }
}
template<typename L>
void Graph::forNodePairs(L handle) const {
    // Enumerate each unordered pair {u, v} of existing nodes exactly once,
    // calling handle(u, v) with u < v in id order.
    for (node u = 0; u < z; ++u) {
        if (!exists[u]) {
            continue;
        }
        for (node v = u + 1; v < z; ++v) {
            if (exists[v]) {
                handle(u, v);
            }
        }
    }
}
// Iterate in parallel over all unordered pairs {u, v} of existing nodes and
// call handle(u, v) with u < v. Guided scheduling compensates for the inner
// loop shrinking as u grows. The handler must be thread-safe.
template<typename L>
void Graph::parallelForNodePairs(L handle) const {
#pragma omp parallel for schedule(guided)
    for (node u = 0; u < z; ++u) {
        if (exists[u]) {
            // start at u + 1 so each pair is seen exactly once
            for (node v = u + 1; v < z; ++v) {
                if (exists[v]) {
                    handle(u, v);
                }
            }
        }
    }
}
/* EDGE ITERATORS */
/* HELPERS */
// Weight of the i-th outgoing edge of u, read from the per-node weight store.
template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getOutEdgeWeight(node u, index i) const {
    return outEdgeWeights[u][i];
}
// Unweighted graphs report the same default weight for every edge.
template<> // implementation for weighted == false
inline edgeweight Graph::getOutEdgeWeight<false>(node, index) const {
    return defaultEdgeWeight;
}
// Weight of the i-th incoming edge of u (directed graphs keep a separate
// in-edge weight store).
template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getInEdgeWeight(node u, index i) const {
    return inEdgeWeights[u][i];
}
// Unweighted graphs report the same default weight for every edge.
template<> // implementation for weighted == false
inline edgeweight Graph::getInEdgeWeight<false>(node, index) const {
    return defaultEdgeWeight;
}
// Id of the i-th outgoing edge of u, read from the edge-id store.
template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getOutEdgeId(node u, index i) const {
    return outEdgeIds[u][i];
}
// Without indexed edges there are no ids; 0 is returned as a placeholder.
template<> // implementation for hasEdgeIds == false
inline edgeid Graph::getOutEdgeId<false>(node, index) const {
    return 0;
}
// Id of the i-th incoming edge of u, read from the in-edge-id store.
template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getInEdgeId(node u, index i) const {
    return inEdgeIds[u][i];
}
// Without indexed edges there are no ids; 0 is returned as a placeholder.
template<> // implementation for hasEdgeIds == false
inline edgeid Graph::getInEdgeId<false>(node, index) const {
    return 0;
}
// Decides whether the adjacency entry (u, v) should be reported by the edge
// iterators. Directed graphs use every stored entry except placeholders
// whose head is `none`. The first parameter is unnamed in the primary
// template because it is not needed there (matches the unnamed-parameter
// style of the other <false> specializations and avoids an
// unused-parameter warning).
template<bool graphIsDirected> // implementation for graphIsDirected == true
inline bool Graph::useEdgeInIteration(node, node v) const {
    return v != none;
}
// Undirected graphs store each edge {u, v} in both endpoints' adjacency
// lists; report it only from the endpoint with the larger id so every edge
// is seen exactly once (self-loops satisfy u >= v and are reported).
// If v == none, u >= v is not fulfilled, so placeholders are skipped too.
template<> // implementation for graphIsDirected == false
inline bool Graph::useEdgeInIteration<false>(node u, node v) const {
    return u >= v;
}
// Core out-edge loop shared by all edge iterators. The three bool template
// parameters select, at compile time, whether weights / edge ids are read
// and whether the undirected "report each edge once" filter applies.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forOutEdgesOfImpl(node u, L handle) const {
    for (index i = 0; i < outEdges[u].size(); ++i) {
        node v = outEdges[u][i];
        // skips deleted entries (v == none) and, for undirected graphs,
        // the duplicate copy of each edge
        if (useEdgeInIteration<graphIsDirected>(u, v)) {
            edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
        }
    }
}
// In-edge loop: directed graphs read the dedicated in-edge adjacency
// arrays; undirected graphs reuse the out-edge arrays, since every incident
// edge is also an incoming edge. graphIsDirected is a compile-time
// constant, so the dead branch is eliminated at instantiation time.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forInEdgesOfImpl(node u, L handle) const {
    if (graphIsDirected) {
        for (index i = 0; i < inEdges[u].size(); i++) {
            node v = inEdges[u][i];
            if (useEdgeInIteration<true>(u, v)) {
                edgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i), getInEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    } else {
        for (index i = 0; i < outEdges[u].size(); ++i) {
            node v = outEdges[u][i];
            // <true> here only filters out v == none; both orientations of
            // an undirected edge count as incoming, so no duplicate filter
            if (useEdgeInIteration<true>(u, v)) {
                edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    }
}
// Sequential iteration over all edges: the per-node out-edge loop applied
// to every node id in [0, z).
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forEdgeImpl(L handle) const {
    for (node u = 0; u < z; ++u) {
        forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
    }
}
// Parallel iteration over all edges, parallelized over source nodes with
// guided scheduling (degrees vary). The handler must be thread-safe.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::parallelForEdgesImpl(L handle) const {
#pragma omp parallel for schedule(guided)
    for (node u = 0; u < z; ++u) {
        forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
    }
}
// Parallel sum over all edges: each thread accumulates the handler's return
// values for its share of source nodes; OpenMP combines the partial sums
// via the reduction clause.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline double Graph::parallelSumForEdgesImpl(L handle) const {
    double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
    for (node u = 0; u < z; ++u) {
        for (index i = 0; i < outEdges[u].size(); ++i) {
            node v = outEdges[u][i];
            // undirected, do not iterate over edges twice
            // {u, v} instead of (u, v); if v == none, u > v is not fulfilled
            if (useEdgeInIteration<graphIsDirected>(u, v)) {
                sum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    }
    return sum;
}
// Dispatch to the forEdgeImpl instantiation matching the graph's runtime
// properties. The switch key encodes (weighted, directed, edgesIndexed) as
// bits 0, 1 and 2 respectively.
template<typename L>
void Graph::forEdges(L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
        case 0: // unweighted, undirected, no edgeIds
            forEdgeImpl<false, false, false, L>(handle);
            break;
        case 1: // weighted, undirected, no edgeIds
            forEdgeImpl<false, true, false, L>(handle);
            break;
        case 2: // unweighted, directed, no edgeIds
            forEdgeImpl<true, false, false, L>(handle);
            break;
        case 3: // weighted, directed, no edgeIds
            forEdgeImpl<true, true, false, L>(handle);
            break;
        case 4: // unweighted, undirected, with edgeIds
            forEdgeImpl<false, false, true, L>(handle);
            break;
        case 5: // weighted, undirected, with edgeIds
            forEdgeImpl<false, true, true, L>(handle);
            break;
        case 6: // unweighted, directed, with edgeIds
            forEdgeImpl<true, false, true, L>(handle);
            break;
        case 7: // weighted, directed, with edgeIds
            forEdgeImpl<true, true, true, L>(handle);
            break;
    }
}
// Parallel counterpart of forEdges(): same (weighted, directed,
// edgesIndexed) bit-encoded dispatch, but to parallelForEdgesImpl.
// The handler must be thread-safe.
template<typename L>
void Graph::parallelForEdges(L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
        case 0: // unweighted, undirected, no edgeIds
            parallelForEdgesImpl<false, false, false, L>(handle);
            break;
        case 1: // weighted, undirected, no edgeIds
            parallelForEdgesImpl<false, true, false, L>(handle);
            break;
        case 2: // unweighted, directed, no edgeIds
            parallelForEdgesImpl<true, false, false, L>(handle);
            break;
        case 3: // weighted, directed, no edgeIds
            parallelForEdgesImpl<true, true, false, L>(handle);
            break;
        case 4: // unweighted, undirected, with edgeIds
            parallelForEdgesImpl<false, false, true, L>(handle);
            break;
        case 5: // weighted, undirected, with edgeIds
            parallelForEdgesImpl<false, true, true, L>(handle);
            break;
        case 6: // unweighted, directed, with edgeIds
            parallelForEdgesImpl<true, false, true, L>(handle);
            break;
        case 7: // weighted, directed, with edgeIds
            parallelForEdgesImpl<true, true, true, L>(handle);
            break;
    }
}
/* NEIGHBORHOOD ITERATORS */
// Iterate over all (out-)neighbors of u. Implemented as out-edge iteration,
// so the handler may also accept the optional weight / edge-id arguments.
template<typename L>
void Graph::forNeighborsOf(node u, L handle) const {
    forEdgesOf(u, handle);
}
// Iterate over all outgoing edges of u. The graphIsDirected template
// argument is fixed to true here, so for undirected graphs every stored
// incident edge of u is reported (no once-per-edge filtering). Dispatch
// key encodes (weighted, edgesIndexed) as bits 0 and 1.
template<typename L>
void Graph::forEdgesOf(node u, L handle) const {
    switch (weighted + 2 * edgesIndexed) {
        case 0: //not weighted, no edge ids
            forOutEdgesOfImpl<true, false, false, L>(u, handle);
            break;
        case 1: //weighted, no edge ids
            forOutEdgesOfImpl<true, true, false, L>(u, handle);
            break;
        case 2: //not weighted, with edge ids
            forOutEdgesOfImpl<true, false, true, L>(u, handle);
            break;
        case 3: //weighted, with edge ids
            forOutEdgesOfImpl<true, true, true, L>(u, handle);
            break;
    }
}
// Iterate over all in-neighbors of u (all neighbors for undirected graphs).
// Implemented as in-edge iteration.
template<typename L>
void Graph::forInNeighborsOf(node u, L handle) const {
    forInEdgesOf(u, handle);
}
// Iterate over all incoming edges of u; for undirected graphs every
// incident edge counts as incoming. Same (weighted, directed, edgesIndexed)
// bit-encoded dispatch as forEdges().
template<typename L>
void Graph::forInEdgesOf(node u, L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
        case 0: //unweighted, undirected, no edge ids
            forInEdgesOfImpl<false, false, false, L>(u, handle);
            break;
        case 1: //weighted, undirected, no edge ids
            forInEdgesOfImpl<false, true, false, L>(u, handle);
            break;
        case 2: //unweighted, directed, no edge ids
            forInEdgesOfImpl<true, false, false, L>(u, handle);
            break;
        case 3: //weighted, directed, no edge ids
            forInEdgesOfImpl<true, true, false, L>(u, handle);
            break;
        case 4: //unweighted, undirected, with edge ids
            forInEdgesOfImpl<false, false, true, L>(u, handle);
            break;
        case 5: //weighted, undirected, with edge ids
            forInEdgesOfImpl<false, true, true, L>(u, handle);
            break;
        case 6: //unweighted, directed, with edge ids
            forInEdgesOfImpl<true, false, true, L>(u, handle);
            break;
        case 7: //weighted, directed, with edge ids
            forInEdgesOfImpl<true, true, true, L>(u, handle);
            break;
    }
}
/* REDUCTION ITERATORS */
// Sum the handler's return value over all existing nodes in parallel.
// Partial sums are combined via the OpenMP reduction clause; the handler
// must be thread-safe.
template<typename L>
double Graph::parallelSumForNodes(L handle) const {
    double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
    for (node v = 0; v < z; ++v) {
        if (exists[v]) {
            sum += handle(v);
        }
    }
    return sum;
}
// Sum the handler's return value over all edges in parallel; same
// (weighted, directed, edgesIndexed) bit-encoded dispatch as forEdges().
template<typename L>
double Graph::parallelSumForEdges(L handle) const {
    double sum = 0.0;
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
        case 0: // unweighted, undirected, no edge ids
            sum = parallelSumForEdgesImpl<false, false, false, L>(handle);
            break;
        case 1: // weighted, undirected, no edge ids
            sum = parallelSumForEdgesImpl<false, true, false, L>(handle);
            break;
        case 2: // unweighted, directed, no edge ids
            sum = parallelSumForEdgesImpl<true, false, false, L>(handle);
            break;
        case 3: // weighted, directed, no edge ids
            sum = parallelSumForEdgesImpl<true, true, false, L>(handle);
            break;
        case 4: // unweighted, undirected, with edge ids
            sum = parallelSumForEdgesImpl<false, false, true, L>(handle);
            break;
        case 5: // weighted, undirected, with edge ids
            sum = parallelSumForEdgesImpl<false, true, true, L>(handle);
            break;
        case 6: // unweighted, directed, with edge ids
            sum = parallelSumForEdgesImpl<true, false, true, L>(handle);
            break;
        case 7: // weighted, directed, with edge ids
            sum = parallelSumForEdgesImpl<true, true, true, L>(handle);
            break;
    }
    return sum;
}
/* GRAPH SEARCHES */
template<typename L>
void Graph::BFSfrom(node r, L handle) const {
    // Delegate to the multi-source overload with a single start node.
    BFSfrom(std::vector<node>{r}, handle);
}
template<typename L>
void Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const {
std::vector<bool> marked(z);
std::queue<node> q, qNext;
count dist = 0;
// enqueue start nodes
for (node u : startNodes) {
q.push(u);
marked[u] = true;
}
do {
node u = q.front();
q.pop();
// apply function
callBFSHandle(handle, u, dist);
forNeighborsOf(u, [&](node v) {
if (!marked[v]) {
qNext.push(v);
marked[v] = true;
}
});
if (q.empty() && !qNext.empty()) {
q.swap(qNext);
++dist;
}
} while (!q.empty());
}
// Breadth-first traversal from r that reports the tree edges of the BFS
// forest: handle(u, v, w, eid) is invoked once for each edge that discovers
// a previously unvisited node v. Edges to already-marked nodes are skipped.
template<typename L>
void Graph::BFSEdgesFrom(node r, L handle) const {
    std::vector<bool> marked(z);
    std::queue<node> q;
    q.push(r); // enqueue root
    marked[r] = true;
    do {
        node u = q.front();
        q.pop();
        // apply function
        forNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) {
            if (!marked[v]) {
                handle(u, v, w, eid);
                q.push(v);
                marked[v] = true;
            }
        });
    } while (!q.empty());
}
// Iterative depth-first traversal from r; handle(u) is called once per
// reachable node. Nodes are marked when pushed, so each node enters the
// stack at most once.
template<typename L>
void Graph::DFSfrom(node r, L handle) const {
    std::vector<bool> marked(z);
    std::stack<node> s;
    s.push(r); // enqueue root
    marked[r] = true;
    do {
        node u = s.top();
        s.pop();
        // apply function
        handle(u);
        forNeighborsOf(u, [&](node v) {
            if (!marked[v]) {
                s.push(v);
                marked[v] = true;
            }
        });
    } while (!s.empty());
}
template<typename L>
void Graph::DFSEdgesFrom(node r, L handle) const {
    // Depth-first traversal from r over the tree edges of the DFS forest:
    // handle(u, v) fires once for every edge that discovers a new node.
    std::vector<bool> visited(z);
    std::stack<node> todo;
    todo.push(r);
    visited[r] = true;
    while (!todo.empty()) {
        const node u = todo.top();
        todo.pop();
        forNeighborsOf(u, [&](node v) {
            if (visited[v]) {
                return;
            }
            handle(u, v);
            todo.push(v);
            visited[v] = true;
        });
    }
}
} /* namespace NetworKit */
#endif /* GRAPH_H_ */
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <ctype.h>
#include <assert.h>
#include <math.h>
//#include "ocl-kernel-gen.h"
#include "parse-args.h"
#include "sgtype.h"
#include "sgbuf.h"
#include "sgtime.h"
#include "trace-util.h"
#include "sp_alloc.h"
#if defined( USE_OPENCL )
#include "../opencl/ocl-backend.h"
#endif
#if defined( USE_OPENMP )
#include <omp.h>
#include "openmp/omp-backend.h"
#include "openmp/openmp_kernels.h"
#endif
#if defined ( USE_CUDA )
#include <cuda.h>
#include "cuda/cuda-backend.h"
#endif
#if defined( USE_SERIAL )
#include "serial/serial-kernels.h"
#endif
#if defined( USE_PAPI )
#include <papi.h>
#include "papi_helper.h"
#endif
#define ALIGNMENT (4096)  // allocation alignment in bytes (one 4 KiB page)
#define xstr(s) str(s)    // two-level expansion: stringify after macro expansion
#define str(s) #s
//SGBench specific enums
enum sg_backend backend = INVALID_BACKEND;  // which backend executes the kernels
//Strings defining program behavior
char platform_string[STRING_SIZE];  // OpenCL platform selector
char device_string[STRING_SIZE];    // OpenCL device selector
char kernel_file[STRING_SIZE];      // path to the OpenCL kernel source file
char kernel_name[STRING_SIZE];      // kernel entry point inside kernel_file
int cuda_dev = -1;                  // CUDA device index (-1 = not selected)
int validate_flag = 0, quiet_flag = 0;  // output verbosity / validation switches (presumably set by parse_args — TODO confirm)
int aggregate_flag = 1;             // when set, report only the best run per config
int compress_flag = 0;              // compress sparse index patterns before running
int papi_nevents = 0;               // number of PAPI events requested
#ifdef USE_PAPI
char papi_event_names[PAPI_MAX_COUNTERS][STRING_SIZE];  // event names from the CLI
int papi_event_codes[PAPI_MAX_COUNTERS];                // resolved PAPI event codes
long long papi_event_values[PAPI_MAX_COUNTERS];         // counter value scratch space
extern const char* const papi_ctr_str[];                // column labels, defined elsewhere
#endif
// Print the configured PAPI counters as a small JSON-like map of
// counter label -> event name. No-op unless built with USE_PAPI.
void print_papi_names() {
#ifdef USE_PAPI
    printf("\nPAPI Counters: %d\n", papi_nevents);
    if (papi_nevents > 0) {
        printf("{ ");
        for (int i = 0; i < papi_nevents; i++) {
            printf("\"%s\":\"%s\"", papi_ctr_str[i], papi_event_names[i]);
            // comma-separate all but the last entry
            if (i != papi_nevents-1) {
                printf(",\n ");
            }
        }
        printf(" }\n");
    }
#endif
}
// Print a banner describing the build (compiler, backend, device) and the
// run-wide reporting options. Called once before results are emitted.
void print_system_info(){
    printf("\nRunning Spatter version 0.0\n");
    printf("Compiler: %s ver. %s\n", xstr(SPAT_C_NAME), xstr(SPAT_C_VER));
    printf("Compiler Location: %s\n", xstr(SPAT_C));
    //printf("Contributors: Patrick Lavin, Jeff Young, Aaron Vose\n");
    printf("Backend: ");
    if(backend == OPENMP) printf("OPENMP\n");
    if(backend == OPENCL) printf("OPENCL\n");
    if(backend == CUDA) printf("CUDA\n");
    printf("Aggregate Results? %s\n", aggregate_flag ? "YES" : "NO");
#ifdef USE_CUDA
    // Report the name of the selected CUDA device
    if (backend == CUDA) {
        struct cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, cuda_dev);
        printf("Device: %s\n", prop.name);
    }
#endif
    print_papi_names();
    printf("\n");
}
// Print the column header row for the results table (config id, time,
// bandwidth, plus one column per configured PAPI counter).
void print_header(){
    //printf("kernel op time source_size target_size idx_len bytes_moved actual_bandwidth omp_threads vector_len block_dim shmem\n");
    printf("%-7s %-12s %-12s", "config", "time(s)","bw(MB/s)");
#ifdef USE_PAPI
    for (int i = 0; i < papi_nevents; i++) {
        printf(" %-12s", papi_ctr_str[i]);
    }
#endif
    printf("\n");
}
/* qsort comparator for doubles: ascending order (-1 / 0 / 1). */
int compare (const void * a, const void * b)
{
    const double lhs = *(const double *)a;
    const double rhs = *(const double *)b;
    if (lhs > rhs) return 1;
    if (lhs < rhs) return -1;
    return 0;
}
/** Print one result row. Time is reported in seconds, sizes in bytes;
 * bandwidth is bytes_moved / time scaled by 1000*1000, i.e. decimal MB/s.
 * idx selects which run's PAPI counters to print. Returns the bandwidth
 * so callers can aggregate it.
 */
double report_time(int ii, double time, struct run_config rc, int idx){
    size_t bytes_moved = 0;
    double actual_bandwidth = 0;
    // one element moved per pattern entry per outer iteration
    bytes_moved = sizeof(sgData_t) * rc.pattern_len * rc.generic_len;
    actual_bandwidth = bytes_moved / time / 1000. / 1000.;
    printf("%-7d %-12.4g %-12.6g", ii, time, actual_bandwidth);
#ifdef USE_PAPI
    for (int i = 0; i < papi_nevents; i++) {
        printf(" %-12lld", rc.papi_ctr[idx][i]);
    }
#endif
    printf("\n");
    return actual_bandwidth;
}
// Report results for all run configurations. With aggregate_flag set, only
// the best (minimum-time) run of each configuration is printed, followed by
// summary statistics across configurations (min/quartiles/max, harmonic
// mean and its standard error); otherwise every run is printed.
void report_time2(struct run_config* rc, int nrc) {
    double *bw = (double*)malloc(sizeof(double)*nrc);
    assert(bw);
    for (int k = 0; k < nrc; k++) {
        if (aggregate_flag) {
            // pick the fastest run; its index also selects which PAPI
            // counter set report_time() prints
            double min_time_ms = rc[k].time_ms[0];
            int min_idx = 0;
            for (int i = 1; i < rc[k].nruns; i++) {
                if (rc[k].time_ms[i] < min_time_ms) {
                    min_time_ms = rc[k].time_ms[i];
                    min_idx = i;
                }
            }
            bw[k] = report_time(k, min_time_ms/1000., rc[k], min_idx);
        }
        else {
            for (int i = 0; i < rc[k].nruns; i++) {
                report_time(k, rc[k].time_ms[i]/1000., rc[k], i);
            }
        }
    }
    if (aggregate_flag) {
        // NOTE: bw[] is only filled in the aggregate branch above, so the
        // statistics below are consistent with that branch.
        double min = bw[0];
        double max = bw[0];
        double hmean = 0;
        double stddev = 0; // only referenced by the commented-out code below
        double first, med, third;
        qsort(bw, nrc, sizeof(double), compare);
        // min/max scan (bw is already sorted here, so this is redundant
        // but harmless)
        for (int i = 0; i < nrc; i++) {
            if (bw[i] < min) {
                min = bw[i];
            }
            if (bw[i] > max) {
                max = bw[i];
            }
        }
        first = bw[nrc/4];
        med = bw[nrc/2];
        third = bw[3*nrc/4];
        // Harmonic mean
        for (int i = 0; i < nrc; i++) {
            hmean += 1./bw[i];
        }
        hmean = 1./hmean * nrc;
        // Harmonic Standard Error
        // Reference: The Standard Errors of the Geometric and
        // Harmonic Means and Their Application to Index Numbers
        // Author: Nilan Norris
        // URL: https://www.jstor.org/stable/2235723
        double E1_x = 0;
        for (int i = 0; i < nrc; i++) {
            E1_x += 1./bw[i];
        }
        E1_x = E1_x / nrc;
        double theta_22 = pow(1./E1_x, 2);
        double sig_1x = 0;
        for (int i = 0; i < nrc; i++) {
            sig_1x += pow(1./bw[i] - E1_x,2);
        }
        sig_1x = sqrt(sig_1x / nrc);
        double hstderr = theta_22 * sig_1x / sqrt(nrc);
        /*
        for (int i = 0; i < nrc; i++) {
            stddev += pow(bw[i] - hmean, 2);
        }
        stddev = sqrt((1./nrc)*stddev);
        */
        printf("\n%-11s %-12s %-12s %-12s %-12s\n", "Min", "25%","Med","75%", "Max");
        printf("%-12.6g %-12.6g %-12.6g %-12.6g %-12.6g\n", min, first, med, third, max);
        printf("%-12s %-12s\n", "H.Mean", "H.StdErr");
        printf("%-12.6g %-12.6g\n", hmean, hstderr);
        /*
        printf("%.3lf\t%.3lf\n", hmean, stddev);
        */
    }
    free(bw);
}
/* Dump len doubles to stdout, space-separated, with no decimal places. */
void print_data(double *buf, size_t len){
    size_t i = 0;
    while (i < len) {
        printf("%.0lf ", buf[i]);
        ++i;
    }
    printf("\n");
}
/* Dump len size_t values to stdout, space-separated. */
void print_sizet(size_t *buf, size_t len){
    size_t i = 0;
    while (i < len) {
        printf("%zu ", buf[i]);
        ++i;
    }
    printf("\n");
}
void emit_configs(struct run_config *rc, int nconfigs);
int main(int argc, char **argv)
{
// =======================================
// Declare Variables
// =======================================
// source and target are used for the gather and scatter operations.
// data is gathered from source and placed into target
sgDataBuf source;
sgDataBuf target;
// OpenCL Specific
size_t global_work_size = 1;
char *kernel_string;
#ifdef USE_OPENCL
cl_uint work_dim = 1;
#endif
// =======================================
// Parse Command Line Arguments
// =======================================
struct run_config *rc;
int nrc = 0;
parse_args(argc, argv, &nrc, &rc);
if (nrc <= 0) {
error("No run configurations parsed", ERROR);
}
// If indices span many pages, compress them so that there are no
// pages in the address space which are never accessed
// Pages are assumed to be 4KiB
if (compress_flag) {
for (int i = 0; i < nrc; i++) {
compress_indices(rc[i].pattern, rc[i].pattern_len);
}
}
struct run_config *rc2 = rc;
// Allocate space for timing and papi counter information
for (int i = 0; i < nrc; i++) {
rc2[i].time_ms = (double*)malloc(sizeof(double) * rc2[i].nruns);
#ifdef USE_PAPI
rc2[i].papi_ctr = (long long **)malloc(sizeof(long long *) * rc2[i].nruns);
for (int j = 0; j < rc2[i].nruns; j++){
rc2[i].papi_ctr[j] = (long long*)malloc(sizeof(long long) * papi_nevents);
}
#endif
}
// =======================================
// Initialize PAPI Library
// =======================================
#ifdef USE_PAPI
// Powering up a space shuttle probably has fewer checks than initlizing papi
int err = PAPI_library_init(PAPI_VER_CURRENT);
if (err !=PAPI_VER_CURRENT && err > 0) {
error ("PAPI library version mismatch", ERROR);
}
if (err < 0) papi_err(err);
err = PAPI_is_initialized();
if (err != PAPI_LOW_LEVEL_INITED) {
error ("PAPI was not initialized", ERROR);
}
// OK, now that papi is finally inizlized, we need to make our EventSet
// First, convert names to codes
for (int i = 0; i < papi_nevents; i++) {
papi_err(PAPI_event_name_to_code(papi_event_names[i],&papi_event_codes[i]));
}
int EventSet = PAPI_NULL;
papi_err(PAPI_create_eventset(&EventSet));
for (int i = 0; i < papi_nevents; i++) {
papi_err(PAPI_add_event(EventSet, papi_event_codes[i]));
}
#endif
// =======================================
// Initialize OpenCL Backend
// =======================================
/* Create a context and corresponding queue */
#ifdef USE_OPENCL
if (backend == OPENCL) {
initialize_dev_ocl(platform_string, device_string);
}
#endif
// =======================================
// Compute Buffer Sizes
// =======================================
if (rc2[0].kernel != GATHER && rc2[0].kernel != SCATTER) {
printf("Error: Unsupported kernel\n");
exit(1);
}
size_t max_source_size = 0;
size_t max_target_size = 0;
size_t max_pat_len = 0;
size_t max_ptrs = 0;
size_t max_nruns = 0;
for (int i = 0; i < nrc; i++) {
size_t max_pattern_val = rc2[i].pattern[0];
for (size_t j = 0; j < rc2[i].pattern_len; j++) {
if (rc2[i].pattern[j] > max_pattern_val) {
max_pattern_val = rc2[i].pattern[j];
}
}
//printf("count: %zu, delta: %zu, %zu\n", rc2[i].generic_len, rc2[i].delta, rc2[i].generic_len*rc2[i].delta);
size_t cur_source_size = ((max_pattern_val + 1) + (rc2[i].generic_len-1)*rc2[i].delta) * sizeof(sgData_t);
//printf("max_pattern_val: %zu, source_size %zu\n", max_pattern_val, cur_source_size);
if (cur_source_size > max_source_size) {
max_source_size = cur_source_size;
}
size_t cur_target_size = rc2[i].pattern_len * sizeof(sgData_t) * rc2[i].wrap;
if (cur_target_size > max_target_size) {
max_target_size = cur_target_size;
}
if (rc2[i].omp_threads > max_ptrs) {
max_ptrs = rc2[i].omp_threads;
}
if (rc2[i].pattern_len > max_pat_len) {
max_pat_len = rc2[i].pattern_len;
}
}
source.size = max_source_size;
source.len = source.size / sizeof(sgData_t);
target.size = max_target_size;
target.len = target.size / sizeof(sgData_t);
target.nptrs = max_ptrs;
// =======================================
// Create OpenCL Kernel
// =======================================
#ifdef USE_OPENCL
if (backend == OPENCL) {
//kernel_string = ocl_kernel_gen(index_len, vector_len, kernel);
kernel_string = read_file(kernel_file);
sgp = kernel_from_string(context, kernel_string, kernel_name, NULL);
if (kernel_string) {
free(kernel_string);
}
}
#endif
// =======================================
// Create Host Buffers, Fill With Data
// =======================================
source.host_ptr = (sgData_t*) sp_malloc(source.size, 1, ALIGN_CACHE);
// replicate the target space for every thread
target.host_ptrs = (sgData_t**) sp_malloc(sizeof(sgData_t*), target.nptrs, ALIGN_CACHE);
for (size_t i = 0; i < target.nptrs; i++) {
target.host_ptrs[i] = (sgData_t*) sp_malloc(target.size, 1, ALIGN_PAGE);
}
// Populate buffers cn host
#pragma omp parallel for
for (int i = 0; i < source.len; i++) {
source.host_ptr[i] = i % (source.len / 64);
}
random_data(source.host_ptr, source.len);
// =======================================
// Create Device Buffers, Transfer Data
// =======================================
#ifdef USE_OPENCL
if (backend == OPENCL) {
//TODO: Rewrite to not take index buffers
//create_dev_buffers_ocl(&source, &target, &si, &ti);
}
#endif
#ifdef USE_CUDA
sgIdx_t* pat_dev;
if (backend == CUDA) {
//TODO: Rewrite to not take index buffers
create_dev_buffers_cuda(&source);
cudaMalloc((void**)&pat_dev, sizeof(sgIdx_t) * max_pat_len);
cudaMemcpy(source.dev_ptr_cuda, source.host_ptr, source.size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
}
#endif
// =======================================
// Execute Benchmark
// =======================================
// Print some header info
/*
if (print_header_flag)
{
print_system_info();
emit_configs(rc2, nrc);
print_header();
}
*/
if (quiet_flag < 1) {
print_system_info();
}
if (quiet_flag < 2) {
emit_configs(rc2, nrc);
}
if (quiet_flag < 3) {
print_header();
}
// Print config info
for (int k = 0; k < nrc; k++) {
// Time OpenCL Kernel
#ifdef USE_OPENCL
if (backend == OPENCL) {
}
#endif // USE_OPENCL
// Time CUDA Kernel
#ifdef USE_CUDA
int wpt = 1;
if (backend == CUDA) {
float time_ms = 2;
for (int i = -1; i < (int)rc2[k].nruns; i++) {
#define arr_len (1)
unsigned long global_work_size = rc2[k].generic_len / wpt * rc2[k].pattern_len;
unsigned long local_work_size = rc2[k].local_work_size;
unsigned long grid[arr_len] = {global_work_size/local_work_size};
unsigned long block[arr_len] = {local_work_size};
if (rc2[k].random_seed == 0) {
time_ms = cuda_block_wrapper(arr_len, grid, block, rc2[k].kernel, source.dev_ptr_cuda, pat_dev, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, wpt);
} else {
time_ms = cuda_block_random_wrapper(arr_len, grid, block, rc2[k].kernel, source.dev_ptr_cuda, pat_dev, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, wpt, rc2[k].random_seed);
}
if (i!= -1) rc2[k].time_ms[i] = time_ms;
}
}
#endif // USE_CUDA
// Time OpenMP Kernel
#ifdef USE_OPENMP
if (backend == OPENMP) {
omp_set_num_threads(rc2[k].omp_threads);
// Start at -1 to do a cache warm
for (int i = -1; i < (int)rc2[k].nruns; i++) {
if (i!=-1) sg_zero_time();
#ifdef USE_PAPI
if (i!=-1) profile_start(EventSet);
#endif
switch (rc2[k].kernel) {
case SG:
if (rc2[k].op == OP_COPY) {
//sg_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr,index_len);
} else {
//sg_accum_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr, index_len);
}
break;
case SCATTER:
if (rc2[k].random_seed >= 1) {
scatter_smallbuf_random(source.host_ptr, target.host_ptrs, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, rc2[k].random_seed);
}
else if (rc2[k].op == OP_COPY) {
scatter_smallbuf(source.host_ptr, target.host_ptrs, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap);
// scatter_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr, index_len);
} else {
// scatter_accum_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr, index_len);
}
break;
case GATHER:
if (rc2[k].random_seed >= 1) {
gather_smallbuf_random(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, rc2[k].random_seed);
}
else if (rc2[k].deltas_len <= 1) {
gather_smallbuf(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap);
} else {
gather_smallbuf_multidelta(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].deltas_ps, rc2[k].generic_len, rc2[k].wrap, rc2[k].deltas_len);
}
break;
default:
printf("Error: Unable to determine kernel\n");
break;
}
#ifdef USE_PAPI
if (i!= -1) profile_stop(EventSet, rc2[k].papi_ctr[i]);
#endif
if (i!= -1) rc2[k].time_ms[i] = sg_get_time_ms();
}
//report_time2(rc2, nrc);
}
#endif // USE_OPENMP
// Time Serial Kernel
#ifdef USE_SERIAL
if (backend == SERIAL) {
for (int i = 0; i <= rc2[k].nruns; i++) {
if (i!=-1) sg_zero_time();
#ifdef USE_PAPI
if (i!=-1) profile_start(EventSet);
#endif
//TODO: Rewrite serial kernel
switch (rc2[k].kernel) {
case SCATTER:
scatter_smallbuf_serial(source.host_ptr, target.host_ptrs, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap);
break;
case GATHER:
gather_smallbuf_serial(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap);
break;
default:
printf("Error: Unable to determine kernel\n");
break;
}
//double time_ms = sg_get_time_ms();
//if (i!=0) report_time(k, time_ms/1000., rc2[k], i);
#ifdef USE_PAPI
if (i!= -1) profile_stop(EventSet, rc2[k].papi_ctr[i]);
#endif
if (i!= -1) rc2[k].time_ms[i] = sg_get_time_ms();
}
}
#endif // USE_SERIAL
}
report_time2(rc2, nrc);
#ifdef USE_CUDA
cudaMemcpy(source.host_ptr, source.dev_ptr_cuda, source.size, cudaMemcpyDeviceToHost);
#endif
int good = 0;
int bad = 0;
for (int i = 0; i < source.len; i++) {
if (source.host_ptr[i] == 1337.) {
good++;
}else {
bad++;
}
}
//printf("\ngood: %d, bad: %d\n", good, bad);
// =======================================
// Validation
// =======================================
if(validate_flag) {
//TODO: Rewrite validataion
/*
#ifdef USE_OPENCL
if (backend == OPENCL) {
clEnqueueReadBuffer(queue, target.dev_ptr_opencl, 1, 0, target.size,
target.host_ptr, 0, NULL, &e);
clWaitForEvents(1, &e);
}
#endif
#ifdef USE_CUDA
if (backend == CUDA) {
cudaError_t cerr;
cerr = cudaMemcpy(target.host_ptr, target.dev_ptr_cuda, target.size, cudaMemcpyDeviceToHost);
if(cerr != cudaSuccess){
printf("transfer failed\n");
}
cudaDeviceSynchronize();
}
#endif
sgData_t *target_backup_host = (sgData_t*) sg_safe_cpu_alloc(target.size);
memcpy(target_backup_host, target.host_ptr, target.size);
// =======================================
VALIDATION
=======================================
//
if(validate_flag) {
#ifdef USE_OPENCL
if (backend == OPENCL) {
clEnqueueReadBuffer(queue, target.dev_ptr_opencl, 1, 0, target.size,
target.host_ptr, 0, NULL, &e);
clWaitForEvents(1, &e);
}
#endif
#ifdef USE_CUDA
if (backend == CUDA) {
cudaError_t cerr;
cerr = cudaMemcpy(target.host_ptr, target.dev_ptr_cuda, target.size, cudaMemcpyDeviceToHost);
if(cerr != cudaSuccess){
printf("transfer failed\n");
}
cudaDeviceSynchronize();
}
#endif
sgData_t *target_backup_host = (sgData_t*) sg_safe_cpu_alloc(target.size);
memcpy(target_backup_host, target.host_ptr, target.size);
// TODO: Issue - 13: Replace the hard-coded execution of each function with calls to the serial backend
switch (kernel) {
case SG:
for (size_t i = 0; i < index_len; i++){
target.host_ptr[ti.host_ptr[i]] = source.host_ptr[si.host_ptr[i]];
}
break;
case SCATTER:
for (size_t i = 0; i < index_len; i++){
target.host_ptr[ti.host_ptr[i]] = source.host_ptr[i];
}
break;
case GATHER:
for (size_t i = 0; i < index_len; i++){
target.host_ptr[i] = source.host_ptr[si.host_ptr[i]];
}
break;
}
int num_err = 0;
for (size_t i = 0; i < target.len; i++) {
if (target.host_ptr[i] != target_backup_host[i]) {
printf("%zu: host %lf, device %lf\n", i, target.host_ptr[i], target_backup_host[i]);
num_err++;
}
if (num_err > 99) {
printf("Too many errors. Exiting.\n");
exit(1);
}
}
*/
//}
}
// Free Memory
free(source.host_ptr);
for (size_t i = 0; i < target.nptrs; i++) {
free(target.host_ptrs[i]);
}
if (target.nptrs != 0) {
free(target.host_ptrs);
}
free(rc);
}
// Print all run configurations as a JSON-like list of dicts (name, kernel,
// pattern, delta(s), length, optional seed/agg/wrap/threads). Emitted to
// stdout ahead of the results table.
// NOTE(review): the %zu/%zd specifiers assume the struct run_config fields
// (pattern, delta(s), generic_len, nruns, wrap, omp_threads, random_seed)
// are size_t/ssize_t-sized — confirm against parse-args.h.
void emit_configs(struct run_config *rc, int nconfigs)
{
    printf("Run Configurations\n");
    printf("[ ");
    for (int i = 0; i < nconfigs; i++) {
        // indent continuation entries to line up under the opening bracket
        if (i != 0) {
            printf(" ");
        }
        printf("{");
        // Pattern Type
        printf("\'name\':\'%s\', ", rc[i].name);
        // Kernel
        switch (rc[i].kernel) {
            case GATHER:
                printf("\'kernel\':\'Gather\', ");
                break;
            case SCATTER:
                printf("\'kernel\':\'Scatter\', ");
                break;
            case SG:
                printf("\'kernel\':\'GS\', ");
                break;
        }
        // Pattern
        printf("\'pattern\':[");
        for (int j = 0; j < rc[i].pattern_len; j++) {
            printf("%zu", rc[i].pattern[j]);
            if (j != rc[i].pattern_len-1) {
                printf(",");
            }
        }
        printf("], ");
        //Delta
        //TODO: multidelta
        if (rc[i].deltas_len == 1) {
            printf("\'delta\':%zd", rc[i].delta);
        } else {
            printf("\'deltas\':[");
            for (int j = 0; j < rc[i].deltas_len; j++) {
                printf("%zu", rc[i].deltas[j]);
                if (j != rc[i].deltas_len-1) {
                    printf(",");
                }
            }
            printf("]");
        }
        printf(", ");
        // Len
        printf("\'length\':%zu, ", rc[i].generic_len);
        if (rc[i].random_seed > 0) {
            printf("\'seed\':%zu, ", rc[i].random_seed);
        }
        // Aggregate
        if (aggregate_flag) {
            printf("\'agg\':%zu, ", rc[i].nruns);
        }
        // Wrap
        if (aggregate_flag) {
            printf("\'wrap\':%zu, ", rc[i].wrap);
        }
        // OpenMP Threads
        if (backend == OPENMP) {
            printf("\'threads\':%zu", rc[i].omp_threads);
        }
        printf("}");
        if (i != nconfigs-1) {
            printf(",\n");
        }
    }
    printf(" ]\n\n");
}
|
zlauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_lauum
*
* Computes the product U * U^H or L^H * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the triangular factor U or L.
* On exit, if UPLO = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^H;
* if UPLO = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^H * L.
* The diagonal is assumed to be real with no imaginary part.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit.
* @retval < 0 if -i, the i-th argument had an illegal value.
*
*******************************************************************************
*
* @sa plasma_clauum
* @sa plasma_dlauum
* @sa plasma_slauum
*
******************************************************************************/
int plasma_zlauum(plasma_enum_t uplo, int n,
                  plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    // (n < 0 was rejected above, so the original `imax(n, 0) == 0`
    // reduces to a plain zero check)
    if (n == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lauum(plasma, PlasmaComplexDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Initialize sequence.  The original discarded this return code;
    // check it and release the tile matrix on failure.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request (same treatment as the sequence above).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zlauum(uplo, A, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_lauum
* Computes the product U * U^H or L^H * L, where the
* triangular factor U or L is stored in the upper or lower triangular part of
* the array A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] A
* Descriptor of matrix A.
*
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zlauum
* @sa plasma_omp_zlauum
* @sa plasma_omp_dlauum
* @sa plasma_omp_clauum
* @sa plasma_omp_slauum
*
******************************************************************************/
void plasma_omp_zlauum(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check the sequence and request handles before anything that would
    // report errors through them: the original called
    // plasma_request_fail(sequence, ...) with a NULL sequence/request,
    // dereferencing a null pointer on the very path meant to report it.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0)
        return;

    // Call the parallel function.
    plasma_pzlauum(uplo, A, sequence, request);
}
|
GB_unaryop__ainv_int64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_fp32
// op(A') function: GB_tran__ainv_int64_fp32
// C type: int64_t
// A type: float
// cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop: cij = -aij
// The macros below instantiate the generic unary-op templates for this
// operator/type combination (AINV = additive inverse, int64_t output
// computed from float input).  They are consumed by the apply loop in
// GB_unop__ainv_int64_fp32 below and by GB_unaryop_transpose.c.

// A (input) scalar type
#define GB_ATYPE \
    float

// C (output) scalar type
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]: fetch one input entry into a local
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

// address one output entry
#define GB_CX(p) Cx [p]

// unary operator: additive inverse
#define GB_OP(z, x) \
    z = -x ;

// casting: float -> int64_t via GB_CAST_SIGNED (defined in GB.h;
// presumably handles NaN/out-of-range inputs — confirm there)
#define GB_CASTING(z, x) \
    int64_t z ; GB_CAST_SIGNED(z,x,64) ;

// cij = op (cast (aij)): the fused load/cast/negate/store step
#define GB_CAST_OP(pC,pA)  \
{                                                   \
    /* aij = Ax [pA] */                             \
    GB_GETA (aij, Ax, pA) ;                         \
    /* Cx [pC] = op (cast (aij)) */                 \
    GB_CASTING (x, aij) ;                           \
    GB_OP (GB_CX (pC), x) ;                         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies Cx [p] = -(int64_t cast of Ax [p]) to all anz entries, in
// parallel over nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// kernel has been compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_int64_fp32
(
    int64_t *restrict Cx,       // output value array
    const float *restrict Ax,   // input value array
    int64_t anz,                // number of entries in Ax/Cx
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = -(int64_t) Ax [p], via the macros defined above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting each float entry to int64_t and
// negating it.  The loop body is the generated template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above; the
// remaining parameters describe the parallel slicing of A (presumably
// per-slice row counts and slice boundaries — see that template).
GrB_Info GB_tran__ainv_int64_fp32
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, read transposed
    int64_t *restrict *Rowcounts,       // per-slice row count workspace
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *restrict A_slice,    // slice boundaries for parallelism
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cython_dL_update_omp.c | /* cython_dL_update_hmc.c
*
* Rutger van Haasteren, December 12 2015, Pasadena
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* The aggregated algorithm for use in the Hamiltonian Sampler */
void dL_update_hmc2(const double *pdL, const double *pdLi, const double *pdp,
        double *pdM, double *pdtj, const int N) {
    /*
       Formal derivative of the rank-one update of a Cholesky decomposition,
       adjusted to perform all rank-one updates at once for the derivative:

           L'L'^{T} = LL^{T} + diag(B)
           dL' = L Phi(L^{-1} dB L^{-T})     with Phi the utril function
           B = B(x)

       We need dot(d_L_d_x, p) and trace(L^{-1} d_L_d_x).  Assuming dB/dx is
       known, d_L_d_x follows from the chain rule through d_L_d_B; the
       outputs of this function let us do exactly that:

           dot(d_L_d_x, p)      = dot(M, d_B_d_x)
           trace(L^{-1} d_L_d_x) = dot(tj, d_B_d_x)

       Re-parameterized so it also works in the limit a -> 0.

       :param pdL:  Current updated Cholesky decomposition (L-prime),
                    N x N, column index varying fastest (pdL[col + N*row])
       :param pdLi: Inverse of the Cholesky decomposition (L^{-1})
       :param pdp:  Vector we'll need to multiply dL with (length N)
       :param pdM:  The return matrix M (output, N x N, zeroed here)
       :param pdtj: The return vector tj (output, length N, zeroed here)
       :param N:    Size of all the objects

       NOTE(review): malloc/calloc results are not checked; an allocation
       failure would crash on first use.
    */
    double *pdLdot, *pdU, *pdLtrans;
    double r, drdot, dcdot, ds, temp;
    int i, j, k, index;
    //const int maxthreads = omp_get_max_threads();

    /* Allocate memory for dL transpose */
    pdLtrans = malloc(N*N*sizeof(double));

    /* Zero the outputs (quick), and transpose L so the inner loops below
       can walk it with stride 1 */
    for(i=0; i<N; ++i) {
        for(j=0; j<N; ++j) {
            pdM[j+N*i] = 0.0;
            pdLtrans[j+N*i] = pdL[i+N*j];
        } /* for j */
        pdtj[i] = 0.0;
    } /* for i */

    #pragma omp parallel private(i, j, k, index, pdLdot, pdU, r, drdot, dcdot, ds, temp) shared(pdL, pdLtrans, pdLi, pdp, pdM, pdtj, N) default(none)
    {
        //const int nthreads = omp_get_num_threads();
        //const int ithread = omp_get_thread_num();
        /* Per-thread accumulators: one row of M and one element of tj are
           built locally, then scattered to the shared outputs at the end
           of each i iteration */
        double *pdMlocal, dtjlocal;
        pdMlocal = calloc(N, sizeof(double));
        //printf("In thread %i of %i\n", ithread, nthreads);

        /* The index i represents the basis vector we are working with */
        #pragma omp for nowait // schedule(dynamic)
        for(i=0; i<N; ++i) {
            /* Allocate memory inside the parallel region */
            pdLdot = calloc(N, sizeof(double)); /* only one column of Ldot is stored */
            pdU = calloc(N, sizeof(double)); /* basis vector we are updating */

            /* Initialize all our quantities */
            pdU[i] = 1.0;
            temp = 0.0;
            dtjlocal = 0.0;

            /* The index k represents the row of Ldot we are working with */
            for(k=0; k<N; ++k) {
                r = pdL[k+N*k];

                /* Scalar quantities of the rank-one update derivative */
                drdot = 0.5*pdU[k]*pdU[k] / r;
                dcdot = drdot/pdL[k+N*k];
                ds = pdU[k] / pdL[k+N*k];

                /* Clear the entry left over from the previous row */
                if(k > 0) {
                    pdLdot[k-1] = 0.0;
                } /* if k */
                pdLdot[k] = drdot;

                /* Update Ldot */
                /* The index j represents the column of Ldot */
                for(j=k+1; j<N; ++j) {
                    /* Using the transpose of pdL is faster (stride-1 access) */
                    //pdLdot[j] = ds*pdU[j] - dcdot * pdL[k+N*j];
                    pdLdot[j] = ds*pdU[j] - dcdot * pdLtrans[j+N*k];
                } /* for j */

                /* Update U */
                for(j=k+1; j<N; ++j) {
                    /* Using the transpose of pdL is faster (stride-1 access) */
                    //pdU[j] = pdU[j] - ds*pdL[k+N*j];
                    pdU[j] = pdU[j] - ds*pdLtrans[j+N*k];
                } /* for j */

                /* Update M: accumulate dot(Ldot column, p) */
                temp = 0;
                for(j=k; j<N; ++j) {
                    temp += pdLdot[j]*pdp[j];
                } /* for j */
                //pdM[i+N*k] += temp;
                pdMlocal[k] = temp;

                /* Update tj: accumulate trace contribution via L^{-1} */
                temp = 0;
                for(j=0; j<N; ++j) {
                    temp += pdLi[j+N*k]*pdLdot[j];
                } /* for j */
                //pdtj[i] += temp;
                dtjlocal += temp;
            } /* for k */

            /* Scatter this basis vector's results into the shared outputs.
               NOTE(review): each i writes a disjoint set of pdM/pdtj
               elements, so the critical section looks unnecessary for
               correctness and appears to be a false-sharing/performance
               workaround — confirm before removing it. */
            #pragma omp critical
            {
                for(k=0; k<N; ++k) {
                    index = i+N*k;
                    pdM[index] = pdMlocal[k];
                } /* for k */
                pdtj[i] += dtjlocal;
            }

            /* Free memory of parallel regions */
            free(pdLdot);
            free(pdU);
        } /* for i */
        free(pdMlocal);
    } /* pragma omp parallel */
    free(pdLtrans);
    return;
} /* dL_update_hmc */
|
libomp_interface.h | // This file does not contain any code; it just contains additional text and formatting
// for doxygen.
/* <copyright>
Copyright (c) 1997-2016 Intel Corporation. All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</copyright> */
/*! @mainpage Intel® OpenMP* Runtime Library Interface
@section sec_intro Introduction
This document describes the interface provided by the
Intel® OpenMP\other runtime library to the compiler.
Routines that are directly called as simple functions by user code are
not currently described here, since their definition is in the OpenMP
specification available from http://openmp.org
The aim here is to explain the interface from the compiler to the runtime.
The overall design is described, and each function in the interface
has its own description. (At least, that's the ambition, we may not be there yet).
@section sec_building Building the Runtime
For the impatient, we cover building the runtime as the first topic here.
A top-level Makefile is provided that attempts to derive a suitable
configuration for the most commonly used environments. To see the
default settings, type:
@code
% make info
@endcode
You can change the Makefile's behavior with the following options:
- <b>omp_root</b>: The path to the top-level directory containing the top-level
Makefile. By default, this will take on the value of the
current working directory.
- <b>omp_os</b>: Operating system. By default, the build will attempt to
detect this. Currently supports "linux", "macos", and
"windows".
- <b>arch</b>: Architecture. By default, the build will attempt to
detect this if not specified by the user. Currently
supported values are
- "32" for IA-32 architecture
- "32e" for Intel® 64 architecture
- "mic" for Intel® Many Integrated Core Architecture (
If "mic" is specified then "icc" will be used as the
compiler, and appropriate k1om binutils will be used. The
necessary packages must be installed on the build machine
for this to be possible, but an
Intel® Xeon Phi™
coprocessor is not required to build the library).
- <b>compiler</b>: Which compiler to use for the build. Defaults to "icc"
or "icl" depending on the value of omp_os. Also supports
"gcc" when omp_os is "linux" for gcc\other versions
4.6.2 and higher. For icc on OS X\other, OS X\other versions
greater than 10.6 are not supported currently. Also, icc
version 13.0 is not supported. The selected compiler should be
installed and in the user's path. The corresponding
Fortran compiler should also be in the path.
- <b>mode</b>: Library mode: default is "release". Also supports "debug".
To use any of the options above, simply add <option_name>=<value>. For
example, if you want to build with gcc instead of icc, type:
@code
% make compiler=gcc
@endcode
Underneath the hood of the top-level Makefile, the runtime is built by
a perl script that in turn drives a detailed runtime system make. The
script can be found at <tt>tools/build.pl</tt>, and will print
information about all its flags and controls if invoked as
@code
% tools/build.pl --help
@endcode
If invoked with no arguments, it will try to build a set of libraries
that are appropriate for the machine on which the build is happening.
There are many options for building out of tree, and configuring library
features that can also be used. Consult the <tt>--help</tt> output for details.
@section sec_supported Supported RTL Build Configurations
The architectures supported are IA-32 architecture, Intel® 64, and
Intel® Many Integrated Core Architecture. The build configurations
supported are shown in the table below.
<table border=1>
<tr><th> <th>icc/icl<th>gcc
<tr><td>Linux\other OS<td>Yes(1,5)<td>Yes(2,4)
<tr><td>OS X\other<td>Yes(1,3,4)<td>No
<tr><td>Windows\other OS<td>Yes(1,4)<td>No
</table>
(1) On IA-32 architecture and Intel® 64, icc/icl versions 12.x
are supported (12.1 is recommended).<br>
(2) gcc version 4.6.2 is supported.<br>
(3) For icc on OS X\other, OS X\other version 10.5.8 is supported.<br>
(4) Intel® Many Integrated Core Architecture not supported.<br>
(5) On Intel® Many Integrated Core Architecture, icc/icl versions 13.0 or later are required.
@section sec_frontend Front-end Compilers that work with this RTL
The following compilers are known to do compatible code generation for
this RTL: icc/icl, gcc. Code generation is discussed in more detail
later in this document.
@section sec_outlining Outlining
The runtime interface is based on the idea that the compiler
"outlines" sections of code that are to run in parallel into separate
functions that can then be invoked in multiple threads. For instance,
simple code like this
@code
void foo()
{
#pragma omp parallel
{
... do something ...
}
}
@endcode
is converted into something that looks conceptually like this (where
the names used are merely illustrative; the real library function
names will be used later after we've discussed some more issues...)
@code
static void outlinedFooBody()
{
... do something ...
}
void foo()
{
__OMP_runtime_fork(outlinedFooBody, (void*)0); // Not the real function name!
}
@endcode
@subsection SEC_SHAREDVARS Addressing shared variables
In real uses of the OpenMP\other API there are normally references
from the outlined code to shared variables that are in scope in the containing function.
Therefore the containing function must be able to address
these variables. The runtime supports two alternate ways of doing
this.
@subsubsection SEC_SEC_OT Current Technique
The technique currently supported by the runtime library is to receive
a separate pointer to each shared variable that can be accessed from
the outlined function. This is what is shown in the example below.
We hope soon to provide an alternative interface to support the
alternate implementation described in the next section. The
alternative implementation has performance advantages for small
parallel regions that have many shared variables.
@subsubsection SEC_SEC_PT Future Technique
The idea is to treat the outlined function as though it
were a lexically nested function, and pass it a single argument which
is the pointer to the parent's stack frame. Provided that the compiler
knows the layout of the parent frame when it is generating the outlined
function it can then access the up-level variables at appropriate
offsets from the parent frame. This is a classical compiler technique
from the 1960s to support languages like Algol (and its descendants)
that support lexically nested functions.
The main benefit of this technique is that there is no code required
at the fork point to marshal the arguments to the outlined function.
Since the runtime knows statically how many arguments must be passed to the
outlined function, it can easily copy them to the thread's stack
frame. Therefore the performance of the fork code is independent of
the number of shared variables that are accessed by the outlined
function.
If it is hard to determine the stack layout of the parent while generating the
outlined code, it is still possible to use this approach by collecting all of
the variables in the parent that are accessed from outlined functions into
a single `struct` which is placed on the stack, and whose address is passed
to the outlined functions. In this way the offsets of the shared variables
are known (since they are inside the struct) without needing to know
the complete layout of the parent stack-frame. From the point of view
of the runtime either of these techniques is equivalent, since in either
case it only has to pass a single argument to the outlined function to allow
it to access shared variables.
A scheme like this is how gcc\other generates outlined functions.
@section SEC_INTERFACES Library Interfaces
The library functions used for specific parts of the OpenMP\other language implementation
are documented in different modules.
- @ref BASIC_TYPES fundamental types used by the runtime in many places
- @ref DEPRECATED functions that are in the library but are no longer required
- @ref STARTUP_SHUTDOWN functions for initializing and finalizing the runtime
- @ref PARALLEL functions for implementing `omp parallel`
- @ref THREAD_STATES functions for supporting thread state inquiries
- @ref WORK_SHARING functions for work sharing constructs such as `omp for`, `omp sections`
- @ref THREADPRIVATE functions to support thread private data, copyin etc
- @ref SYNCHRONIZATION functions to support `omp critical`, `omp barrier`, `omp master`, reductions etc
- @ref ATOMIC_OPS functions to support atomic operations
- @ref STATS_GATHERING macros to support developer profiling of libiomp5
- Documentation on tasking has still to be written...
@section SEC_EXAMPLES Examples
@subsection SEC_WORKSHARING_EXAMPLE Work Sharing Example
This example shows the code generated for a parallel for with reduction and dynamic scheduling.
@code
extern float foo( void );
int main () {
int i;
float r = 0.0;
#pragma omp parallel for schedule(dynamic) reduction(+:r)
for ( i = 0; i < 10; i ++ ) {
r += foo();
}
}
@endcode
The transformed code looks like this.
@code
extern float foo( void );
int main () {
static int zero = 0;
auto int gtid;
auto float r = 0.0;
__kmpc_begin( & loc3, 0 );
// The gtid is not actually required in this example so could be omitted;
// We show its initialization here because it is often required for calls into
// the runtime and should be locally cached like this.
  gtid = __kmpc_global_thread_num( & loc3 );
  __kmpc_fork_call( & loc7, 1, main_7_parallel_3, & r );
__kmpc_end( & loc0 );
return 0;
}
struct main_10_reduction_t_5 { float r_10_rpr; };
static kmp_critical_name lck = { 0 };
static ident_t loc10; // loc10.flags should contain KMP_IDENT_ATOMIC_REDUCE bit set
// if compiler has generated an atomic reduction.
void main_7_parallel_3( int *gtid, int *btid, float *r_7_shp ) {
auto int i_7_pr;
auto int lower, upper, liter, incr;
auto struct main_10_reduction_t_5 reduce;
reduce.r_10_rpr = 0.F;
liter = 0;
__kmpc_dispatch_init_4( & loc7,*gtid, 35, 0, 9, 1, 1 );
while ( __kmpc_dispatch_next_4( & loc7, *gtid, & liter, & lower, & upper, & incr ) ) {
for( i_7_pr = lower; upper >= i_7_pr; i_7_pr ++ )
reduce.r_10_rpr += foo();
}
switch( __kmpc_reduce_nowait( & loc10, *gtid, 1, 4, & reduce, main_10_reduce_5, & lck ) ) {
case 1:
*r_7_shp += reduce.r_10_rpr;
__kmpc_end_reduce_nowait( & loc10, *gtid, & lck );
break;
case 2:
__kmpc_atomic_float4_add( & loc10, *gtid, r_7_shp, reduce.r_10_rpr );
break;
default:;
}
}
void main_10_reduce_5( struct main_10_reduction_t_5 *reduce_lhs,
struct main_10_reduction_t_5 *reduce_rhs )
{
reduce_lhs->r_10_rpr += reduce_rhs->r_10_rpr;
}
@endcode
@defgroup BASIC_TYPES Basic Types
Types that are used throughout the runtime.
@defgroup DEPRECATED Deprecated Functions
Functions in this group are for backwards compatibility only, and
should not be used in new code.
@defgroup STARTUP_SHUTDOWN Startup and Shutdown
These functions are for library initialization and shutdown.
@defgroup PARALLEL Parallel (fork/join)
These functions are used for implementing <tt>\#pragma omp parallel</tt>.
@defgroup THREAD_STATES Thread Information
These functions return information about the currently executing thread.
@defgroup WORK_SHARING Work Sharing
These functions are used for implementing
<tt>\#pragma omp for</tt>, <tt>\#pragma omp sections</tt>, <tt>\#pragma omp single</tt> and
<tt>\#pragma omp master</tt> constructs.
When handling loops, there are different functions for each of the signed and unsigned 32 and 64 bit integer types
which have the name suffixes `_4`, `_4u`, `_8` and `_8u`. The semantics of each of the functions is the same,
so they are only described once.
Static loop scheduling is handled by @ref __kmpc_for_static_init_4 and friends. Only a single call is needed,
since the iterations to be executed by any give thread can be determined as soon as the loop parameters are known.
Dynamic scheduling is handled by the @ref __kmpc_dispatch_init_4 and @ref __kmpc_dispatch_next_4 functions.
The init function is called once in each thread outside the loop, while the next function is called each
time that the previous chunk of work has been exhausted.
@defgroup SYNCHRONIZATION Synchronization
These functions are used for implementing barriers.
@defgroup THREADPRIVATE Thread private data support
These functions support copyin/out and thread private data.
@defgroup STATS_GATHERING Statistics Gathering from OMPTB
These macros support profiling the libiomp5 library. Use --stats=on when building with build.pl to enable
and then use the KMP_* macros to profile (through counts or clock ticks) libiomp5 during execution of an OpenMP program.
@section sec_stats_env_vars Environment Variables
This section describes the environment variables relevant to stats-gathering in libiomp5
@code
KMP_STATS_FILE
@endcode
This environment variable is set to an output filename that will be appended *NOT OVERWRITTEN* if it exists. If this environment variable is undefined, the statistics will be output to stderr
@code
KMP_STATS_THREADS
@endcode
This environment variable indicates to print thread-specific statistics as well as aggregate statistics. Each thread's statistics will be shown as well as the collective sum of all threads. The values "true", "on", "1", "yes" will all indicate to print per thread statistics.
@defgroup TASKING Tasking support
These functions support tasking constructs.
@defgroup USER User visible functions
These functions can be called directly by the user, but are runtime library specific, rather than being OpenMP interfaces.
*/
|
tree_utils.h | //
// Created by Atharva on 6/26/20.
//
#ifndef TREE_TRAVERSALS_TREE_UTILS_H
#define TREE_TRAVERSALS_TREE_UTILS_H
#include <malloc.h>
#include <iostream>
#include <omp.h>
using namespace std;
// N-ary tree node used by the traversal benchmarks below.
struct node{
    int data;                // payload value stored at this node
    int children;            // number of child slots this node was created with
    struct node** pointers;  // child-pointer array (nullptr for nodes created
                             // at maximum depth — see createNode)
};
typedef struct node Node;
/**
 * Allocate a tree node holding `Data` with room for `children` child
 * pointers.  Nodes created at depth >= 7 are leaves: their child-pointer
 * array is left nullptr so traversals know to stop.
 *
 * Fix vs. original: the old code allocated an array of Node (not Node*)
 * with malloc and then stored the address of a local variable (`&arr`) in
 * node->pointers, leaving a dangling pointer the moment this function
 * returned.  We now allocate the Node* array itself, value-initialized so
 * unpopulated slots are safe null pointers.
 */
Node* createNode(int Data, int children, int depth)
{
    Node* node = new Node();
    node->data = Data;
    node->children = children;
    if (depth < 7)
    {
        // `()` value-initializes every slot to nullptr.
        node->pointers = new Node*[children]();
    }
    else
    {
        node->pointers = nullptr;
    }
    return node;
}
/**
 * Recursively fill the subtree below `node` with random-valued nodes of
 * fanout 3, stopping at depth 7.
 *
 * Fixes vs. original:
 *  - `node = nullptr` at maximum depth only overwrote the local parameter
 *    copy and had no effect on the caller's tree; removed.
 *  - guard against a null node / null child-pointer array before indexing.
 *
 * NOTE(review): rand() is not guaranteed thread-safe; calling it from the
 * parallel loop may contend or yield correlated values — confirm whether
 * reproducible randomness matters here.
 */
void populateChildren(Node* node, int depth)
{
    if (depth == 7 || node == nullptr || node->pointers == nullptr)
        return;
    #pragma omp parallel for
    for (int i = 0; i < node->children; i++)
    {
        int current_depth = depth;
        node->pointers[i] = createNode(int(rand()), 3, current_depth + 1);
        populateChildren(node->pointers[i], current_depth + 1);
    }
}
// Build the benchmark tree: a root holding value 8 with fanout 5, whose
// descendants are random-valued nodes of fanout 3 populated down to depth 7.
Node* createTree()
{
    Node* root = createNode(8, 5, 0);
    populateChildren(root, 0);
    return root;
}
/**
 * Search the subtree rooted at `head` for `query`, printing the address of
 * every node whose data matches.  Child subtrees are searched with an OpenMP
 * parallel for (recursive calls open nested parallel regions).
 *
 * Fix vs. original: leaf nodes carry pointers == nullptr while `children`
 * stays non-zero (see createNode), so the original loop dereferenced a null
 * child-pointer array; we now stop before the loop in that case.
 */
void parallel_tree_search(Node* head, int query)
{
    if (head == nullptr)
        return;
    if (head->data == query)
    {
        cout << "Query found at" << head;
        cout << '\n';
    }
    if (head->pointers == nullptr)
        return;
    #pragma omp parallel for
    for (int i = 0; i < head->children; i++)
    {
        parallel_tree_search(head->pointers[i], query);
    }
}
#endif //TREE_TRAVERSALS_TREE_UTILS_H |
expm_multiply_parallel.h |
#ifndef _EXPM_MULTIPLY_H
#define _EXPM_MULTIPLY_H
#include "complex_ops.h"
#include <stdio.h>
// #include "iterators.h"
// #include <algorithm>
// #include <numeric>
#include "openmp.h"
#if defined(_OPENMP)
#include "csrmv_merge.h"
#else
// Serial CSR sparse matrix-vector product, used as the fallback when the
// library is built without OpenMP.  Computes y = a*A*x when overwrite_y is
// true, or y += a*A*x otherwise.  The rco/vco workspaces exist only to
// mirror the csrmv_merge signature and are not touched here.
template<typename I, typename T1,typename T2,typename T3>
void csr_matvec(const bool overwrite_y,
                const I n,
                const I Ap[],
                const I Aj[],
                const T1 Ax[],
                const T2 a,
                const T3 x[],
                I rco[],
                T3 vco[],
                T3 y[])
{
    for(I row = 0; row < n; row++){
        // Accumulate the dot product of row `row` with x.
        T3 acc = 0;
        for(I idx = Ap[row]; idx < Ap[row + 1]; idx++){
            acc += Ax[idx] * x[Aj[idx]];
        }
        if(overwrite_y){
            y[row] = a * acc;
        }else{
            y[row] += a * acc;
        }
    }
}
#endif
#include <algorithm>
#include <vector>
#include "math_functions.h"
// #include <valarray> // std::valarray, std::slice
/* Computes F <- exp(a*A) * F for the CSR matrix (Ap, Aj, Ax), using s
 * scaling steps of a truncated Taylor series with at most m_star terms per
 * step; the shift mu is removed from A and restored through the scalar
 * factor eta each step.  work must hold 2*n entries (B1/B2 term buffers).
 * Appears to follow the Al-Mohy & Higham expm_multiply scheme — confirm
 * against the caller that chooses s/m_star/tol.
 *
 * Fix vs. original: the per-thread partial infinity norm of the initial F
 * was never stored into c1_threads before the first barrier, so thread 0
 * computed c1 = 0 instead of ||F||_inf, loosening the very first
 * convergence test ((c1+c2) <= tol*c3).  The symmetric store at the end of
 * each scaling step was already present.
 */
template<typename I, typename T1,typename T2,typename T3>
void expm_multiply(const I n,
                    const I Ap[],
                    const I Aj[],
                    const T1 Ax[],
                    const int s,
                    const int m_star,
                    const T2 tol,
                    const T3 mu,
                    const T3 a,
                    T3 F[],
                    T3 work[]
                    )
{
    const int num_threads = omp_get_max_threads();
    // Per-thread workspaces for csrmv_merge (rco/vco) and per-thread
    // partial norm reductions (c1/c2/c3).
    std::vector<I> rco_vec(num_threads,0);
    std::vector<T3> vco_vec(num_threads,0);
    std::vector<T2> c1_threads_vec(num_threads,0);
    std::vector<T2> c2_threads_vec(num_threads,0);
    std::vector<T2> c3_threads_vec(num_threads,0);

    T3 * B1 = work;       // current Taylor term
    T3 * B2 = work + n;   // next Taylor term
    I * rco = &rco_vec[0];
    T3 * vco = &vco_vec[0];
    T2 * c1_threads = &c1_threads_vec[0];
    T2 * c2_threads = &c2_threads_vec[0];
    T2 * c3_threads = &c3_threads_vec[0];
    bool exit_loop=false;

    #pragma omp parallel shared(exit_loop,c1_threads,c2_threads,c3_threads,F,B1,B2,rco,vco) firstprivate(num_threads)
    {
        const int tid = omp_get_thread_num();
        // Static partition: this thread owns rows [begin, end).
        const I items_per_thread = (n+num_threads-1)/num_threads;
        const I begin = std::min(items_per_thread * tid, n);
        const I end = std::min(begin+items_per_thread, n);
        const T3 eta = math_functions::exp(a*(mu/T2(s)));
        T2 c1_thread=0,c2_thread=0,c3_thread=0,c1=0,c2=0,c3=0;

        // B1 = F, while computing this thread's partial ||F||_inf.
        c1_thread = 0;
        for(I k=begin;k<end;k++){
            T3 f = F[k];
            B1[k] = f;
            c1_thread = math_functions::compare_abs(c1_thread,f);
        }
        // BUG FIX: publish the partial norm before the barrier (the
        // original omitted this store, leaving c1_threads all zero).
        c1_threads[tid] = c1_thread;
        #pragma omp barrier
        if(tid==0){
            c1 = *std::max_element(c1_threads,c1_threads+num_threads);
        }

        for(int i=0;i<s;i++){
            #pragma omp single
            {
                exit_loop = false;
            } // single's implied barrier makes exit_loop visible to all

            for(int j=1;j<m_star+1 && !exit_loop;j++){
                // B2 = (a/(j*s)) * A * B1
                #if defined(_OPENMP)
                csrmv_merge<I,T1,T3,T3>(true,n,Ap,Aj,Ax,a/T2(j*s),B1,rco,vco,B2); // implied barrier
                #else
                csr_matvec<I,T1,T3,T3>(true,n,Ap,Aj,Ax,a/T2(j*s),B1,rco,vco,B2);
                #endif
                c2_thread = 0; c3_thread = 0;
                const T3 b = a * mu / T2(j*s);
                for(I k=begin;k<end;k++){
                    T3 b2 = B2[k] - b * B1[k];  // remove the mu shift
                    T3 f = F[k] += b2;
                    B1[k] = b2;
                    // use cached values for the infinity-norm comparisons
                    c2_thread = math_functions::compare_abs(c2_thread,b2);
                    c3_thread = math_functions::compare_abs(c3_thread,f);
                }
                c2_threads[tid] = c2_thread;
                c3_threads[tid] = c3_thread;
                #pragma omp barrier
                if(tid==0){
                    // Truncation test: stop once the last two term norms
                    // are small relative to tol * ||F||_inf.
                    c2 = *std::max_element(c2_threads,c2_threads+num_threads);
                    c3 = *std::max_element(c3_threads,c3_threads+num_threads);
                    exit_loop = ((c1+c2)<=(tol*c3));
                    c1 = c2;
                }
                #pragma omp barrier
            }

            // Restore the shift for this scaling step: F *= eta, reseed B1,
            // and recompute the partial ||F||_inf for the next step.
            c1_thread = 0;
            for(I k=begin;k<end;k++){
                T3 f = F[k] *= eta;
                B1[k] = f;
                c1_thread = math_functions::compare_abs(c1_thread,f);
            }
            c1_threads[tid] = c1_thread;
            #pragma omp barrier
            if(tid==0){
                c1 = *std::max_element(c1_threads,c1_threads+num_threads);
            }
        }
    }
}
#endif
|
conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: quanwang@openailab.com
*/
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* memcpy/memset used by interleave() and im2col_fp32() */
#include <sys/time.h>

#include "conv_kernel_x86.h"
#include "wino_conv_kernel_x86.h"

#if __SSE2__
#include <emmintrin.h>
#endif
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
/* Wall-clock timestamp in milliseconds (gettimeofday-based); used for
 * ad-hoc profiling of the convolution kernels. */
static double get_current_time()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_usec / 1000.0 + now.tv_sec * 1000.0;
}
/* Bytes needed for the private weight buffer.  uint8 filters are simulated
 * in fp32 (see interleave_uint8), so they need 4x the raw weight storage. */
static int get_private_mem_size(struct ir_tensor* filter)
{
    const int raw_bytes = filter->elem_num * filter->elem_size;
    if (filter->data_type == TENGINE_DT_UINT8)
        return raw_bytes * 4; /* simulator uint8 inference with fp32 */
    return raw_bytes; /* caution */
}
/* Fill the private interleave buffer from the fp32 filter.  No re-layout is
 * performed here — the weights are used as-is, so this is a straight copy
 * into the buffer sized by get_private_mem_size(). */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}
/* Dequantize the uint8 filter weights into the fp32 interleave buffer so
 * the simulator can run the convolution in floating point:
 * w_fp32 = (w_u8 - zero_point) * scale. */
static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    float* out_fp32 = (float*)priv_info->interleave_buffer;
    const uint8_t* in_u8 = (const uint8_t*)filter->data;
    const float scale = filter->scale;
    const int zp = filter->zero_point;

    for (int i = 0; i < filter->elem_num; i++)
    {
        out_fp32[i] = ((float)in_u8[i] - (float)zp) * scale;
    }
}
/* im2col for fp32: expand the (inc x inh x inw) image into a
 * (ksize_h*ksize_w*inc) x (outh*outw) column matrix for GEMM.
 * Each output row c corresponds to one (channel, kh, kw) filter tap;
 * out-of-image taps (padding) are written as 0.
 *
 * Fixes vs. the previous version:
 *  - no pointer is formed before the start of data_img (the old code
 *    pre-offset by (w_low-1)*sw, which is UB even if never dereferenced);
 *  - w_low/w_high are clamped into [0, outw], so degenerate geometry
 *    (e.g. kw*dw - pw >= inw) can no longer drive a negative w_high into
 *    memset and overflow the output row. */
void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h,
                 int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;
    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose the column-matrix row index into (kw, kh, channel) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        /* [w_low, w_high) = output columns whose source pixel lies inside
         * the image horizontally; everything outside is zero padding */
        int w_low = -im_col / sw + (-im_col % sw > 0);
        if (w_low < 0)
            w_low = 0;
        if (w_low > outw)
            w_low = outw;
        int w_high = (inw - im_col) / sw + ((inw - im_col) % sw > 0);
        if (w_high > outw)
            w_high = outw;
        if (w_high < w_low) /* fully padded row; also keeps memset sizes >= 0 */
            w_high = w_low;
        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            if (im_row >= 0 && im_row < inh)
            {
                const float* img_row = data_img + inw * (im_row + inh * c_);
                memset(out, 0, w_low * sizeof(float));
                for (int w = w_low; w < w_high; ++w)
                    out[w] = img_row[im_col + w * sw]; /* in-bounds by construction of w_low/w_high */
                memset(out + w_high, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                /* source row is outside the image: whole output row is padding */
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}
/* im2col for the uint8 simulator path: identical traversal to
 * im2col_fp32, but each sampled pixel is dequantized on the fly,
 * out_fp32 = (u8 - zero_point) * scale, so the GEMM can run in fp32.
 * Geometry is taken from the tensors/param rather than passed flat.
 *
 * Same fixes as im2col_fp32: no out-of-bounds pointer formation, and
 * w_low/w_high clamped into [0, outw] to keep the memset sizes valid. */
void im2col_uint8(uint8_t* data_img, float* data_col, struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    float scale = input_tensor->scale;
    int zero_point = input_tensor->zero_point;
    const int channels_col = ksize_h * ksize_w * inc;
    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose the column-matrix row index into (kw, kh, channel) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        /* [w_low, w_high) = output columns whose source pixel is inside the image */
        int w_low = -im_col / sw + (-im_col % sw > 0);
        if (w_low < 0)
            w_low = 0;
        if (w_low > outw)
            w_low = outw;
        int w_high = (inw - im_col) / sw + ((inw - im_col) % sw > 0);
        if (w_high > outw)
            w_high = outw;
        if (w_high < w_low)
            w_high = w_low;
        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            if (im_row >= 0 && im_row < inh)
            {
                const uint8_t* img_row = data_img + inw * (im_row + inh * c_);
                memset(out, 0, w_low * sizeof(float));
                for (int w = w_low; w < w_high; ++w)
                    out[w] = ((float)img_row[im_col + w * sw] - (float)zero_point) * scale;
                memset(out + w_high, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}
/* Dispatch im2col for one (batch n, group) slice of the input tensor.
 * Computes the byte offset of the slice and forwards to the uint8 or fp32
 * expansion into priv_info->im2col_buffer.
 * Fix: offset through char* instead of void* -- arithmetic on void* is a
 * GCC extension, not standard C. */
static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];
    void* input_base = (char*)input->data + (n * image_size + group * group_size) * input->elem_size;
    void* im2col_buf = priv_info->im2col_buffer;
    if (input->data_type == TENGINE_DT_UINT8)
        im2col_uint8(input_base, im2col_buf, input, output, param);
    else
        im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3],
                    param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
}
#if __AVX__
/* Repack the K x N im2col matrix (row-major, N columns) for the AVX sgemm:
 * full groups of 8 columns become contiguous 8-wide panels of K rows
 * ([b00..b07, b10..b17, ...]); each leftover column is stored singly at
 * panel slot (i/8 + i%8), matching the indexing used by sgemm's tail. */
void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread)
{
    const int panel_count = N >> 3;
    const int tail_start = panel_count << 3;
    /* full 8-column panels */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < panel_count; p++)
    {
        const int col = p * 8;
        const float* src = pB + col;
        float* dst = pB_t + (col / 8) * 8 * K;
        for (int row = 0; row < K; row++)
        {
#if __AVX__
            _mm256_storeu_ps(dst, _mm256_loadu_ps(src));
#else
            for (int lane = 0; lane < 8; lane++)
                dst[lane] = src[lane];
#endif // __AVX__
            dst += 8;
            src += N;
        }
    }
    /* leftover columns, one at a time: [b0c, b1c, b2c, ...] */
#pragma omp parallel for num_threads(num_thread)
    for (int col = tail_start; col < N; col++)
    {
        const float* src = pB + col;
        float* dst = pB_t + (col / 8 + col % 8) * 8 * K;
        for (int row = 0; row < K; row++)
        {
            dst[0] = src[0];
            dst += 1;
            src += N;
        }
    }
}
/*
 * Packed SGEMM (AVX branch): pC[M x N] += 0; pC = pA_t * pB_t.
 * pB_t must be packed by input_pack4() above (8-column panels plus singly
 * stored leftover columns).  Output rows (= output channels) are produced
 * in blocks of 8, then 4, then 1; within each row block, columns go 8 at a
 * time with a per-column tail.  Uses AVX/FMA intrinsics when __AVX__ is
 * defined, scalar fallback otherwise.
 * NOTE(review): the pA_t indexing (i/8 + (i%8)/4 + i%4)*8*K assumes a
 * matching panel layout from the filter-interleave step -- confirm against
 * the prerun packing code.
 */
static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;
    /* ---- 8 output rows at a time ---- */
    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;
        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        float* output4 = pC + (i + 4) * N;
        float* output5 = pC + (i + 5) * N;
        float* output6 = pC + (i + 6) * N;
        float* output7 = pC + (i + 7) * N;
        int j = 0;
        /* 8 rows x 8 columns micro-kernel */
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            __m256 _sum4 = _mm256_set1_ps(0.0);
            __m256 _sum5 = _mm256_set1_ps(0.0);
            __m256 _sum6 = _mm256_set1_ps(0.0);
            __m256 _sum7 = _mm256_set1_ps(0.0);
            int k = 0;
            /* K unrolled by 4 */
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);
                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);
                _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70
                va += 8;
                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);
                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);
                _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41
                _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51
                _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61
                _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71
                va += 8;
                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);
                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);
                _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42
                _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52
                _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62
                _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72
                va += 8;
                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);
                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);
                _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43
                _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53
                _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63
                _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73
                va += 8;
                vb += 32;
            }
            /* K tail, one k per iteration */
            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _va4 = _mm256_broadcast_ss(va + 4);
                __m256 _va5 = _mm256_broadcast_ss(va + 5);
                __m256 _va6 = _mm256_broadcast_ss(va + 6);
                __m256 _va7 = _mm256_broadcast_ss(va + 7);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
                _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70
                va += 8;
                vb += 8;
            }
            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
            _mm256_storeu_ps(output4, _sum4);
            _mm256_storeu_ps(output5, _sum5);
            _mm256_storeu_ps(output6, _sum6);
            _mm256_storeu_ps(output7, _sum7);
#else
            /* scalar fallback: same 8x8 accumulation */
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};
            float sum4[8] = {0};
            float sum5[8] = {0};
            float sum6[8] = {0};
            float sum7[8] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                    sum4[n] += va[4] * vb[n];
                    sum5[n] += va[5] * vb[n];
                    sum6[n] += va[6] * vb[n];
                    sum7[n] += va[7] * vb[n];
                }
                va += 8;
                vb += 8;
            }
            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
                output4[n] = sum4[n];
                output5[n] = sum5[n];
                output6[n] = sum6[n];
                output7[n] = sum7[n];
            }
#endif // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
            output4 += 8;
            output5 += 8;
            output6 += 8;
            output7 += 8;
        }
        /* column tail: one column at a time for the 8-row block */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m256 _sum0_7 = _mm256_set1_ps(0.0);
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _vb1 = _mm256_broadcast_ss(vb + 1);
                __m256 _vb2 = _mm256_broadcast_ss(vb + 2);
                __m256 _vb3 = _mm256_broadcast_ss(vb + 3);
                __m256 _va0 = _mm256_loadu_ps(va);
                __m256 _va1 = _mm256_loadu_ps(va + 8);
                __m256 _va2 = _mm256_loadu_ps(va + 16);
                __m256 _va3 = _mm256_loadu_ps(va + 24);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00
                _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10
                _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20
                _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30
                va += 32;
                vb += 4;
            }
            /* reduce the 4 partial vectors into one before the K tail */
            _sum0 = _mm256_add_ps(_sum0, _sum1);
            _sum2 = _mm256_add_ps(_sum2, _sum3);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum2);
            for (; k < K; k++)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _va = _mm256_loadu_ps(va);
                _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00
                va += 8;
                vb += 1;
            }
            /* lane l of _sum0_7 is the dot product for output row i+l */
            float output_sum0_7[8] = {0.f};
            _mm256_storeu_ps(output_sum0_7, _sum0_7);
            output0[0] = output_sum0_7[0];
            output1[0] = output_sum0_7[1];
            output2[0] = output_sum0_7[2];
            output3[0] = output_sum0_7[3];
            output4[0] = output_sum0_7[4];
            output5[0] = output_sum0_7[5];
            output6[0] = output_sum0_7[6];
            output7[0] = output_sum0_7[7];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            float sum4 = 0;
            float sum5 = 0;
            float sum6 = 0;
            float sum7 = 0;
            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                sum4 += va[4] * vb[0];
                sum5 += va[5] * vb[0];
                sum6 += va[6] * vb[0];
                sum7 += va[7] * vb[0];
                va += 8;
                vb += 1;
            }
            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
            output4[0] = sum4;
            output5[0] = sum5;
            output6[0] = sum6;
            output7[0] = sum7;
#endif // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
            output4++;
            output5++;
            output6++;
            output7++;
        }
    }
    /* ---- remaining rows in blocks of 4 (no omp here, runs on caller thread) ---- */
    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = remain_outch_start + pp * 4;
        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        int j = 0;
        /* 4 rows x 8 columns micro-kernel */
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);
                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
                va += 4;
                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);
                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
                va += 4;
                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);
                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
                va += 4;
                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);
                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
                va += 4;
                vb += 32;
            }
            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
                va += 4;
                vb += 8;
            }
            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
#else
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 8;
            }
            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
        }
        /* column tail for the 4-row block */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m128 _sum0_3 = _mm_set1_ps(0.0);
            __m128 _sum0 = _mm_set1_ps(0.0);
            __m128 _sum1 = _mm_set1_ps(0.0);
            __m128 _sum2 = _mm_set1_ps(0.0);
            __m128 _sum3 = _mm_set1_ps(0.0);
            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _vb1 = _mm_set1_ps(vb[1]);
                __m128 _vb2 = _mm_set1_ps(vb[2]);
                __m128 _vb3 = _mm_set1_ps(vb[3]);
                __m128 _va0 = _mm_loadu_ps(va);
                __m128 _va1 = _mm_loadu_ps(va + 4);
                __m128 _va2 = _mm_loadu_ps(va + 8);
                __m128 _va3 = _mm_loadu_ps(va + 12);
                _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00
                _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10
                _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20
                _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30
                va += 16;
                vb += 4;
            }
            _sum0 = _mm_add_ps(_sum0, _sum1);
            _sum2 = _mm_add_ps(_sum2, _sum3);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum2);
            for (; k < K; k++)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _va = _mm_loadu_ps(va);
                _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00
                va += 4;
                vb += 1;
            }
            float output_sum0_3[4] = {0.f};
            _mm_storeu_ps(output_sum0_3, _sum0_3);
            output0[0] = output_sum0_3[0];
            output1[0] = output_sum0_3[1];
            output2[0] = output_sum0_3[2];
            output3[0] = output_sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                va += 4;
                vb += 1;
            }
            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }
    /* ---- remaining rows one at a time ---- */
    remain_outch_start += nn_outch << 2;
    // output ch0
    for (int i = remain_outch_start; i < M; i++)
    {
        float* output = pC + i * N;
        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);
                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01
                _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02
                _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03
                va += 4;
                vb += 32;
            }
            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                va += 1;
                vb += 8;
            }
            _mm256_storeu_ps(output, _sum0);
#else
            float sum[8] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 8;
            }
            for (int n = 0; n < 8; n++)
            {
                output[n] = sum[n];
            }
#endif // __AVX__
            output += 8;
        }
        /* scalar dot-product tail for the last columns */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
            int k = 0;
#if __AVX__
            __m128 _sum0 = _mm_set1_ps(0.f);
            for (; k + 3 < K; k += 4)
            {
                __m128 _p0 = _mm_loadu_ps(vb);
                __m128 _k0 = _mm_loadu_ps(va);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
                va += 4;
                vb += 4;
            }
            /* horizontal reduction; vector subscripting is a GCC/Clang extension */
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
            float sum0 = 0.f;
#endif // __AVX__
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                va += 1;
                vb += 1;
            }
            output[0] = sum0;
            output++;
        }
    }
}
#else // !__AVX__ -- SSE fallback versions
/* SSE-branch repacking of the K x N im2col matrix: full groups of 4
 * columns become contiguous 4-wide panels of K rows; each leftover column
 * is stored singly at panel slot (i/4 + i%4), matching the indexing used
 * by the SSE sgemm's column tail. */
void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread)
{
    const int panel_count = N >> 2;
    const int tail_start = panel_count << 2;
    /* full 4-column panels: [b00..b03, b10..b13, ...] */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < panel_count; p++)
    {
        const int col = p * 4;
        const float* src = pB + col;
        float* dst = pB_t + (col / 4) * 4 * K;
        for (int row = 0; row < K; row++)
        {
#if __SSE__
            _mm_storeu_ps(dst, _mm_loadu_ps(src));
#else
            for (int lane = 0; lane < 4; lane++)
                dst[lane] = src[lane];
#endif // __SSE__
            dst += 4;
            src += N;
        }
    }
    /* leftover columns, one at a time: [b0c, b1c, b2c, ...] */
#pragma omp parallel for num_threads(num_thread)
    for (int col = tail_start; col < N; col++)
    {
        const float* src = pB + col;
        float* dst = pB_t + (col / 4 + col % 4) * 4 * K;
        for (int row = 0; row < K; row++)
        {
            dst[0] = src[0];
            dst += 1;
            src += N;
        }
    }
}
// unloop output M, unloop N, packet 4x4, using intrinsic
/*
 * Packed SGEMM (SSE fallback branch): pC[M x N] = pA_t * pB_t.
 * pB_t must be packed by the 4-wide input_pack4() above.  Output rows are
 * produced 4 at a time with a single-row tail; columns 4 at a time with a
 * per-column tail.  SSE intrinsics when __SSE__ is defined, scalar
 * fallback otherwise.
 * NOTE(review): the pA_t indexing (i/4 + i%4)*4*K assumes a matching
 * 4-wide panel layout from the filter-interleave step -- confirm against
 * the prerun packing code.
 */
// unloop output M, unloop N, packet 4x4, using intrinsic
static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;
    nn_outch = M >> 2;
    remain_outch_start = nn_outch << 2;
    // output ch0 - ch3
#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 4;
        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        int j = 0;
        /* 4 rows x 4 columns micro-kernel */
        for (; j + 3 < N; j += 4)
        {
            float* va = pA_t + (i / 4) * 4 * K;
            float* vb = pB_t + (j / 4) * 4 * K;
#if __SSE__
            __m128 _sum0 = _mm_set1_ps(0.f);
            __m128 _sum1 = _mm_set1_ps(0.f);
            __m128 _sum2 = _mm_set1_ps(0.f);
            __m128 _sum3 = _mm_set1_ps(0.f);
            int k = 0;
            /* K unrolled by 4 */
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m128 _vb = _mm_loadu_ps(vb);
                __m128 _va0 = _mm_set1_ps(va[0]);
                __m128 _va1 = _mm_set1_ps(va[1]);
                __m128 _va2 = _mm_set1_ps(va[2]);
                __m128 _va3 = _mm_set1_ps(va[3]);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30
                // k1
                _vb = _mm_loadu_ps(vb + 4);
                _va0 = _mm_set1_ps(va[4]);
                _va1 = _mm_set1_ps(va[5]);
                _va2 = _mm_set1_ps(va[6]);
                _va3 = _mm_set1_ps(va[7]);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a10-a13) * k01
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a10-a13) * k11
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a10-a13) * k21
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a10-a13) * k31
                // k2
                _vb = _mm_loadu_ps(vb + 8);
                _va0 = _mm_set1_ps(va[8]);
                _va1 = _mm_set1_ps(va[9]);
                _va2 = _mm_set1_ps(va[10]);
                _va3 = _mm_set1_ps(va[11]);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a20-a23) * k02
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a20-a23) * k12
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a20-a23) * k22
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a20-a23) * k32
                // k3
                _vb = _mm_loadu_ps(vb + 12);
                _va0 = _mm_set1_ps(va[12]);
                _va1 = _mm_set1_ps(va[13]);
                _va2 = _mm_set1_ps(va[14]);
                _va3 = _mm_set1_ps(va[15]);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a30-a33) * k03
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a30-a33) * k13
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a30-a33) * k23
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a30-a33) * k33
                va += 16;
                vb += 16;
            }
            /* K tail */
            for (; k < K; k++)
            {
                // k0
                __m128 _vb = _mm_loadu_ps(vb);
                __m128 _va0 = _mm_set1_ps(va[0]);
                __m128 _va1 = _mm_set1_ps(va[1]);
                __m128 _va2 = _mm_set1_ps(va[2]);
                __m128 _va3 = _mm_set1_ps(va[3]);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30
                va += 4;
                vb += 4;
            }
            _mm_storeu_ps(output0, _sum0);
            _mm_storeu_ps(output1, _sum1);
            _mm_storeu_ps(output2, _sum2);
            _mm_storeu_ps(output3, _sum3);
#else
            /* scalar fallback: same 4x4 accumulation */
            float sum0[4] = {0};
            float sum1[4] = {0};
            float sum2[4] = {0};
            float sum3[4] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 4; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 4;
            }
            for (int n = 0; n < 4; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif // __SSE__
            output0 += 4;
            output1 += 4;
            output2 += 4;
            output3 += 4;
        }
        /* column tail: one column at a time for the 4-row block */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 4) * 4 * K;
            float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
#if __SSE__
            __m128 _sum0_3 = _mm_set1_ps(0.f);
            __m128 _sum0 = _mm_set1_ps(0.f);
            __m128 _sum1 = _mm_set1_ps(0.f);
            __m128 _sum2 = _mm_set1_ps(0.f);
            __m128 _sum3 = _mm_set1_ps(0.f);
            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _vb1 = _mm_set1_ps(vb[1]);
                __m128 _vb2 = _mm_set1_ps(vb[2]);
                __m128 _vb3 = _mm_set1_ps(vb[3]);
                __m128 _va0 = _mm_loadu_ps(va);
                __m128 _va1 = _mm_loadu_ps(va + 4);
                __m128 _va2 = _mm_loadu_ps(va + 8);
                __m128 _va3 = _mm_loadu_ps(va + 12);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); // sum0 += (k00-k30) * a00
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1)); // sum1 += (k01-k31) * a10
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2)); // sum2 += (k02-k32) * a20
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3)); // sum3 += (k03-k33) * a30
                va += 16;
                vb += 4;
            }
            /* reduce the 4 partial vectors into one before the K tail */
            _sum0 = _mm_add_ps(_sum0, _sum1);
            _sum2 = _mm_add_ps(_sum2, _sum3);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum2);
            for (; k < K; k++)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _va = _mm_loadu_ps(va);
                _sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0)); // sum0 += (k00-k30) * a00
                va += 4;
                vb += 1;
            }
            /* lane l is the dot product for output row i+l
             * (vector subscripting is a GCC/Clang extension) */
            output0[0] = _sum0_3[0];
            output1[0] = _sum0_3[1];
            output2[0] = _sum0_3[2];
            output3[0] = _sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                va += 4;
                vb += 1;
            }
            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif // __SSE__
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }
    // output ch0
#pragma omp parallel for num_threads(num_thread)
    for (int i = remain_outch_start; i < M; i++)
    {
        float* output = pC + i * N;
        int j = 0;
        for (; j + 3 < N; j += 4)
        {
            float* va = pA_t + (i / 4 + i % 4) * 4 * K;
            float* vb = pB_t + (j / 4) * 4 * K;
#if __SSE__
            __m128 _sum0 = _mm_set1_ps(0.f);
            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m128 _va0 = _mm_set1_ps(va[0]);
                __m128 _va1 = _mm_set1_ps(va[1]);
                __m128 _va2 = _mm_set1_ps(va[2]);
                __m128 _va3 = _mm_set1_ps(va[3]);
                __m128 _vb0 = _mm_loadu_ps(vb);
                __m128 _vb1 = _mm_loadu_ps(vb + 4);
                __m128 _vb2 = _mm_loadu_ps(vb + 8);
                __m128 _vb3 = _mm_loadu_ps(vb + 12);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1)); // sum0 += (a10-a13) * k01
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2)); // sum0 += (a20-a23) * k02
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3)); // sum0 += (a30-a33) * k03
                va += 4;
                vb += 16;
            }
            for (; k < K; k++)
            {
                // k0
                __m128 _va0 = _mm_set1_ps(va[0]);
                __m128 _vb0 = _mm_loadu_ps(vb);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00
                va += 1;
                vb += 4;
            }
            _mm_storeu_ps(output, _sum0);
#else
            float sum[4] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 4; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 4;
            }
            for (int n = 0; n < 4; n++)
            {
                output[n] = sum[n];
            }
#endif // __SSE__
            output += 4;
        }
        /* scalar dot-product tail for the last columns */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 4 + i % 4) * 4 * K;
            float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
            int k = 0;
#if __SSE__
            __m128 _sum0 = _mm_set1_ps(0.f);
            for (; k + 3 < K; k += 4)
            {
                __m128 _p0 = _mm_loadu_ps(vb);
                __m128 _k0 = _mm_loadu_ps(va);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
                va += 4;
                vb += 4;
            }
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
            float sum0 = 0.f;
#endif // __SSE__
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                va += 1;
                vb += 1;
            }
            output[0] = sum0;
            output++;
        }
    }
}
#endif // __AVX__
/* fp32 convolution core for one (batch n, group) slice: run the packed
 * SGEMM over the interleaved filter and the packed im2col buffer, then
 * apply bias and the activation encoded in param->activation
 * (0 -> relu, >0 -> relu6-style clamp to [0, 6]). */
static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                       struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int chan_area = out_h * out_w;
    int out_image_size = output->dims[1] * chan_area;
    /* per-group views into the packed buffers and the output tensor */
    float* kernel_pack = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* input_pack = priv_info->im2col_buffer_pack4;
    float* out_ptr = ( float* )output->data + n * out_image_size + outchan_g * group * chan_area;
    float* bias_ptr = NULL;
    if (bias)
        bias_ptr = ( float* )bias->data + outchan_g * group;
    sgemm(outchan_g, chan_area, kernel_size, kernel_pack, input_pack, out_ptr, num_thread);
    int total = outchan_g * chan_area;
    /* add per-channel bias */
    if (bias)
    {
        for (int idx = 0; idx < total; idx++)
            out_ptr[idx] += bias_ptr[idx / chan_area];
    }
    /* activation == 0: relu */
    if (param->activation == 0)
    {
        for (int idx = 0; idx < total; idx++)
        {
            if (out_ptr[idx] < 0)
                out_ptr[idx] = 0;
        }
    }
    /* activation > 0: relu6-style clamp */
    if (param->activation > 0)
    {
        for (int idx = 0; idx < total; idx++)
        {
            if (out_ptr[idx] < 0)
                out_ptr[idx] = 0;
            if (out_ptr[idx] > 6)
                out_ptr[idx] = 6;
        }
    }
}
/* uint8 (simulator) convolution core for one (batch n, group) slice:
 * run the packed SGEMM in fp32 into a scratch buffer, apply bias
 * (int32 bias dequantized with input_scale * filter_scale) and the
 * activation (0 -> relu, >0 -> clamp to [0, 6]), then requantize the
 * result to uint8 with the output tensor's scale/zero_point.
 * Fix: the sys_malloc result is now checked before use. */
static void sgemm_uint8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                        struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                        int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * out_h * out_w;
    float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
    uint8_t* output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int* bias_int32 = NULL;
    float bias_scale = 0.f;
    if (bias)
    {
        bias_int32 = ( int* )bias->data + outchan_g * group;
        bias_scale = input->scale * filter->scale;
    }
    float* filter_sgemm = interleave_fp32;
    float* input_sgemm_pack4 = im2col_pack4_fp32;
    /* fp32 scratch for the GEMM result before requantization */
    float* output_sgemm = ( float* )sys_malloc(outchan_g * out_h * out_w * sizeof(float));
    if (output_sgemm == NULL)
        return; /* allocation failed; nothing can be computed for this slice */
    sgemm(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);
    /* process bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_sgemm[output_off] += ( float )bias_int32[i] * bias_scale;
            }
        }
    }
    /* process activation relu */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
            }
        }
    }
    /* process activation relu6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
                if (output_sgemm[output_off] > 6)
                    output_sgemm[output_off] = 6;
            }
        }
    }
    /* quant from fp32 to uint8, saturating to [0, 255] */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[output_off] = udata;
        }
    }
    sys_free(output_sgemm);
}
/* Check whether the convolution is eligible for the winograd implementation.
 * Winograd is only used for non-grouped 3x3 stride-1 dilation-1 convolutions
 * with at least 16 input/output channels (output channels a multiple of 16),
 * and only when the feature map is large enough to pay off.
 * Returns 1 when winograd can be used, 0 otherwise. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    /* small feature maps: not worth the transform overhead */
    if (in_h <= 10 && in_w <= 10)
        return 0;

    /* shape must be exactly conv3x3 s1 d1, no grouping */
    int is_conv3x3s1d1 = (param->kernel_h == 3) && (param->kernel_w == 3) &&
                         (param->stride_h == 1) && (param->stride_w == 1) &&
                         (param->dilation_h == 1) && (param->dilation_w == 1);
    if (param->group != 1 || !is_conv3x3s1d1)
        return 0;

    /* channel counts must fit the winograd kernels */
    if (param->input_channel < 16 || param->output_channel < 16 || (param->output_channel % 16) != 0)
        return 0;

    return 1;
}
/* Size in bytes of the shared im2col buffer for one (batch, group) slice:
 * one column of kernel_size elements per output pixel. */
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    int chan_per_group = param->input_channel / param->group;
    int col_height = chan_per_group * param->kernel_h * param->kernel_w;
    int col_count = output->dims[2] * output->dims[3];
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (input->data_type == TENGINE_DT_UINT8) ? 4 : input->elem_size;
    return elem_size * col_count * col_height;
}
#if __AVX__
/* AVX variant: size in bytes of the pack4 im2col buffer.
 * Columns are grouped in blocks of 8; the tail columns each get a full slot. */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;
    return 8 * K * (N / 8 + N % 8) * elem_size;
}
/* AVX variant: size in bytes of the re-packed interleave buffer.
 * Output channels are packed in blocks of 8, then 4, then singles. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;
    return 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
}
/* AVX variant: re-pack the row-major interleaved kernels (M rows of K floats)
 * into the pack layout the AVX sgemm kernels expect: channel blocks of 8,
 * then blocks of 4, then single channels, each transposed so that one K-step
 * holds the block's channels contiguously. */
void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info)
{
    const float* src = ( const float* )priv_info->interleave_buffer;
    float* dst = ( float* )priv_info->interleave_buffer_pack4;

    /* blocks of 8 output channels */
    int block8 = M >> 3;
    int tail_start = block8 << 3;
    for (int b = 0; b < block8; b++)
    {
        int p = b * 8;
        const float* row[8];
        for (int r = 0; r < 8; r++)
            row[r] = src + (p + r) * K;
        float* out = dst + (p / 8) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 8; r++)
                *out++ = *row[r]++;
        }
    }

    /* blocks of 4 output channels */
    int block4 = (M - tail_start) >> 2;
    for (int b = 0; b < block4; b++)
    {
        int p = tail_start + b * 4;
        const float* row[4];
        for (int r = 0; r < 4; r++)
            row[r] = src + (p + r) * K;
        float* out = dst + (p / 8 + (p % 8) / 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 4; r++)
                *out++ = *row[r]++;
        }
    }
    tail_start += block4 << 2;

    /* leftover single channels, copied row by row */
    for (int p = tail_start; p < M; p++)
    {
        const float* row = src + p * K;
        float* out = dst + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;
        for (int q = 0; q < K; q++)
            *out++ = *row++;
    }
}
#else
/* SSE/scalar variant: size in bytes of the pack4 im2col buffer.
 * Columns are grouped in blocks of 4; the tail columns each get a full slot. */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;
    return 4 * K * (N / 4 + N % 4) * elem_size;
}
/* SSE/scalar variant: size in bytes of the re-packed interleave buffer.
 * Output channels are packed in blocks of 4, then singles. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;
    return 4 * K * (M / 4 + M % 4) * elem_size;
}
/* SSE/scalar variant: re-pack the row-major interleaved kernels (M rows of
 * K floats) into channel blocks of 4 (then singles), each transposed so one
 * K-step holds the block's channels contiguously. */
void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info)
{
    const float* src = ( const float* )priv_info->interleave_buffer;
    float* dst = ( float* )priv_info->interleave_buffer_pack4;

    /* blocks of 4 output channels */
    int block4 = M >> 2;
    int tail_start = block4 << 2;
    for (int b = 0; b < block4; b++)
    {
        int p = b * 4;
        const float* row[4];
        for (int r = 0; r < 4; r++)
            row[r] = src + (p + r) * K;
        float* out = dst + (p / 4) * 4 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 4; r++)
                *out++ = *row[r]++;
        }
    }

    /* leftover single channels, copied row by row */
    for (int p = tail_start; p < M; p++)
    {
        const float* row = src + p * K;
        float* out = dst + (p / 4 + p % 4) * 4 * K;
        for (int q = 0; q < K; q++)
            *out++ = *row++;
    }
}
#endif
/* Prepare the convolution node for execution: decide on winograd, allocate
 * the im2col / interleave working buffers (unless the caller supplied them),
 * and interleave the kernel weights.
 * Returns 0 on success, -1 on allocation failure. */
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implement, only for fp32 conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }

    /* allocate the im2col buffer unless the caller supplied one */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1; /* fix: report allocation failure instead of crashing later */
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* allocate the packed im2col buffer unless the caller supplied one */
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }

    /* allocate the kernel interleave buffer unless the caller supplied one */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* interleave the kernel weights (uint8 weights go through the uint8 path) */
    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);

    if (priv_info->external_interleave_pack4_mem)
    {
        /* re-pack the interleaved kernels into the pack4/pack8 gemm layout */
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;
        conv_hcl_interleave_pack4(M, K, priv_info);

        /* the plain interleave buffer is no longer needed once re-packed */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    else
    {
        /* no re-pack: the pack4 pointer simply aliases the interleave buffer */
        priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer;
        priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size;
    }

    return 0;
}
/* Release the working buffers allocated in conv_hcl_prerun.
 * Buffers marked external_* are caller-owned and never freed here.
 * Fixes over the previous version:
 *  - interleave_buffer leaked when the pack4 path was disabled (then
 *    interleave_buffer_pack4 merely aliased it and nothing freed the
 *    underlying allocation);
 *  - aliasing pointers are cleared before freeing so no pointer is
 *    passed to sys_free twice. */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }

    /* separate pack4 interleave buffer is only allocated by prerun when the
     * pack4 path is enabled */
    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    /* plain interleave buffer (pack4 pointer may alias it - drop the alias
     * first to avoid a double free) */
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        if (priv_info->interleave_buffer_pack4 == priv_info->interleave_buffer)
            priv_info->interleave_buffer_pack4 = NULL;
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    /* im2col buffer (conv_hcl_run may have aliased im2col_buffer_pack4 to it) */
    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        if (priv_info->im2col_buffer_pack4 == priv_info->im2col_buffer)
            priv_info->im2col_buffer_pack4 = NULL;
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    /* packed im2col buffer */
    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }

    return 0;
}
/* Execute the convolution: for every (batch, group) slice run im2col,
 * optionally re-pack the columns, then dispatch the sgemm kernel that
 * matches the tensor data type. Returns 0 on success. */
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                 struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int data_type = input_tensor->data_type;

    /* winograd fast path, decided in prerun */
    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread,
                                 cpu_affinity);
    }

    /* gemm dimensions are the same for every slice - hoist them */
    int batch = input_tensor->dims[0];
    int K = filter_tensor->elem_num / filter_tensor->dims[0];
    int N = output_tensor->dims[2] * output_tensor->dims[3];

    for (int n = 0; n < batch; n++)
    {
        for (int g = 0; g < group; g++)
        {
            /* expand this slice's input patch into im2col columns */
            im2col_ir(input_tensor, output_tensor, priv_info, param, n, g);

            float* im2col_fp32 = priv_info->im2col_buffer;
            if (priv_info->external_interleave_pack4_mem)
                input_pack4(K, N, im2col_fp32, ( float* )priv_info->im2col_buffer_pack4, num_thread);
            else
                priv_info->im2col_buffer_pack4 = im2col_fp32; /* no re-pack: alias the raw columns */

            if (data_type == TENGINE_DT_UINT8)
                sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, n, g,
                            num_thread);
            else
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, n, g,
                           num_thread);
        }
    }

    return 0;
}
/* Adopt a caller-owned im2col buffer; prerun will not allocate one and
 * postrun will not free it. Returns 0. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
/* Adopt a caller-owned packed im2col buffer; prerun will not allocate one
 * and postrun will not free it. Returns 0. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    priv_info->external_im2col_pack4_mem = 1;
    return 0;
}
|
for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd foo
// Clause-less '#pragma omp for simd' is accepted before a for loop and rejected otherwise
// (clang -verify test: the diagnostic comments below are the assertions).
void test_no_clause() {
  int i;
#pragma omp for simd
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}}
#pragma omp for simd
  ++i;
}
// Branches (goto/return) may not enter or leave the protected OpenMP region
// (clang -verify test: the diagnostic comments below are the assertions).
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown clause tokens after the directive are diagnosed as ignored extra tokens
// (clang -verify test: the diagnostic comments below are the assertions).
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Stray punctuation after the directive or a clause is diagnosed as ignored extra tokens
// (clang -verify test: the diagnostic comments below are the assertions).
void test_non_identifiers() {
  int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Parser and sema diagnostics for the 'safelen' clause: malformed parentheses,
// non-constant / non-positive arguments
// (clang -verify test: the diagnostic comments below are the assertions).
void test_safelen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Parser and sema diagnostics for the 'simdlen' clause, mirroring test_safelen
// (clang -verify test: the diagnostic comments below are the assertions).
void test_simdlen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// simdlen must not exceed safelen, regardless of clause order
// (clang -verify test: the diagnostic comments below are the assertions).
void test_safelen_simdlen() {
  int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}
// Parser and sema diagnostics for the 'collapse' clause, including the
// required nesting depth of for loops and nesting restrictions inside simd
// (clang -verify test: the diagnostic comments below are the assertions).
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
#pragma omp for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd collapse(2)
  for (i = 0; i < 16; ++i)
    // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
// Parser and sema diagnostics for the 'linear' clause: malformed lists, steps,
// and conflicts with private/lastprivate data-sharing attributes
// (clang -verify test: the diagnostic comments below are the assertions).
void test_linear() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp for simd linear(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp for simd private(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp for simd linear(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp for simd linear(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp for simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp for simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
}
// Parser and sema diagnostics for the 'aligned' clause: malformed lists,
// alignment expressions, non-pointer arguments, and duplicate entries
// (clang -verify test: the diagnostic comments below are the assertions).
void test_aligned() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp for simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Parser diagnostics for the 'private' clause plus valid uses with 1-3 variables
// (clang -verify test: the diagnostic comments below are the assertions).
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Parser diagnostics for the 'lastprivate' clause plus valid uses with 1-3 variables
// (clang -verify test: the diagnostic comments below are the assertions).
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Diagnostics verification for malformed 'firstprivate' clauses on
// '#pragma omp for simd', plus valid combinations with 'lastprivate'.
// The 'expected-*@+N' comments are clang -verify directives and are
// position-sensitive: do not insert lines between a directive and its pragma.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Verifies that '#pragma omp for simd' rejects loop induction variables of
// floating-point type (the loop variable must be integer or pointer).
// The 'expected-error@+N' comments are clang -verify directives and are
// position-sensitive: do not insert lines between a directive and its loop.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
|
id2pos.c | /*
gcc -fopenmp -lm -lgsl -lgslcblas -lgad -L ./ mk_id_list.c -o ~/bin/mk_id_list OctTree.o
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include "libgad.h"
#include "ompfuncs.h"
#define USE 63
// #ifdef LONGIDS
// typedef unsigned long long IDtype;
// #else
// typedef unsigned int IDtype;
// #endif
/* qsort/bsearch comparator for IDtype values: returns <0, 0, >0 for
 * ascending order. Dereferences once into const locals to avoid the
 * pointer-heavy comparisons of the usual pattern. */
int cmp_IDtype (const void *first, const void *second)
{
    const IDtype lhs = *(const IDtype *)first;
    const IDtype rhs = *(const IDtype *)second;
    if (lhs < rhs) return -1;
    if (lhs > rhs) return 1;
    return 0;
}
const int MAX_HALO_ID = 100000;
const float EXTEND = 500;
const float TRACE_FACTOR = 2.;
const float SEARCHDIST = 25;
const float MAXDIST = 3000.;
const float SOFTENING = 1.0;
/* Print the command-line help text to stderr and terminate the program.
 * Fix: "snaphsot" -> "snapshot" in the user-visible help string. */
void usage()
{
    fprintf(stderr," search positions of ID list - reads a list of IDs and creates position file of corresponding particles\n");
    fprintf(stderr,"\t-o \t<ID list base file name>\n");
    fprintf(stderr,"\t-i \t<snapshot file name>\n");
    fprintf(stderr,"\t-max\t<max Halo ID>\n");
    fprintf(stderr,"\t-use\t<bitcode particle types to use (default 2¹)>\n\n");
    exit(1);
}
/* Reads a Gadget snapshot, sorts its particles by ID, then for each halo ID
 * in [start_id, max_halo_id] loads an "<outbase>_<haloid>" ID-list file,
 * looks up each particle by ID (binary search on the sorted snapshot) and
 * writes the box-normalized positions to "<outbase>_positions_<haloid>". */
int main (int argc, char *argv[])
{
    FILE *fp;
    char infile[256];
    char outbase[256];
    char catname[256];
    char **output;              /* NOTE(review): never used in the visible code */
    int i,j,k, usepart;         /* NOTE(review): k is never used below */
    struct gadpart *part, *wpart;   /* wpart is never used below */
    struct header head;
    int max_halo_id = MAX_HALO_ID;
    float extend = EXTEND;          /* parsed via -e but unused below */
    float trace_factor = TRACE_FACTOR;  /* parsed via -tf but unused below */
    int verbose = 0;
    float searchdist = SEARCHDIST;  /* parsed via -sd but unused below */
    float def_maxdist = MAXDIST;    /* parsed via -md but unused below */
    double conv_dist = 1.;          /* parsed via -cd but unused below */
    int start_id = 0;
    int num_halos = 0;
    int write_catalogue = 0;        /* set via -c but unused below */
    int write_gad_file = 0;         /* set via -gad but unused below */
    int outpos = 1;
    double soft = SOFTENING;        /* parsed via -sfl but unused below */
    strcpy(outbase,"idlist");
    i=1;
    usepart=USE;
    if (1==argc) usage();
    /* ---- command-line parsing; each option consumes itself (and its value) ---- */
    while (i<argc)
    {
        if (!strcmp(argv[i],"-i"))
        {
            i++;
            strcpy(infile,argv[i]);
            i++;
        }
        else if (*argv[i]!='-')
        {
            /* a bare argument is also accepted as the input snapshot name */
            strcpy(infile,argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-o"))
        {
            i++;
            strcpy(outbase,argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-c"))
        {
            i++;
            strcpy(catname,argv[i]);
            write_catalogue = 1;
            i++;
        }
        else if (!strcmp(argv[i],"-gad"))
        {
            i++;
            write_gad_file = 1;
        }
        else if (!strcmp(argv[i],"-pos"))
        {
            i++;
            outpos = 1;
        }
        else if (!strcmp(argv[i],"-v"))
        {
            i++;
            verbose = 1;
        }
        else if (!strcmp(argv[i],"-s"))
        {
            i++;
            start_id = atoi(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-max"))
        {
            i++;
            max_halo_id = atoi(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-e"))
        {
            i++;
            extend = atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-sfl"))
        {
            i++;
            soft = atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-md"))
        {
            i++;
            def_maxdist = atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-cd"))
        {
            i++;
            conv_dist = atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-tf"))
        {
            i++;
            trace_factor = atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-sd"))
        {
            i++;
            searchdist = atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-use")) {
            i++;
            if (!strcmp(argv[i],"all")) usepart=63;
            else usepart=atoi(argv[i]);
            i++;
        } else {
            usage();
        }
    }
    if (verbose)
    {
        printf("reading snapshot\n");
        fflush(stdout);
    }
    /* ---- load and ID-sort the snapshot so per-halo lookups can bsearch ---- */
    unsigned int numpart_all;
    if (!(numpart_all=readgadget_part(infile, &head, &part)))
    {
        extern int libgaderr;
        printf("error reading file %s\nError Code %d\n",infile, libgaderr);
        exit(1);
    }
    if (verbose)
    {
        printf("sorting snapshot\n");
        fflush(stdout);
    }
    myqsort(part, numpart_all, sizeof(gadpart), cmp_id);
    /*********************************************************************
       Program code goes here
     *********************************************************************/
    if (verbose)
    {
        printf("main loop...\n");
        fflush(stdout);
    }
    int haloid;
    /* Each halo is independent; num_halos counts halos whose ID-list file exists. */
#pragma omp parallel for private (i,j,k) reduction (+ : num_halos)
    for ( haloid = start_id; haloid <= max_halo_id; haloid++ )
    {
        char idlistname[128];
        sprintf(idlistname, "%s_%d", outbase, haloid);
        char posfilename[128];
        sprintf(posfilename, "%s_positions_%d", outbase, haloid);
        FILE *fp = fopen(idlistname, "rb");   /* shadows the outer fp on purpose */
        if (fp == NULL)
        {
            continue;   /* missing halo file: simply skip this ID */
        }
        num_halos++;
        int numids=0;
        fltarr center;
        IDtype *idlist;
        fltarr *pos = NULL;
        float maxdist = 0;
        /* ID-list file layout: numids (int), center (3 floats),
         * then if numids>0: maxdist (float) followed by numids IDs */
        fread(&numids, sizeof(int), 1, fp);
        fread(center, sizeof(float), 3, fp);
        if (verbose)
        {
            printf("haloid %d | numids %d | center %g %g %g\n", haloid, numids, center[0], center[1], center[2]);
            fflush(stdout);
        }
        if (numids)
        {
            fread(&maxdist, sizeof(float), 1, fp);
            idlist = calloc(numids, sizeof(IDtype));
            /* NOTE(review): idlist is never freed -> leaks once per halo */
            fread(&idlist[0], sizeof(IDtype), numids, fp);
            /* sort the requested IDs so the bsearch window below can shrink */
            qsort(idlist, numids, sizeof(IDtype), cmp_IDtype);
            pos = (fltarr*) calloc(numids, sizeof(fltarr));
        }
        fclose(fp);
        if (verbose)
        {
            printf("haloid %d | center %g %g %g\n", haloid, center[0], center[1], center[2]);
            fflush(stdout);
        }
        if (numids)
        {
            int numfnd = 0;
            gadpart *start = part;
            for ( i = 0; i < numids; i++ )
            {
                gadpart *fnd;
                gadpart idpart;
                idpart.id = idlist[i];
                /* both idlist and part are ID-sorted, so each hit lets us
                 * restrict the next search to the remaining tail */
                long int size = &part[numpart_all] - start;
                fnd = bsearch( &idpart, start, size, sizeof(gadpart), cmp_id);
                if (fnd != NULL)
                {
                    start = fnd;
                    for ( j = 0; j < 3; j++)
                        pos[numfnd][j] = fnd->pos[j] / head.boxsize;   /* box-normalized */
                    numfnd++;
                }
                // if (numfnd >= numids) break;
            }
            if (verbose)
            {
                printf("haloid %d | numfnd %d\n", haloid, numfnd);
                if (numfnd != numids)
                {
                    /* NOTE(review): exit() inside an OpenMP parallel region
                     * aborts all threads; and this check only runs if verbose */
                    fprintf(stderr, "particle not found | halo %d\n", haloid);
                    exit(1);
                }
                fflush(stdout);
            }
        }
        /* output: count then the packed normalized positions */
        int totnumids = numids;
        fp = fopen(posfilename, "w");
        fwrite(&totnumids, sizeof(int), 1, fp);
        if (numids)
            fwrite(&pos[0], sizeof(fltarr), numids, fp);
        fclose(fp);
        if (numids)
        {
            if (outpos)
                free(pos);
        }
    }
    return 0;
}
|
array-mpi.h | /**
* @file
* @brief Reference MPI communication with arrays
*
* This includes packing/unpacking and communication with MPI_Types
*/
#ifndef BRICK_ARRAY_MPI_H
#define BRICK_ARRAY_MPI_H
#include "brick-mpi.h"
#include <mpi.h>
/**
* OpenMP-enabled copy kernel
* @param dst destination
* @param src source
* @param size in number of bElem
*/
/**
 * Vectorized element-wise copy of `size` bElem values from src to dst.
 * @param dst destination buffer
 * @param src source buffer
 * @param size number of elements to copy
 */
inline void elemcpy(bElem *dst, const bElem *src, unsigned long size) {
#pragma omp simd
  for (unsigned long idx = 0; idx < size; ++idx)
    dst[idx] = src[idx];
}
// Recursively serializes the surface region selected by `neighbor` into
// buffer_out, peeling the outermost dimension (index dim-1) at each level.
// Returns the advanced write cursor so successive packs can be chained.
template<unsigned dim>
inline bElem *pack(bElem *arr, BitSet neighbor, bElem *buffer_out, const std::vector<unsigned long> &arrstride,
                   const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost) {
  // Inner region
  long sec = 0;  // +1/-1: this dimension faces the neighbor; 0: span the interior
  long st = 0;   // first plane index along dimension d
  int d = dim - 1;
  if (neighbor.get(dim)) {
    // positive face: the ghost[d]-thick slab at the high end of the interior
    sec = 1;
    st = padding[d] + dimlist[d];
  } else if (neighbor.get(-(int) dim)) {
    // negative face: the ghost[d]-thick slab at the low end of the interior
    sec = -1;
    st = padding[d] + ghost[d];
  }
  if (sec) {
    for (unsigned i = 0; i < ghost[d]; ++i)
      buffer_out = pack<dim - 1>(arr + arrstride[d] * (st + i), neighbor, buffer_out,
                                 arrstride, dimlist, padding, ghost);
  } else {
    // not a face in this dimension: recurse over the full interior extent
    for (unsigned i = 0; i < dimlist[d]; ++i)
      buffer_out = pack<dim - 1>(arr + arrstride[d] * (padding[d] + ghost[d] + i), neighbor, buffer_out,
                                 arrstride, dimlist, padding, ghost);
  }
  return buffer_out;
}
// Recursion base case: dimension 1 is contiguous in memory, so the selected
// span is copied with a single elemcpy and the advanced cursor is returned.
template<>
inline bElem *pack<1>(bElem *arr, BitSet neighbor, bElem *buffer_out, const std::vector<unsigned long> &arrstride,
                      const std::vector<long> &dimlist, const std::vector<long> &padding,
                      const std::vector<long> &ghost) {
  // Inner region
  long sec = 0;  // +1/-1: face span of length ghost[0]; 0: interior span of length dimlist[0]
  long st = 0;
  int d = 0;
  if (neighbor.get(1)) {
    sec = 1;
    st = padding[d] + dimlist[d];
  } else if (neighbor.get(-1)) {
    sec = -1;
    st = padding[d] + ghost[d];
  }
  if (sec != 0) {
    elemcpy(buffer_out, arr + st, ghost[d]);
    return buffer_out + ghost[d];
  } else {
    elemcpy(buffer_out, arr + padding[d] + ghost[d], dimlist[d]);
    return buffer_out + dimlist[d];
  }
}
// Mirror of pack<dim>: recursively scatters a received buffer into the ghost
// zone selected by `neighbor` (outside the interior, unlike pack which reads
// just inside it). Returns the advanced read cursor.
template<unsigned dim>
inline bElem *unpack(bElem *arr, BitSet neighbor, bElem *buffer_recv, const std::vector<unsigned long> &arrstride,
                     const std::vector<long> &dimlist, const std::vector<long> &padding,
                     const std::vector<long> &ghost) {
  // Inner region
  long sec = 0;  // +1/-1: this dimension faces the neighbor; 0: span the interior
  long st = 0;
  int d = (int) dim - 1;
  if (neighbor.get(dim)) {
    // positive face: ghost planes beyond the high end of the interior
    sec = 1;
    st = padding[d] + dimlist[d] + ghost[d];
  } else if (neighbor.get(-(int) dim)) {
    // negative face: ghost planes just after the padding, before the interior
    sec = -1;
    st = padding[d];
  }
  if (sec) {
    for (unsigned i = 0; i < ghost[d]; ++i)
      buffer_recv = unpack<dim - 1>(arr + arrstride[d] * (st + i), neighbor, buffer_recv,
                                    arrstride, dimlist, padding, ghost);
  } else {
    for (unsigned i = 0; i < dimlist[d]; ++i)
      buffer_recv = unpack<dim - 1>(arr + arrstride[d] * (padding[d] + ghost[d] + i), neighbor, buffer_recv,
                                    arrstride, dimlist, padding, ghost);
  }
  return buffer_recv;
}
// Recursion base case of unpack: the dimension-1 span is contiguous, so the
// received data is written with one elemcpy and the read cursor is advanced.
template<>
inline bElem *unpack<1>(bElem *arr, BitSet neighbor, bElem *buffer_recv, const std::vector<unsigned long> &arrstride,
                        const std::vector<long> &dimlist, const std::vector<long> &padding,
                        const std::vector<long> &ghost) {
  // Inner region
  long sec = 0;  // +1/-1: ghost span of length ghost[0]; 0: interior span of length dimlist[0]
  long st = 0;
  int d = 0;
  if (neighbor.get(1)) {
    sec = 1;
    st = padding[d] + dimlist[d] + ghost[d];
  } else if (neighbor.get(-1)) {
    sec = -1;
    st = padding[d];
  }
  if (sec) {
    elemcpy(arr + st, buffer_recv, ghost[d]);
    return buffer_recv + ghost[d];
  } else {
    elemcpy(arr + padding[d] + ghost[d], buffer_recv, dimlist[d]);
    return buffer_recv + dimlist[d];
  }
}
// Number of elements in the exchange region selected by `region`: each
// dimension contributes its ghost width when it faces the neighbor, otherwise
// its (optionally ghost-shrunk, when inner==true) interior extent.
inline unsigned
evalsize(BitSet region, const std::vector<long> &dimlist, const std::vector<long> &ghost, bool inner = true) {
  unsigned total = 1;
  const int ndim = (int) dimlist.size();
  for (int axis = 1; axis <= ndim; ++axis) {
    const long g = ghost[axis - 1];
    if (region.get(axis) || region.get(-axis)) {
      total *= g;
    } else {
      total *= dimlist[axis - 1] - (inner ? 2 * g : 0);
    }
  }
  return total;
}
extern std::vector<bElem *> arr_buffers_out;
extern std::vector<bElem *> arr_buffers_recv;
// ID is used to prevent message mismatch from messages with the same node, low performance only for validation testing.
// Performs one ghost exchange for array `arr` with all 3^dim-1 neighbors:
// pack into per-neighbor buffers, nonblocking send/recv, wait, unpack.
// Timings are accumulated into the global packtime/calltime/waittime counters.
template<unsigned dim>
void exchangeArr(bElem *arr, const MPI_Comm &comm, std::unordered_map<uint64_t, int> &rank_map,
                 const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost) {
  std::vector<BitSet> neighbors;
  allneighbors(0, 1, dim, neighbors);
  // drop the middle entry (presumably the self/center neighbor — confirm
  // against allneighbors' enumeration order)
  neighbors.erase(neighbors.begin() + (neighbors.size() / 2));
  std::vector<unsigned long> tot(neighbors.size());          // element count per neighbor
  std::vector<MPI_Request> requests(neighbors.size() * 2);   // one recv + one send each
  std::vector<MPI_Status> stats(requests.size());
  // strides of the padded array: dimension i spans dimlist[i] + 2*(padding+ghost)
  std::vector<unsigned long> arrstride(dimlist.size());
  unsigned long stri = 1;
  for (int i = 0; i < arrstride.size(); ++i) {
    arrstride[i] = stri;
    stri = stri * ((padding[i] + ghost[i]) * 2 + dimlist[i]);
  }
  for (int i = 0; i < (int) neighbors.size(); ++i) {
    tot[i] = (unsigned long) evalsize(neighbors[i], dimlist, ghost, false);
  }
  // lazily allocate the global buffer cache on first use
  // NOTE(review): buffers are sized from this call's tot[]; later calls with
  // larger regions would reuse undersized buffers — confirm callers are uniform
  if (arr_buffers_out.size() == 0)
    for (int i = 0; i < (int) neighbors.size(); ++i) {
      arr_buffers_recv.emplace_back((bElem*)aligned_alloc(4096, sizeof(bElem) * tot[i]));
      arr_buffers_out.emplace_back((bElem*)aligned_alloc(4096, sizeof(bElem) * tot[i]));
    }
  double st = omp_get_wtime(), ed;
  // Pack
#pragma omp parallel for
  for (int i = 0; i < (int) neighbors.size(); ++i)
    pack<dim>(arr, neighbors[i], arr_buffers_out[i], arrstride, dimlist, padding, ghost);
  ed = omp_get_wtime();
  packtime += ed - st;
#ifdef BARRIER_TIMESTEP
  MPI_Barrier(comm);
#endif
  st = omp_get_wtime();
  // recv tag (size-i-1) pairs with the opposite neighbor's send tag i
  for (int i = 0; i < (int) neighbors.size(); ++i) {
    MPI_Irecv(arr_buffers_recv[i], (int) (tot[i] * sizeof(bElem)), MPI_CHAR, rank_map[neighbors[i].set],
              (int) neighbors.size() - i - 1, comm, &(requests[i * 2]));
    MPI_Isend(arr_buffers_out[i], (int) (tot[i] * sizeof(bElem)), MPI_CHAR, rank_map[neighbors[i].set], i, comm,
              &(requests[i * 2 + 1]));
  }
  ed = omp_get_wtime();
  calltime += ed - st;
  st = ed;
  // Wait
  MPI_Waitall(static_cast<int>(requests.size()), requests.data(), stats.data());
  ed = omp_get_wtime();
  waittime += ed - st;
  st = ed;
  // Unpack
#pragma omp parallel for
  for (int i = 0; i < (int) neighbors.size(); ++i)
    unpack<dim>(arr, neighbors[i], arr_buffers_recv[i], arrstride, dimlist, padding, ghost);
  ed = omp_get_wtime();
  packtime += ed - st;
}
// Builds an MPI subarray datatype describing the send region for `neighbor`
// (same geometry as pack<dim>: the ghost-thick slab just inside the interior
// on faced dimensions, the full interior elsewhere). The returned type is
// NOT committed; callers must MPI_Type_commit it.
inline MPI_Datatype pack_type(BitSet neighbor, const std::vector<long> &dimlist, const std::vector<long> &padding,
                              const std::vector<long> &ghost) {
  int ndims = dimlist.size();
  std::vector<int> size(ndims), subsize(ndims), start(ndims);
  // dd iterates slowest-varying first, as MPI_ORDER_C expects
  for (long dd = 0; dd < dimlist.size(); ++dd) {
    long d = (long)dimlist.size() - dd - 1;
    size[dd] = dimlist[d] + 2 * (padding[d] + ghost[d]);
    long dim = d + 1;
    long sec = 0;
    if (neighbor.get(dim)) {
      sec = 1;
      start[dd] = padding[d] + dimlist[d];
    } else if (neighbor.get(-(int) dim)) {
      sec = -1;
      start[dd] = padding[d] + ghost[d];
    }
    if (sec) {
      subsize[dd] = ghost[d];
    } else {
      subsize[dd] = dimlist[d];
      start[dd] = padding[d] + ghost[d];
    }
  }
  MPI_Datatype ret;
  // Subarray is most contiguous dimension first (largest index)
  MPI_Type_create_subarray(ndims, size.data(), subsize.data(), start.data(), MPI_ORDER_C, MPI_DOUBLE, &ret);
  return ret;
}
// Builds an MPI subarray datatype describing the receive region for
// `neighbor` (same geometry as unpack<dim>: the ghost zone outside the
// interior on faced dimensions, the full interior elsewhere). The returned
// type is NOT committed; callers must MPI_Type_commit it.
inline MPI_Datatype unpack_type(BitSet neighbor, const std::vector<long> &dimlist, const std::vector<long> &padding,
                                const std::vector<long> &ghost) {
  int ndims = dimlist.size();
  std::vector<int> size(ndims), subsize(ndims), start(ndims);
  // dd iterates slowest-varying first, as MPI_ORDER_C expects
  for (long dd = 0; dd < dimlist.size(); ++dd) {
    long d = (long)dimlist.size() - dd - 1;
    size[dd] = dimlist[d] + 2 * (padding[d] + ghost[d]);
    long dim = d + 1;
    long sec = 0;
    if (neighbor.get(dim)) {
      sec = 1;
      start[dd] = padding[d] + dimlist[d] + ghost[d];
    } else if (neighbor.get(-(int) dim)) {
      sec = -1;
      start[dd] = padding[d];
    }
    if (sec) {
      subsize[dd] = ghost[d];
    } else {
      subsize[dd] = dimlist[d];
      start[dd] = padding[d] + ghost[d];
    }
  }
  MPI_Datatype ret;
  // Subarray is most contiguous dimension first (largest index)
  MPI_Type_create_subarray(ndims, size.data(), subsize.data(), start.data(), MPI_ORDER_C, MPI_DOUBLE, &ret);
  return ret;
}
// Creates and commits the send (stypemap) and receive (rtypemap) subarray
// datatypes for every neighbor direction, keyed by the neighbor's BitSet
// encoding. Call once before using exchangeArrTypes.
// Fix: removed the unused local `std::vector<MPI_Request> requests` — it was
// allocated on every call and never read.
template<unsigned dim>
void exchangeArrPrepareTypes(std::unordered_map<uint64_t, MPI_Datatype> &stypemap,
                             std::unordered_map<uint64_t, MPI_Datatype> &rtypemap,
                             const std::vector<long> &dimlist, const std::vector<long> &padding,
                             const std::vector<long> &ghost) {
  std::vector<BitSet> neighbors;
  allneighbors(0, 1, dim, neighbors);
  neighbors.erase(neighbors.begin() + (neighbors.size() / 2));
  for (auto n: neighbors) {
    MPI_Datatype MPI_rtype = unpack_type(n, dimlist, padding, ghost);
    MPI_Type_commit(&MPI_rtype);
    rtypemap[n.set] = MPI_rtype;
    MPI_Datatype MPI_stype = pack_type(n, dimlist, padding, ghost);
    MPI_Type_commit(&MPI_stype);
    stypemap[n.set] = MPI_stype;
  }
}
// Using data types
// Ghost exchange using pre-committed MPI datatypes (see
// exchangeArrPrepareTypes): one nonblocking recv/send pair per neighbor
// directly on `arr`, then a single MPI_Waitall. Timings go into the global
// calltime/waittime counters.
// Fix: removed the unused local `rank` and the dead MPI_Comm_rank call that
// only existed to fill it.
template<unsigned dim>
void exchangeArrTypes(bElem *arr, const MPI_Comm &comm, std::unordered_map<uint64_t, int> &rank_map,
                      std::unordered_map<uint64_t, MPI_Datatype> &stypemap,
                      std::unordered_map<uint64_t, MPI_Datatype> &rtypemap) {
  std::vector<BitSet> neighbors;
  allneighbors(0, 1, dim, neighbors);
  neighbors.erase(neighbors.begin() + (neighbors.size() / 2));
  std::vector<MPI_Request> requests(neighbors.size() * 2);
  double st = omp_get_wtime(), ed;
  // recv tag (size-i-1) pairs with the opposite neighbor's send tag i
  for (int i = 0; i < (int) neighbors.size(); ++i) {
    MPI_Irecv(arr, 1, rtypemap[neighbors[i].set], rank_map[neighbors[i].set],
              (int) neighbors.size() - i - 1, comm, &(requests[i * 2]));
    MPI_Isend(arr, 1, stypemap[neighbors[i].set], rank_map[neighbors[i].set], i, comm, &(requests[i * 2 + 1]));
  }
  ed = omp_get_wtime();
  calltime += ed - st;
  st = ed;
  // Wait
  std::vector<MPI_Status> stats(requests.size());
  MPI_Waitall(static_cast<int>(requests.size()), requests.data(), stats.data());
  ed = omp_get_wtime();
  waittime += ed - st;
}
// Bundle describing one array participating in a combined multi-array
// exchange (see exchangeArrAll).
typedef struct {
  bElem *arr;                                   // the padded data array to exchange
  std::unordered_map<uint64_t, int> *rank_map;  // neighbor BitSet encoding -> MPI rank
  std::unordered_map<uint64_t, int> *id_map;    // neighbor BitSet encoding -> peer array id (for tag matching)
  int id;                                       // this array's id, mixed into send tags
} ArrExPack;
// Ghost exchange for several arrays at once: per-(neighbor, array) buffers
// are packed, exchanged with nonblocking sends/recvs, then unpacked. Tags mix
// the array id (id*100) with the neighbor index to keep same-peer messages
// apart. Buffers are freshly allocated and freed on every call.
template<unsigned dim>
void exchangeArrAll(std::vector<ArrExPack> arr, const MPI_Comm &comm,
                    const std::vector<long> &dimlist, const std::vector<long> &padding,
                    const std::vector<long> &ghost) {
  std::vector<BitSet> neighbors;
  allneighbors(0, 1, dim, neighbors);
  // drop the middle entry (presumably the self/center neighbor)
  neighbors.erase(neighbors.begin() + (neighbors.size() / 2));
  // buffer index layout: [neighbor i + array s * neighbors.size()]
  std::vector<bElem *> buffers_out(arr.size() * neighbors.size(), nullptr);
  std::vector<bElem *> buffers_recv(arr.size() * neighbors.size(), nullptr);
  std::vector<unsigned long> tot(neighbors.size());
  std::vector<MPI_Request> requests(arr.size() * neighbors.size() * 2);
  // strides of the padded array: dimension i spans dimlist[i] + 2*(padding+ghost)
  std::vector<unsigned long> arrstride(dimlist.size());
  unsigned long stri = 1;
  for (int i = 0; i < arrstride.size(); ++i) {
    arrstride[i] = stri;
    stri = stri * ((padding[i] + ghost[i]) * 2 + dimlist[i]);
  }
  for (int i = 0; i < (int) neighbors.size(); ++i) {
    tot[i] = (unsigned long) evalsize(neighbors[i], dimlist, ghost, false);
    for (int s = 0; s < arr.size(); ++s) {
      buffers_recv[i + s * neighbors.size()] = new bElem[tot[i]];
      buffers_out[i + s * neighbors.size()] = new bElem[tot[i]];
    }
  }
  double st = omp_get_wtime(), ed;
  // Pack
#pragma omp parallel for
  for (int i = 0; i < (int) neighbors.size(); ++i)
    for (int s = 0; s < arr.size(); ++s)
      pack<dim>(arr[s].arr, neighbors[i], buffers_out[i + s * neighbors.size()], arrstride, dimlist, padding, ghost);
  ed = omp_get_wtime();
  packtime += ed - st;
#ifdef BARRIER_TIMESTEP
  MPI_Barrier(comm);
#endif
  st = omp_get_wtime();
  // recv tag (peer_id*100 + size-i-1) pairs with the peer's send tag (id*100 + i)
  for (int i = 0; i < (int) neighbors.size(); ++i)
    for (int s = 0; s < arr.size(); ++s) {
      MPI_Irecv(buffers_recv[i + s * neighbors.size()], (int) (tot[i] * sizeof(bElem)), MPI_CHAR,
                arr[s].rank_map->at(neighbors[i].set),
                arr[s].id_map->at(neighbors[i].set) * 100 + (int) neighbors.size() - i - 1,
                comm, &(requests[i * 2 + s * neighbors.size() * 2]));
      MPI_Isend(buffers_out[i + s * neighbors.size()], (int) (tot[i] * sizeof(bElem)), MPI_CHAR,
                arr[s].rank_map->at(neighbors[i].set), arr[s].id * 100 + i, comm,
                &(requests[i * 2 + s * neighbors.size() * 2 + 1]));
    }
  ed = omp_get_wtime();
  calltime += ed - st;
  st = ed;
  // Wait
  std::vector<MPI_Status> stats(requests.size());
  MPI_Waitall(static_cast<int>(requests.size()), requests.data(), stats.data());
  ed = omp_get_wtime();
  waittime += ed - st;
  st = ed;
  // Unpack
#pragma omp parallel for
  for (int i = 0; i < (int) neighbors.size(); ++i)
    for (int s = 0; s < arr.size(); ++s)
      unpack<dim>(arr[s].arr, neighbors[i], buffers_recv[i + s * neighbors.size()], arrstride, dimlist, padding, ghost);
  ed = omp_get_wtime();
  packtime += ed - st;
  // Cleanup
  for (auto b: buffers_out)
    delete[] b;
  for (auto b: buffers_recv)
    delete[] b;
}
#endif //BRICK_ARRAY_MPI_H
|
gradient_operators.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
This file contains the gradient operators.
*/
#include <stdio.h>
#include "../game_types.h"
#include "spatial_operators.h"
int grad_hor_cov(Scalar_field in_field, Vector_field out_field, Grid *grid)
{
    /*
    calculates the horizontal covariant gradient
    */
    #pragma omp parallel for
    for (int h_index = 0; h_index < NO_OF_VECTORS_H; ++h_index)
    {
        for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
        {
            /* loop-local index removes the need for an OpenMP private clause */
            int vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
            /* finite difference between the two cell centers this edge connects */
            out_field[vector_index]
            = (in_field[grid -> to_index[h_index] + layer_index*NO_OF_SCALARS_H]
            - in_field[grid -> from_index[h_index] + layer_index*NO_OF_SCALARS_H])
            /grid -> normal_distance[vector_index];
        }
    }
    return 0;
}
int grad_vert_cov(Scalar_field in_field, Vector_field out_field, Grid *grid)
{
    /*
    calculates the vertical covariant gradient
    */
    // loop over the inner grid points
    #pragma omp parallel for
    for (int i = NO_OF_SCALARS_H; i < NO_OF_V_VECTORS - NO_OF_SCALARS_H; ++i)
    {
        /* loop-local indices remove the need for an OpenMP private clause */
        int layer_index = i/NO_OF_SCALARS_H;
        int h_index = i - layer_index*NO_OF_SCALARS_H;
        int lower_index = h_index + layer_index*NO_OF_SCALARS_H;
        int upper_index = h_index + (layer_index - 1)*NO_OF_SCALARS_H;
        int vector_index = h_index + layer_index*NO_OF_VECTORS_PER_LAYER;
        /* finite difference between the scalar points above and below */
        out_field[vector_index]
        = (in_field[upper_index] - in_field[lower_index])/grid -> normal_distance[vector_index];
    }
    return 0;
}
int grad_cov(Scalar_field in_field, Vector_field out_field, Grid *grid)
{
    /*
    calculates the covariant gradient
    (horizontal and vertical covariant components, written into out_field)
    */
    grad_hor_cov(in_field, out_field, grid);
    grad_vert_cov(in_field, out_field, grid);
    return 0;
}
int grad(Scalar_field in_field, Vector_field out_field, Grid *grid)
{
    /*
    calculates the gradient (horizontally contravariant, vertically covariant)
    by first forming the covariant gradient and then converting the horizontal
    components to contravariant form
    */
    grad_cov(in_field, out_field, grid);
    vector_field_hor_cov_to_con(out_field, grid);
    return 0;
}
int grad_hor(Scalar_field in_field, Vector_field out_field, Grid *grid)
{
    /*
    This function calculates the horizontal contravariant gradient.
    It computes the full gradient, then zeroes every vertical component.
    */
    grad(in_field, out_field, grid);
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_V_VECTORS; ++i)
    {
        /* loop-local indices remove the need for an OpenMP private clause */
        int layer_index = i/NO_OF_SCALARS_H;
        int h_index = i - layer_index*NO_OF_SCALARS_H;
        out_field[h_index + layer_index*NO_OF_VECTORS_PER_LAYER] = 0;
    }
    return 0;
}
|
util_test.h | /*******************************************************************************
* Copyright 2018 Tensor Tang. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/**
* This file defines some utilities that used in gtest
*/
#pragma once
#include <cmath>
#include "gtest/gtest.h"
#include "jitinfer.h"
#include "omp_thread.h"
#include "util.h"
#include "util_jitinfer.h"
namespace jitinfer {
namespace util {
// Deterministic-ish test fixture value for element `index`, chosen per dtype:
// f32 gets a smooth sinusoid around 1.0, s8/s32 a value in [-10, 10],
// u8 a value in [0, 16], anything else 0.
// NOTE(review): rand() is neither seeded here nor thread-safe; when called
// from the OpenMP-parallel fill_data the integer paths are non-deterministic
// — confirm that is acceptable for these tests.
template <typename data_t>
static inline data_t set_value(size_t index) {
  using data_type = jitinfer::memory::dtype;
  if (type2dtype<data_t>::dtype == data_type::f32) {
    double mean = 1., deviation = 1e-2;
    return static_cast<data_t>(mean + deviation * sinf(float(index % 37)));
  } else if (one_of(type2dtype<data_t>::dtype, data_type::s8, data_type::s32)) {
    return data_t(rand() % 21 - 10);
  } else if (type2dtype<data_t>::dtype == data_type::u8) {
    return data_t(rand() % 17);
  } else {
    return data_t(0);
  }
}
template <typename T>
void fill_data(T* p, size_t sz) {
#pragma omp parallel for schedule(static)
for (size_t i = 0; i < sz; i++) {
p[i] = set_value<T>(i);
}
}
// Random test fixture value in [mmin, mmax): f32 gets a uniform-ish float,
// integer dtypes a uniform-ish integer, anything else 0.
// NOTE(review): rand() is unseeded and not thread-safe (see fill_data), and
// the integer path divides by zero via `rand() % 0` when mmax == mmin —
// callers must pass a non-empty range.
template <typename data_t>
static inline data_t set_value(size_t index, data_t mmin, data_t mmax) {
  using data_type = jitinfer::memory::dtype;
  if (type2dtype<data_t>::dtype == data_type::f32) {
    return static_cast<data_t>(mmin +
                               (mmax - mmin) * (float)(rand() % 100) / 100.f);
  } else if (one_of(type2dtype<data_t>::dtype,
                    data_type::s8,
                    data_type::u8,
                    data_type::s32)) {
    return data_t(mmin + rand() % (s32)(mmax - mmin));
  } else {
    return data_t(0);
  }
}
// Populate the first `sz` entries of `p` with fixture values drawn from the
// range [min(a,b), max(a,b)) — argument order of a and b does not matter.
template <typename T>
void fill_data(T* p, size_t sz, T a, T b) {
#pragma omp parallel for schedule(static)
  for (size_t idx = 0; idx < sz; idx++) {
    p[idx] = set_value<T>(idx, std::min(a, b), std::max(a, b));
  }
}
// Element-wise gtest comparison of dst against ref: relative tolerance 1e-4
// for f32 (absolute when |ref| is tiny), exact equality otherwise.
// NOTE(review): EXPECT_* macros fire from multiple OpenMP threads here —
// relies on gtest's thread-safe-failure support (pthreads platforms); confirm
// for other platforms.
template <typename T>
void compare_array(T* dst, T* ref, size_t sz) {
#pragma omp parallel for schedule(static)
  for (size_t i = 0; i < sz; ++i) {
    if (std::is_same<T, f32>::value) {
      f32 diff = dst[i] - ref[i];
      // relative error, except near zero where it degenerates to absolute
      f32 e = (std::fabs(ref[i]) > (f32)1e-4) ? diff / ref[i] : diff;
      EXPECT_NEAR(e, 0.f, (f32)1e-4) << "Index: " << i << " Total: " << sz;
    } else {
      EXPECT_EQ(dst[i], ref[i]) << "Index: " << i << " Total: " << sz;
    }
  }
}
}
}
|
common.h | #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <cstdio>
#include <string>
#include <vector>
#include <sstream>
#include <cstdint>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <iterator>
#include <type_traits>
#include <iomanip>
#ifdef _MSC_VER
#include "intrin.h"
#endif
namespace LightGBM {
namespace Common {
// ASCII-only lower-casing: maps 'A'..'Z' to 'a'..'z', leaves every other
// byte untouched (locale-independent, unlike std::tolower).
inline static char tolower(char in) {
  return (in >= 'A' && in <= 'Z') ? static_cast<char>(in + ('a' - 'A')) : in;
}
// Strip leading and trailing ASCII whitespace (space, \f, \n, \r, \t, \v).
// Takes the string by value and trims it in place; a string that is entirely
// whitespace comes back empty.
inline static std::string Trim(std::string str) {
  if (str.empty()) {
    return str;
  }
  const char* const kWhitespace = " \f\n\r\t\v";
  str.erase(str.find_last_not_of(kWhitespace) + 1);
  str.erase(0, str.find_first_not_of(kWhitespace));
  return str;
}
// Strip leading and trailing quote characters (both ' and ") from the
// string; quotes in the middle are kept. All-quote input comes back empty.
inline static std::string RemoveQuotationSymbol(std::string str) {
  if (str.empty()) {
    return str;
  }
  const char* const kQuotes = "'\"";
  str.erase(str.find_last_not_of(kQuotes) + 1);
  str.erase(0, str.find_first_not_of(kQuotes));
  return str;
}
// True when `str` begins with `prefix` (the empty prefix matches anything).
// Fix: use compare() instead of substr() == prefix — substr allocated a
// temporary string on every call; compare performs the same check in place,
// including the str-shorter-than-prefix case (compare is nonzero).
inline static bool StartsWith(const std::string& str, const std::string prefix) {
  return str.compare(0, prefix.size(), prefix) == 0;
}
// Split `c_str` on a single delimiter character, discarding empty tokens
// (consecutive delimiters and leading/trailing delimiters produce nothing).
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> pieces;
  const std::string text(c_str);
  size_t begin = 0;
  for (size_t cur = 0; cur < text.length(); ++cur) {
    if (text[cur] == delimiter) {
      if (begin < cur) {
        pieces.push_back(text.substr(begin, cur - begin));
      }
      begin = cur + 1;
    }
  }
  if (begin < text.length()) {
    pieces.push_back(text.substr(begin));
  }
  return pieces;
}
// Split `c_str` into lines, treating any run of '\n'/'\r' as one separator
// (handles \n, \r\n and \r endings alike); empty lines are discarded.
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> lines;
  const std::string text(c_str);
  size_t begin = 0;
  size_t cur = 0;
  while (cur < text.length()) {
    const char ch = text[cur];
    if (ch == '\n' || ch == '\r') {
      if (begin < cur) {
        lines.push_back(text.substr(begin, cur - begin));
      }
      // swallow the whole run of line-ending characters
      while (text[cur] == '\n' || text[cur] == '\r') ++cur;
      begin = cur;
    } else {
      ++cur;
    }
  }
  if (begin < cur) {
    lines.push_back(text.substr(begin));
  }
  return lines;
}
// Split `c_str` on any character from the NUL-terminated `delimiters` set,
// discarding empty tokens.
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> tokens;
  const std::string text(c_str);
  const std::string delims(delimiters);
  size_t begin = 0;
  for (size_t cur = 0; cur < text.length(); ++cur) {
    if (delims.find(text[cur]) != std::string::npos) {
      if (begin < cur) {
        tokens.push_back(text.substr(begin, cur - begin));
      }
      begin = cur + 1;
    }
  }
  if (begin < text.length()) {
    tokens.push_back(text.substr(begin));
  }
  return tokens;
}
// Parse a (possibly signed) decimal integer from `p` into *out, skipping
// surrounding spaces; returns a pointer to the first unconsumed character.
// No digits yields *out == 0 — the caller cannot distinguish that from "0".
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
  while (*p == ' ') {
    ++p;
  }
  int sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  T value = 0;
  while (*p >= '0' && *p <= '9') {
    value = value * 10 + (*p - '0');
    ++p;
  }
  *out = static_cast<T>(sign * value);
  while (*p == ' ') {
    ++p;
  }
  return p;
}
// Integer power by recursive squaring/cubing: negative exponents return the
// reciprocal, exponent 0 returns 1. The recursion performs exactly the same
// multiplications as before, so floating-point results are bit-identical.
template<typename T>
inline static double Pow(T base, int power) {
  if (power == 0) {
    return 1;
  }
  if (power < 0) {
    return 1.0 / Pow(base, -power);
  }
  if (power % 2 == 0) {
    return Pow(base * base, power / 2);
  }
  if (power % 3 == 0) {
    return Pow(base * base * base, power / 3);
  }
  return base * Pow(base, power - 1);
}
// Hand-rolled, locale-independent double parser: skips surrounding spaces,
// handles sign, fraction and exponent, and recognizes the textual tokens
// na/nan/null (-> NAN) and inf/infinity (-> signed 1e308). Any other
// non-numeric token aborts via Log::Fatal. Returns a pointer to the first
// unconsumed character.
inline static const char* Atof(const char* p, double* out) {
  int frac;
  double sign, value, scale;
  *out = NAN;
  // Skip leading white space, if any.
  while (*p == ' ') {
    ++p;
  }
  // Get sign, if any.
  sign = 1.0;
  if (*p == '-') {
    sign = -1.0;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
      value = value * 10.0 + (*p - '0');
    }
    // Get digits after decimal point, if any.
    if (*p == '.') {
      double right = 0.0;
      int nn = 0;
      ++p;
      while (*p >= '0' && *p <= '9') {
        // accumulate fraction digits as an integer, scale down afterwards
        right = (*p - '0') + right * 10.0;
        ++nn;
        ++p;
      }
      value += right / Pow(10.0, nn);
    }
    // Handle exponent, if any.
    frac = 0;
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E')) {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-') {
        frac = 1;
        ++p;
      } else if (*p == '+') {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
        expon = expon * 10 + (*p - '0');
      }
      // clamp to the largest double decimal exponent
      if (expon > 308) expon = 308;
      // Calculate scaling factor.
      while (expon >= 50) { scale *= 1E50; expon -= 50; }
      while (expon >= 8) { scale *= 1E8; expon -= 8; }
      while (expon > 0) { scale *= 10.0; expon -= 1; }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  } else {
    // not numeric: read the token up to a separator and try the known words
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' '
           && *(p + cnt) != '\t' && *(p + cnt) != ','
           && *(p + cnt) != '\n' && *(p + cnt) != '\r'
           && *(p + cnt) != ':') {
      ++cnt;
    }
    if (cnt > 0) {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
          tmp_str == std::string("null")) {
        *out = NAN;
      } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
        *out = sign * 1e308;
      } else {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }
  while (*p == ' ') {
    ++p;
  }
  return p;
}
// Parse an integer from `p` into *out; succeeds only when the whole string
// (ignoring surrounding spaces, which Atoi consumes) was an integer.
// Fix: collapsed the redundant if/return-true/return-false into a single
// boolean return expression.
inline static bool AtoiAndCheck(const char* p, int* out) {
  const char* after = Atoi(p, out);
  return *after == '\0';
}
// Parse a double from `p` into *out; succeeds only when the whole string
// (ignoring surrounding spaces, which Atof consumes) was a valid value.
// Fix: collapsed the redundant if/return-true/return-false into a single
// boolean return expression.
inline static bool AtofAndCheck(const char* p, double* out) {
  const char* after = Atof(p, out);
  return *after == '\0';
}
// Number of decimal digits needed to print `n` (1 for 0..9, up to 10 for
// values near 2^32). On MSVC/GCC it uses a bit-scan plus a power-of-ten
// table ((bits+1)*1233>>12 approximates bits*log10(2)); elsewhere it falls
// back to a chain of range comparisons.
inline static unsigned CountDecimalDigit32(uint32_t n) {
#if defined(_MSC_VER) || defined(__GNUC__)
  static const uint32_t powers_of_10[] = {
      0,
      10,
      100,
      1000,
      10000,
      100000,
      1000000,
      10000000,
      100000000,
      1000000000
  };
#ifdef _MSC_VER
  unsigned long i = 0;
  // n | 1 guards against n == 0, for which the bit scan is undefined
  _BitScanReverse(&i, n | 1);
  uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
  uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
  // t may overshoot by one; correct by comparing against 10^t
  return t - (n < powers_of_10[t]) + 1;
#else
  if (n < 10) return 1;
  if (n < 100) return 2;
  if (n < 1000) return 3;
  if (n < 10000) return 4;
  if (n < 100000) return 5;
  if (n < 1000000) return 6;
  if (n < 10000000) return 7;
  if (n < 100000000) return 8;
  if (n < 1000000000) return 9;
  return 10;
#endif
}
// Write the decimal representation of `value` (plus a trailing NUL) into
// `buffer`, which must have room for up to 11 bytes. Digits are emitted
// right-to-left, two at a time via a 00..99 lookup table to halve the number
// of divisions.
inline static void Uint32ToStr(uint32_t value, char* buffer) {
  // kDigitsLut[2*i] / kDigitsLut[2*i+1] are the tens/units characters of i
  const char kDigitsLut[200] = {
      '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
      '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
      '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
      '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
      '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
      '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
      '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
      '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
      '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
      '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
  };
  // place the cursor at the end and fill backwards
  unsigned digit = CountDecimalDigit32(value);
  buffer += digit;
  *buffer = '\0';
  while (value >= 100) {
    const unsigned i = (value % 100) << 1;
    value /= 100;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
  if (value < 10) {
    *--buffer = static_cast<char>(value) + '0';
  } else {
    const unsigned i = value << 1;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
}
// Writes the signed decimal representation of value into buffer.
// Negation is performed in unsigned arithmetic, so INT32_MIN is safe.
inline static void Int32ToStr(int32_t value, char* buffer) {
  uint32_t magnitude = static_cast<uint32_t>(value);
  if (value < 0) {
    *buffer++ = '-';
    magnitude = 0u - magnitude;
  }
  Uint32ToStr(magnitude, buffer);
}
// Formats value with round-trip precision ("%.17g") into buffer.
// Fix: the non-MSVC path used unbounded sprintf; snprintf (C++11) honors
// buffer_len on every platform, which also removes the #ifdef noise and
// the conditionally-unnamed parameter.
inline static void DoubleToStr(double value, char* buffer, size_t buffer_len) {
  snprintf(buffer, buffer_len, "%.17g", value);
}
// Returns the first position at or after p that is not a space or tab.
inline static const char* SkipSpaceAndTab(const char* p) {
  for (; *p == ' ' || *p == '\t'; ++p) {
  }
  return p;
}
// Returns the first position at or after p that is not a newline,
// carriage return, or space character.
inline static const char* SkipReturn(const char* p) {
  for (; *p == '\n' || *p == '\r' || *p == ' '; ++p) {
  }
  return p;
}
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
std::vector<T2> ret(arr.size());
for (size_t i = 0; i < arr.size(); ++i) {
ret[i] = static_cast<T2>(arr[i]);
}
return ret;
}
// Formats a numeric value as decimal text into `buffer`.
// Primary template: signed integer T (is_float=false, is_unsign=false).
// NOTE(review): the value is passed to Int32ToStr, which takes int32_t --
// assumes T fits in 32 bits; confirm at instantiation sites.
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
void operator()(T value, char* buffer, size_t) const {
Int32ToStr(value, buffer);
}
};
// Specialization for floating-point T: formats with "%g".
// The buffer length parameter is only consumed on MSVC (sprintf_s);
// elsewhere plain sprintf is used and the parameter is unnamed.
template<typename T>
struct __TToStringHelperFast<T, true, false> {
void operator()(T value, char* buffer, size_t
#ifdef _MSC_VER
buf_len
#endif
) const {
#ifdef _MSC_VER
sprintf_s(buffer, buf_len, "%g", value);
#else
sprintf(buffer, "%g", value);
#endif
}
};
// Specialization for unsigned integer T: delegates to Uint32ToStr.
// NOTE(review): as with the signed case, assumes T fits in 32 bits.
template<typename T>
struct __TToStringHelperFast<T, false, true> {
void operator()(T value, char* buffer, size_t) const {
Uint32ToStr(value, buffer);
}
};
// Joins the first min(n, arr.size()) elements of arr into a single
// space-separated string using the per-type formatter above.
// NOTE(review): each formatted element must fit in buf_len (16) bytes
// including the NUL -- confirm callers' value ranges.
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
__TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
const size_t buf_len = 16;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
helper(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
helper(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
// Joins the first min(n, arr.size()) doubles into a space-separated string,
// each formatted with full round-trip precision via DoubleToStr ("%.17g").
// The 32-byte buffer is large enough for any "%.17g" double.
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
const size_t buf_len = 32;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
DoubleToStr(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
DoubleToStr(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
// Parses a string into T. Primary template: integer parse via Atoi;
// returns 0 (the initial value) when nothing can be parsed.
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
Atoi(str.c_str(), &ret);
return ret;
}
};
// Floating-point parse via std::stod.
// NOTE(review): std::stod throws on malformed input, unlike the Atoi
// path above -- confirm inputs are pre-validated by the callers.
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
// Splits str on `delimiter` and parses every token into a T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
// Splits str on spaces and parses exactly n tokens into a vector<T>.
// CHECK aborts if the token count does not match n.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), ' ');
CHECK(strs.size() == static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
// In-place parser: reads one T starting at p and returns the position
// just past the consumed characters. Primary template: integers via Atoi.
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return Atoi(p, out);
}
};
// Floating-point variant: parse as double via Atof, then narrow to T.
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
double tmp = 0.0f;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
// Parses exactly n whitespace-separated values from str using the fast
// in-place parsers above (no intermediate token vector).
// NOTE(review): assumes n >= 0 and that str really contains n values;
// a short input leaves trailing elements parsed from garbage -- confirm
// callers validate the count beforehand.
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
// Concatenates all elements of strs separated by delimiter.
// Streamed with enough precision for doubles to round-trip.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream out;
  out << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  out << strs[0];
  for (size_t idx = 1; idx < strs.size(); ++idx) {
    out << delimiter << strs[idx];
  }
  return out.str();
}
// Concatenates elements strs[start..end) separated by delimiter;
// end is clamped to strs.size().
// Fix: the original indexed strs[start] even when the vector was empty
// (size()-1 underflowed in the clamp) or when end < start wrapped the
// unsigned subtraction -- both undefined behavior. Empty or invalid
// ranges now return "".
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  end = std::min(end, strs.size());
  if (start >= end) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}
// Returns the smallest power of two >= x (1 for any x <= 1),
// or 0 when no such value exists in the tried range.
inline static int64_t Pow2RoundUp(int64_t x) {
  int64_t pow2 = 1;
  int iter = 0;
  while (iter < 64) {
    if (pow2 >= x) {
      return pow2;
    }
    pow2 <<= 1;
    ++iter;
  }
  return 0;
}
/*!
 * \brief In-place softmax transformation on p_rec.
 *        The maximum is subtracted before exponentiation for numerical
 *        stability.
 * \param p_rec The input/output vector of values. An empty vector is a
 *              no-op (the original unconditionally read rec[0]: UB).
 */
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double>& rec = *p_rec;
  if (rec.empty()) {
    return;
  }
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= wsum;
  }
}
// Softmax of input[0..len) written to output[0..len); the maximum is
// subtracted before exponentiation for numerical stability.
// Fix: len <= 0 is now a no-op (the original unconditionally read
// input[0], which is UB for an empty range).
inline static void Softmax(const double* input, double* output, int len) {
  if (len <= 0) {
    return;
  }
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= wsum;
  }
}
// Returns a vector of non-owning const pointers to the objects owned by
// the given vector of unique_ptr.
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> views;
  views.reserve(input.size());
  for (const auto& owner : input) {
    views.push_back(owner.get());
  }
  return views;
}
// Stable-sorts keys[start..] and reorders values[start..] to match;
// is_reverse selects descending order.
// Fix: the original write-back loop used the absolute index i into arr
// ("keys[i] = arr[i].first" for i in [start, arr.size())), so for
// start > 0 it copied the wrong pairs back and left the tail of the
// range untouched. The write-back now maps arr[j] -> index start + j.
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) {
  std::vector<std::pair<T1, T2>> arr;
  if (start < keys.size()) {
    arr.reserve(keys.size() - start);
  }
  for (size_t i = start; i < keys.size(); ++i) {
    arr.emplace_back(keys[i], values[i]);
  }
  if (!is_reverse) {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
  }
  for (size_t j = 0; j < arr.size(); ++j) {
    keys[start + j] = arr[j].first;
    values[start + j] = arr[j].second;
  }
}
// Collects the .data() pointer of every inner vector.
// The pointers stay valid only while the inner vectors are not resized.
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) {
  std::vector<T*> ptrs;
  ptrs.reserve(data.size());
  for (auto& inner : data) {
    ptrs.push_back(inner.data());
  }
  return ptrs;
}
// Returns the length of every inner vector, as int.
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> sizes;
  sizes.reserve(data.size());
  for (const auto& inner : data) {
    sizes.push_back(static_cast<int>(inner.size()));
  }
  return sizes;
}
// Clamps x to the range [-1e300, 1e300] so downstream arithmetic cannot
// overflow to infinity. NaN fails both comparisons and passes through.
inline static double AvoidInf(double x) {
  if (x >= 1e300) {
    return 1e300;
  }
  if (x <= -1e300) {
    return -1e300;
  }
  return x;
}
// Float overload: clamps x to [-1e38f, 1e38f]; NaN passes through.
inline static float AvoidInf(float x) {
  if (x >= 1e38) {
    return 1e38f;
  }
  if (x <= -1e38) {
    return -1e38f;
  }
  return x;
}
// Exposes an iterator's value_type as a pointer type, for overload
// dispatch only. Always returns a null pointer and is never dereferenced.
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
return (0);
}
// Parallel sort of [_First, _Last) with predicate _Pred using OpenMP:
// each thread sorts one contiguous chunk, then sorted chunks are merged
// pairwise, doubling the run length each round. Falls back to a plain
// std::sort for small ranges or a single thread.
// NOTE(review): `_RanIt buf = temp_buf.begin()` assumes _RanIt is the
// same iterator type as std::vector<_VTRanIt>::iterator (or implicitly
// convertible) -- confirm callers only pass such iterators.
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = 1;
#pragma omp parallel
#pragma omp master
{
num_threads = omp_get_num_threads();
}
// Small input or no parallelism available: sequential sort is cheaper.
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
// Split the range into per-thread chunks of at least kMinInnerLen.
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
std::vector<_VTRanIt> temp_buf(len);
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Recursive merge
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
// A lone tail run with nothing to its right is already in place.
if (mid >= right) { continue; }
// Copy the left run aside, then merge it with the right run in place.
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
// Convenience overload: deduces the iterator's value type via
// IteratorValType and forwards to the four-argument ParallelSort.
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
// Elements are inspected in pairs: only the smaller of each pair needs the
// ymin check and only the larger needs the ymax check (3 comparisons per
// 2 elements). A leftover element (odd ny) is checked against both bounds.
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
// Builds a printf-style message (caller name and index are substituted
// by Log::Fatal via the %s / %i placeholders) and aborts.
auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
std::ostringstream os;
os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
Log::Fatal(os.str().c_str(), callername, i);
};
for (int i = 1; i < ny; i += 2) {
if (y[i - 1] < y[i]) {
if (y[i - 1] < ymin) {
fatal_msg(i - 1);
} else if (y[i] > ymax) {
fatal_msg(i);
}
} else {
if (y[i - 1] > ymax) {
fatal_msg(i - 1);
} else if (y[i] < ymin) {
fatal_msg(i);
}
}
}
if (ny & 1) { // odd
if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
fatal_msg(ny - 1);
}
}
}
// One-pass scan over w[0..nw): computes the minimum, maximum and sum.
// Elements are consumed in pairs (3 comparisons per 2 elements); the seed
// uses one or two elements so the remaining count is even. Any of the
// output pointers may be null to skip that result. Requires nw >= 1.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
  T1 lo;
  T1 hi;
  T1 total;
  int idx;
  if (nw & 1) {  // odd count: seed from the first element alone
    lo = w[0];
    hi = w[0];
    total = w[0];
    idx = 2;
  } else {  // even count: seed from the first two elements
    if (w[0] < w[1]) {
      lo = w[0];
      hi = w[1];
    } else {
      lo = w[1];
      hi = w[0];
    }
    total = w[0] + w[1];
    idx = 3;
  }
  for (; idx < nw; idx += 2) {
    // Compare the pair once; only its smaller member can lower the min
    // and only its larger member can raise the max.
    if (w[idx - 1] < w[idx]) {
      lo = std::min(lo, w[idx - 1]);
      hi = std::max(hi, w[idx]);
    } else {
      lo = std::min(lo, w[idx]);
      hi = std::max(hi, w[idx - 1]);
    }
    total += w[idx - 1] + w[idx];
  }
  if (mi != nullptr) {
    *mi = lo;
  }
  if (ma != nullptr) {
    *ma = hi;
  }
  if (su != nullptr) {
    *su = static_cast<T2>(total);
  }
}
// Returns a zeroed bitset with capacity for n bits (one uint32_t word
// per 32 bits, rounded up).
inline static std::vector<uint32_t> EmptyBitset(int n) {
  int words = n / 32;
  if (n % 32 != 0) {
    ++words;
  }
  return std::vector<uint32_t>(words);
}
// Sets bit `val` in the bitset, growing the vector as needed.
// Fix: shift an unsigned literal -- `1 << 31` on a signed int is
// undefined behavior (shifting into the sign bit).
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>& vec, const T val) {
  int word = val / 32;
  int bit = val % 32;
  if (static_cast<int>(vec.size()) < word + 1) {
    vec.resize(word + 1, 0);
  }
  vec[word] |= (1U << bit);
}
// Builds a bitset with the bit at each vals[i] set, sized to the
// largest value seen.
// Fix: shift an unsigned literal -- `1 << 31` on a signed int is
// undefined behavior (shifting into the sign bit).
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
  std::vector<uint32_t> ret;
  for (int i = 0; i < n; ++i) {
    int word = vals[i] / 32;
    int bit = vals[i] % 32;
    if (static_cast<int>(ret.size()) < word + 1) {
      ret.resize(word + 1, 0);
    }
    ret[word] |= (1U << bit);
  }
  return ret;
}
// Returns true when bit `pos` is set in the n-word bitset `bits`;
// positions beyond the stored words are reported as unset.
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
  const int word = pos / 32;
  if (word >= n) {
    return false;
  }
  return ((bits[word] >> (pos % 32)) & 1u) != 0;
}
// Returns true when b does not exceed the next representable double
// above a, i.e. b <= a within one ULP of slack.
inline static bool CheckDoubleEqualOrdered(double a, double b) {
  return b <= std::nextafter(a, INFINITY);
}
// Returns the smallest representable double strictly greater than a.
inline static double GetDoubleUpperBound(double a) {
  return std::nextafter(a, INFINITY);
}
// Returns the length of the line starting at str, i.e. the number of
// characters before the first NUL, '\n' or '\r'.
inline static size_t GetLine(const char* str) {
  const char* cursor = str;
  while (*cursor != '\0' && *cursor != '\n' && *cursor != '\r') {
    ++cursor;
  }
  return static_cast<size_t>(cursor - str);
}
// Skips a single line terminator at str: "\r", "\n" or the pair "\r\n".
inline static const char* SkipNewLine(const char* str) {
  const char* p = str;
  p += (*p == '\r') ? 1 : 0;
  p += (*p == '\n') ? 1 : 0;
  return p;
}
// Returns the sign of x as an int: +1 for positive, -1 for negative,
// 0 for zero (branchless comparison difference).
template <typename T>
static int Sign(T x) {
return (x > T(0)) - (x < T(0));
}
// Natural log of x, returning -infinity for x <= 0 instead of a NaN or
// a domain error.
template <typename T>
static T SafeLog(T x) {
  return (x > 0) ? std::log(x) : static_cast<T>(-INFINITY);
}
} // namespace Common
} // namespace LightGBM
#endif // LightGBM_UTILS_COMMON_FUN_H_
|
rawKeccak_256_fmt_plug.c | /* Keccak-256 cracker patch for JtR. Hacked together during May of 2013
* by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* Usage: john --format:raw-keccak-256 <hash file>
*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2012 by Solar Designer
*
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawKeccak_256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawKeccak_256);
#else
#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "KeccakHash.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_TAG "$keccak256$"
#define TAG_LENGTH 11
#define FORMAT_LABEL "Raw-Keccak-256"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: both raw-hex and "$keccak256$"-tagged forms of known
 * Keccak-256 digests, including the digest of the empty string. */
static struct fmt_tests tests[] = {
{"4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45", "abc"},
{"$keccak256$4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45", "abc"},
{"$keccak256$3b673b24a64aebb286f193e5c985c8e528db8590f997d9130889ca7f5f4cfe6e", "passWOrd"},
{"$keccak256$2a359feeb8e488a1af2c03b908b3ed7990400555db73e1421181d97cac004d48", "123456789"},
{"$keccak256$c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", ""},
{NULL}
};
/* Per-candidate plaintext lengths; allocated in init(). */
static int (*saved_len);
// the Keccak function can read up to next even 8 byte offset.
// making the buffer larger avoid reading past end of buffer
static char (*saved_key)[(((PLAINTEXT_LENGTH+1)+7)/8)*8];
/* Computed digests: one BINARY_SIZE block (as 32-bit words) per candidate. */
static ARCH_WORD_32 (*crypt_out)
[(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
/* Allocate the per-candidate buffers. Under OpenMP the key count is first
 * scaled by the thread count (times OMP_SCALE) so each thread has enough
 * candidates to work on per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
/* Accept an optional "$keccak256$" tag followed by exactly
 * CIPHERTEXT_LENGTH (64) hex digits and nothing else. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
q = p;
/* atoi16[] yields 0x7F for any non-hex character. */
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
return !*q && q - p == CIPHERTEXT_LENGTH;
}
/* Canonicalize a hash: ensure the "$keccak256$" tag is present and
 * lower-case the hex digits (the format sets FMT_SPLIT_UNIFIES_CASE).
 * Returns a static buffer. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
memcpy(out, FORMAT_TAG, TAG_LENGTH);
/* +1 copies the terminating NUL as well. */
memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
strlwr(out + TAG_LENGTH);
return out;
}
/* Decode the 64 hex digits into a static 32-byte binary digest.
 * NOTE(review): skips TAG_LENGTH unconditionally, i.e. assumes the input
 * is the canonical tagged form produced by split() -- confirm against the
 * core's calling convention. */
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *p;
int i;
if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
p = ciphertext + TAG_LENGTH;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/* Hash-table index helpers: each returns the low PH_MASK_n bits of the
 * first 32-bit word of the computed digest for the given candidate. */
static int get_hash_0(int index)
{
return crypt_out[index][0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return crypt_out[index][0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return crypt_out[index][0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return crypt_out[index][0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return crypt_out[index][0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return crypt_out[index][0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return crypt_out[index][0] & PH_MASK_6;
}
/* Store one candidate plaintext, truncating to PLAINTEXT_LENGTH bytes.
 * The copy is not NUL-terminated here; get_key() terminates on demand. */
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
/* NUL-terminate the stored candidate (the buffer has slack for this --
 * see the saved_key declaration) and return it. */
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
/* Hash every stored candidate with Keccak (rate 1088, capacity 512,
 * 256-bit output, delimited suffix 0x01 -- the original Keccak padding,
 * not the FIPS-202 SHA3 0x06 suffix). Candidates are processed in
 * parallel when OpenMP is enabled. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
Keccak_HashInstance hash;
Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01);
/* HashUpdate takes the length in bits, hence * 8. */
Keccak_HashUpdate(&hash, (unsigned char*)saved_key[index], saved_len[index] * 8);
Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index]);
}
return count;
}
/* Fast scan: compare only the first ARCH_SIZE bytes of each computed
 * digest against the target binary; a full check is done by cmp_one(). */
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full BINARY_SIZE comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one() already compares the full digest; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Format descriptor wiring the functions above into JtR's plugin API.
 * Fields are positional; see struct fmt_main in formats.h. */
struct fmt_main fmt_rawKeccak_256 = {
{
/* Static parameters (labels, sizes, flags, self-tests). */
FORMAT_LABEL,
FORMAT_NAME,
"Keccak 256 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
tests
}, {
/* Method table (unsalted format: salt hooks are the defaults). */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
/// On destruction it subtracts exactly the levels that were added through
/// this object, restoring the depth current at construction time.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
/// Enter one additional template parameter level.
void operator++() {
++Depth;
++AddedLevels;
}
/// Enter D additional levels at once.
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
/// Replace the levels added so far with exactly D levels.
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
/// Depth as it was before this RAII object added any levels.
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Destroy the cached TemplateIdAnnotations, but only when it is safe:
/// at end-of-file, or once the preprocessor can no longer hold pending
/// annotation tokens that point into the cache.
void MaybeDestroyTemplateIds() {
if (!TemplateIds.empty() &&
(Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
/// The destructor defers to MaybeDestroyTemplateIds(), which checks that
/// destroying the cache is actually safe at that point.
struct DestroyTemplateIdAnnotationsRAIIObj {
Parser &Self;
DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
/// One suspicious '<': the candidate template-name expression, the '<'
/// location, and the bracket depths at which it was recorded.
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
/// True when the parser is at exactly the bracket nesting at which this
/// '<' was recorded.
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
/// True when the parser is at that nesting or anywhere deeper.
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
// Same nesting level: keep the higher-ranked candidate.
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to
/// be a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Remember where the consumed token started before advancing the lexer.
  SourceLocation ConsumedLoc = Tok.getLocation();
  PrevTokLocation = ConsumedLoc;
  PP.Lex(Tok);
  return ConsumedLoc;
}
bool TryConsumeToken(tok::TokenKind Expected) {
  // Only consume when the current token matches; otherwise leave it alone.
  if (!Tok.is(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  // On success, also report the location of the token we just consumed.
  bool Consumed = TryConsumeToken(Expected);
  if (Consumed)
    Loc = PrevTokLocation;
  return Consumed;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
// Dispatch to the specialized consume method so bracket counts and
// annotation bookkeeping stay correct even when the token kind is unknown.
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
// Code-completion tokens are only consumed directly when the caller opts
// in; otherwise we fall back to the generic completion handler.
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
  // Ask the preprocessor for the location just past the last consumed token.
  SourceLocation PrevEnd = PP.getLocForEndOfToken(PrevTokLocation);
  return PrevEnd;
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
// Delegate to Sema, which maps each NullabilityKind to its keyword.
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  // Either side of a parenthesis pair counts.
  return Tok.is(tok::l_paren) || Tok.is(tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  // Either side of a square-bracket pair counts.
  return Tok.is(tok::l_square) || Tok.is(tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  // Either side of a curly-brace pair counts.
  return Tok.is(tok::l_brace) || Tok.is(tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  // Any token that has a dedicated Consume* method is "special".
  if (isTokenStringLiteral())
    return true;
  if (isTokenParen() || isTokenBracket() || isTokenBrace())
    return true;
  return Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
// Save the current token, push the previously consumed token back into the
// preprocessor, re-lex it into Tok, then re-inject the saved token so it
// becomes the next one lexed. Order matters: the two EnterToken calls
// bracket the Lex so the stream ends up as [Consumed, Next, ...].
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  // An annotation covers a source range: return its start, but record its
  // end as the "previous token" location.
  SourceLocation AnnotBegin = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return AnnotBegin;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.is(tok::l_paren)) {
    ++ParenCount;
  } else if (ParenCount) {
    // Closing a paren scope retires any pending '<' template-name
    // candidates tracked inside it.
    AngleBrackets.clear(*this);
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.is(tok::l_square)) {
    ++BracketCount;
  } else if (BracketCount) {
    // Closing a bracket scope retires any pending '<' template-name
    // candidates tracked inside it.
    AngleBrackets.clear(*this);
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.is(tok::l_brace)) {
    ++BraceCount;
  } else if (BraceCount) {
    // Closing a brace scope retires any pending '<' template-name
    // candidates tracked inside it.
    AngleBrackets.clear(*this);
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  SourceLocation StrLoc = Tok.getLocation();
  PrevTokLocation = StrLoc;
  PP.Lex(Tok);
  return StrLoc;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  SourceLocation CompletionLoc = Tok.getLocation();
  PrevTokLocation = CompletionLoc;
  PP.Lex(Tok);
  return CompletionLoc;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Pretend we hit end-of-file so the parsing loops unwind naturally.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // Depth 0 is the token already buffered in the parser; never look past
  // end-of-file.
  if (N == 0)
    return Tok;
  if (Tok.is(tok::eof))
    return Tok;
  return PP.LookAhead(N - 1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
// The parser already holds one token in Tok, so the "next" token is the
// preprocessor's lookahead at depth zero.
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
  // A null annotation value encodes an invalid (errored) type.
  void *AnnotVal = Tok.getAnnotationValue();
  if (!AnnotVal)
    return TypeError();
  return ParsedType::getFromOpaquePtr(AnnotVal);
}
private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
  assert((T.isInvalid() || T.get()) &&
         "produced a valid-but-null type annotation?");
  // Store null for invalid results, matching getTypeAnnotation's decoding.
  void *Opaque = nullptr;
  if (!T.isInvalid())
    Opaque = T.get().getAsOpaquePtr();
  Tok.setAnnotationValue(Opaque);
}
// Read a NamedDecl stored in an annotation token's opaque value slot.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
// Store a NamedDecl into an annotation token's opaque value slot.
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
// Read an IdentifierInfo stored in an annotation token's opaque value slot.
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
// Store an IdentifierInfo into an annotation token's opaque value slot.
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
// The ExprResult (including its error state) round-trips through the
// token's opaque pointer slot.
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
  // These token kinds can begin a nested-name-specifier directly; a
  // template-id annotation qualifies only when followed by '::'.
  if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
                  tok::kw___super))
    return true;
  return Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  // Only attempt annotation when the token could plausibly start a scope.
  if (!MightBeCXXScopeToken())
    return false;
  return TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
// Fast path: nothing to do unless an AltiVec/ZVector language mode is on.
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
// Only 'vector', 'bool', and (AltiVec-only) 'pixel' are candidates; any
// other identifier is left untouched.
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
// Defer the context-sensitive replacement logic to the out-of-line helper.
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  // Only relevant in AltiVec/ZVector modes, and only for 'vector' itself.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
// Parser state captured at construction so Revert() can restore it.
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
// True until Commit() or Revert() is called; checked in the destructor.
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
// Tell the preprocessor to start caching tokens so we can rewind.
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
// Keep the tokens consumed since construction; tentative identifier
// declarations are discarded either way.
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
// Rewind the token stream and restore all captured parser state.
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
// Unconditionally rewinds: the destructor calls Revert() so the lookahead
// never commits.
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
// The Objective-C container we temporarily exited, or null if the parser
// was not inside one.
Decl *DC;
// Keeps ParsingInObjCContainer accurate while the context is switched.
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
// Re-enter the container (if any) when the switch goes out of scope.
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  // Null when this object does not own a scope (nothing to exit).
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Construct a new object to manage a scope in the parser Self where the
  // new Scope is created with the flags ScopeFlags, but only when we aren't
  // about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    bool ManagesScope = EnteredScope && !BeforeCompoundStmt;
    if (ManagesScope) {
      Self->EnterScope(ScopeFlags);
      return;
    }
    if (BeforeCompoundStmt)
      Self->incrementMSManglingNumber();
    this->Self = nullptr;
  }

  // Exit the scope associated with this object now, rather than waiting
  // until the object is destroyed.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  // Number of scopes currently entered and not yet exited.
  unsigned NumScopes = 0;
  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}

  /// Enter one additional scope with the given flags.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }

  /// Exit every scope entered so far, innermost first.
  void Exit() {
    for (; NumScopes != 0; --NumScopes)
      Self.ExitScope();
  }

  ~MultiParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
// The scope whose flags were changed, and the flags to restore on exit.
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
// When ManageFlags is false, the constructor leaves the scope untouched.
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
// Convenience overload: report the diagnostic at the current token.
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  // Combine the underlying bits, then rewrap in the enum type.
  const unsigned Combined = static_cast<unsigned>(L) | static_cast<unsigned>(R);
  return static_cast<SkipUntilFlags>(Combined);
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
// Wrap the single kind in an ArrayRef and defer to the list overload.
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Collect both stop kinds and defer to the ArrayRef overload.
  const tok::TokenKind StopToks[] = {T1, T2};
  return SkipUntil(StopToks, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Collect all three stop kinds and defer to the ArrayRef overload.
  const tok::TokenKind StopToks[] = {T1, T2, T3};
  return SkipUntil(StopToks, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
// Each hook is a no-op in this base class; subclasses override only the
// kind of late parsing they represent.
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
// Each override forwards the request to every late-parsed declaration
// recorded in the nested class.
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
// The cached token stream of the attribute's arguments.
CachedTokens Toks;
IdentifierInfo &AttrName;
// The macro this attribute was expanded from, if any.
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
// The declarations this attribute will be applied to once parsed.
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
// Register another declaration the attribute applies to.
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
// Access specifier in effect where the pragma appeared.
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
// Take ownership of the cached pragma tokens (swaps out the argument).
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  /// Whether these attributes are planned to be parsed shortly after
  /// creation rather than deferred to the end of the class.
  // Const-correct: the accessor does not modify the list.
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
// The method declaration whose body was cached.
Decl *D;
// The cached tokens of the method body, replayed later.
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
// Toks may be null when the parameter has no default argument.
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
// Owned raw pointer; null when the exception-spec was parsed eagerly.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), IsInterface(IsInterface),
TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Returns the innermost class currently being parsed. Must not be
/// called when ClassStack is empty.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
/// Pushes a ParsingClass in its constructor; the class is popped either
/// explicitly via Pop() or automatically by the destructor.
class ParsingClassDefinition {
Parser &P;
// Whether Pop() has already been called (prevents a double pop).
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
// Pop on scope exit unless the client popped explicitly.
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Construct the "not a template" state.
  ///
  /// Fix: also initialize LastParameterListWasEmpty here. The other two
  /// constructors set it, but this one previously left it uninitialized,
  /// so reading it on a default-constructed object was undefined.
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
        LastParameterListWasEmpty(false) { }
  /// Construct state for a template declaration or an explicit
  /// specialization, carrying the parsed template parameter lists.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }
  /// Construct state for an explicit instantiation
  /// ('extern'? 'template' declaration).
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }
  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;
  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;
  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;
  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;
  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;
  SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
// Lex (cache) and later parse function template definitions whose parsing
// has been delayed.
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
// Callback handed to Sema; P is the Parser instance (passed as void*).
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
// Push/pop the ClassStack entries tracked while a class is being defined.
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
/// The kind of C++ initializer whose tokens are being cached for later
/// parsing (see ConsumeAndStoreInitializer).
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
// Re-parse the token streams cached while the enclosing class was being
// defined; one ParseLexed* entry point per kind of late-parsed entity.
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
// Consume tokens into Toks without parsing them, for later re-lexing.
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Convenience overload: cache tokens until a single token kind T1 is seen.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// Parsed attributes paired with the source range they cover.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
/// Clear the attribute list and reset the range to an empty range.
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
/// A non-owning attribute view paired with the source range it covers.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
/// Detach from the underlying list and reset the range.
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII object tracking state while an Objective-C @implementation is
/// being parsed, collecting method definitions whose bodies are lexed now
/// and parsed after the matching @end.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  /// Registers itself as the parser's current @implementation context.
  ///
  /// Fix: initialize Finished in the member-initializer list rather than by
  /// assignment in the constructor body (clang-tidy
  /// cppcoreguidelines-prefer-member-initializer); behavior is unchanged.
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  /// Whether finish() has been called for this @implementation.
  bool Finished;
};
/// The @implementation currently being parsed, or null if none.
ObjCImplParsingDataRAII *CurParsedObjCImpl;
/// Cache the body tokens of the method or function so that it can be
/// parsed after the enclosing @implementation is complete.
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
/// Cached identifiers for the context-sensitive ObjC type qualifier
/// keywords, indexed by ObjCTypeQual.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
// Parse the right-hand side of a binary expression with precedence at
// least MinPrec, given the already-parsed left-hand side.
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
// Diagnostics and tracking for '<' tokens that may open template argument
// lists for names not known to be templates.
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// If a potential template-name is being tracked on the angle-bracket
/// stack, defer to the overload taking its tracker entry; otherwise there
/// is nothing to check.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  return Info && checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
///
/// Dispatches to the brace-initializer parser when the next token is '{',
/// and to the assignment-expression parser otherwise.
ExprResult ParseInitializer() {
  return Tok.isNot(tok::l_brace) ? ParseAssignmentExpression()
                                 : ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior of this __if_exists or __if_not_exists block
/// should.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Bare type-specifier contexts: only a type may appear here.
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;
  // Full declaration-specifier contexts.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (eg, because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
  switch (DSC) {
  // A trailing return type may never contain a definition.
  case DeclSpecContext::DSC_trailing:
    return AllowDefiningTypeSpec::No;
  // No definition allowed, but tolerate one to recover from errors.
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
    return AllowDefiningTypeSpec::NoButErrorRecovery;
  // Grammatically permitted but always ill-formed; Sema diagnoses.
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
    return AllowDefiningTypeSpec::YesButInvalid;
  // Everything else permits a (potentially valid) definition.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
    return AllowDefiningTypeSpec::Yes;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts that cannot contain an opaque-enum-declaration.
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
    return false;
  // Ordinary declaration contexts can.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // CTAD does not apply in these contexts.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  // Contexts where a deduced class template specialization type is allowed.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  /// Location of the ':' separating the declarator from the range
  /// expression; invalid when no for-range-initializer was parsed.
  SourceLocation ColonLoc;
  /// The range expression to the right of the ':'.
  ExprResult RangeExpr;
  /// Returns true if a for-range-declaration (and its ':') was parsed.
  // Was `return !ColonLoc.isInvalid();` — the double negation is replaced
  // by the equivalent, clearer SourceLocation::isValid().
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
/// A ForRangeInit that additionally records the statement form of the
/// for-range loop variable.
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
// The 'true' argument is DisambiguatingWithExpression (see the declaration
// of isDeclarationSpecifier above).
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration and an
/// expression statement while parsing function bodies.
/// \returns true for a declaration, false for an expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
// Let Sema know an OpenMP loop may be starting before we disambiguate.
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// \returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // No type-id/expression ambiguity exists in C.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks whether the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume the type-id is
/// enclosed in parentheses.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool Ambiguous; // result discarded in this overload
  return isCXXTypeId(TypeIdUnambiguous, Ambiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
/// True/False mean the ambiguity was resolved and tentative parsing may
/// stop; Ambiguous means more tentative parsing is needed; Error means a
/// parsing error was encountered.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq ('[[') in a context
// where an attribute is not allowed, and diagnose it if found.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' with standard attributes enabled can start an attribute here.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// If the next tokens look like a C++11 attribute ('[[' or 'alignas') and
/// standard attributes are enabled, diagnose it as misplaced, suggesting
/// \p CorrectLocation as the spot it should have appeared.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (standardAttributesAllowed() &&
      ((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
       Tok.is(tok::kw_alignas)))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
/// Diagnose and drop any attributes held in \p Attrs; no-op when the
/// attribute range is invalid (i.e. nothing was parsed).
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// Diagnose any attributes held in \p Attrs and clear only the list (the
/// view variant does not own the attribute storage); no-op when the
/// attribute range is invalid.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't yet support, for example,
// attributes that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If the current token is '__attribute__', parse the GNU attribute
/// specifiers and attach them to declarator \p D; otherwise do nothing.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes GNUAttrs(AttrFactory);
  SourceLocation GNUAttrsEnd;
  ParseGNUAttributes(GNUAttrs, &GNUAttrsEnd, LateAttrs, &D);
  D.takeAttributes(GNUAttrs, GNUAttrsEnd);
}
/// If the current token is '__attribute__', parse GNU attribute specifiers
/// into \p attrs; otherwise do nothing.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If standard attributes are enabled and the upcoming tokens form a C++11
/// attribute-specifier, parse it and attach the result to declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange CXX11Attrs(AttrFactory);
  SourceLocation CXX11AttrsEnd;
  ParseCXX11Attributes(CXX11Attrs, &CXX11AttrsEnd);
  D.takeAttributes(CXX11Attrs, CXX11AttrsEnd);
}
/// If standard attributes are enabled and the upcoming tokens form a C++11
/// attribute-specifier, parse it into \p attrs.
/// \returns true if an attribute-specifier was parsed.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return false;
  // Parse into a ranged list first, then hand everything over to the
  // caller's (unranged) list.
  ParsedAttributesWithRange Parsed(AttrFactory);
  ParseCXX11Attributes(Parsed, endLoc);
  attrs.takeAllFrom(Parsed);
  return true;
}
/// If standard attributes are enabled and the upcoming tokens form a C++11
/// attribute-specifier, parse it into \p attrs.
/// \p OuterMightBeMessageSend is forwarded to isCXX11AttributeSpecifier's
/// disambiguation (a '[[' could instead begin an Objective-C message send).
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// If the __declspec keyword is enabled and is the current token, parse
/// Microsoft __declspec specifiers into \p Attrs; otherwise do nothing.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute when compiling OpenCL.
/// \return false if an error happens; true otherwise (including the
/// non-OpenCL case, where this is a no-op).
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
/// Parses intelfpga:: and clang:: loop attributes when compiling SYCL
/// (device or host); otherwise a no-op that reports success.
bool MaybeParseSYCLLoopAttributes(ParsedAttributes &Attrs) {
  if (!getLangOpts().SYCLIsDevice && !getLangOpts().SYCLIsHost)
    return true;
  return ParseSYCLLoopAttributes(Attrs);
}
bool ParseSYCLLoopAttributes(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
// True once Sema has accepted entry into the declarator scope; the
// destructor then performs the matching exit.
bool EnteredScope;
// True once the parser-side scope has been pushed via EnterScope(); the
// destructor then performs the matching ExitScope().
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// Record entry only when ActOnCXXEnterDeclaratorScope returns false
// (its convention for success), so the destructor exits the Sema scope
// exactly when it was entered.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
/// Bitmask describing which attribute syntaxes ParseTypeQualifierListOpt
/// may parse (and keep) at a given position; see its AttrReqs parameter.
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1, ///< GNU __attribute__ is parsed.
AR_CXX11AttributesParsed = 1 << 2, ///< C++11 [[...]] attributes are parsed.
AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec attributes are parsed.
AR_AllAttributesParsed = AR_GNUAttributesParsed | ///< All three syntaxes.
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed | ///< GNU + __declspec.
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Parsed pieces of one level of a nested namespace declaration: the
/// 'namespace' and (optional) 'inline' keyword locations plus the
/// namespace identifier and its location.
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The parsed components of a using-declarator:
/// 'typename'[opt] nested-name-specifier unqualified-id '...'[opt]
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
/// Reset every component to a pristine state so this object can be
/// reused for another declarator.
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was not
/// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  // Extra expression attached to the clause; the name suggests a depend
  // modifier or a tail expression (e.g. linear step) — confirm at use sites.
  Expr *DepModOrTailExpr = nullptr;
  // Location of the ':' inside the clause, when present.
  SourceLocation ColonLoc;
  // Location recorded at the end of clause parsing (right paren).
  SourceLocation RLoc;
  // Scope and name of a reduction identifier or mapper, for clauses that
  // take one (reduction/map).
  CXXScopeSpec ReductionOrMapperIdScopeSpec;
  DeclarationNameInfo ReductionOrMapperId;
  int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
                          ///< lastprivate clause.
  // Map-type modifiers ('always', 'close', 'mapper(...)') and their
  // source locations, in parse order.
  SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
      MapTypeModifiers;
  SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
      MapTypeModifiersLoc;
  // Motion modifiers for 'to'/'from' clauses, with locations.
  SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
      MotionModifiers;
  SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
  // True when the map type was not written explicitly by the user.
  bool IsMapTypeImplicit = false;
  // Location of ExtraModifier, when one was parsed.
  SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
/// If the current token is a module-boundary annotation
/// (annot_module_begin, annot_module_end or annot_module_include),
/// delegate to parseMisplacedModuleImport(); otherwise do nothing.
/// \returns the result of parseMisplacedModuleImport() for module
/// annotation tokens, false for any other token.
bool tryParseMisplacedModuleImport() {
  tok::TokenKind Kind = Tok.getKind();
  if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
      Kind == tok::annot_module_include)
    return parseMisplacedModuleImport();
  return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
/// Bitmask of qualifiers that may follow a GNU-style `asm` keyword:
/// `volatile`, `inline` and `goto`.
class GNUAsmQualifiers {
  // OR-combination of AQ bits; AQ_unspecified (0) means none seen yet.
  unsigned Qualifiers = AQ_unspecified;

public:
  /// Individual qualifier bits; distinct powers of two so they can be
  /// OR-ed into the Qualifiers mask.
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  /// Returns the spelling of a single qualifier (defined out of line).
  static const char *getQualifierName(AQ Qualifier);
  /// Records a qualifier in the mask; the bool result presumably signals
  /// an error such as a duplicate — confirm against the definition.
  bool setAsmQualifier(AQ Qualifier);
  inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
  inline bool isInline() const { return Qualifiers & AQ_inline; };
  inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for `struct timeval` values.
 *
 * Y is normalized in place so that the plain field-wise subtraction at
 * the end yields a non-negative tv_usec.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds (more than one second) into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the usec difference is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x's (normalized) seconds precede y's. */
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: allocates two time planes of a Nz x Ny x Nx grid,
 * runs the order-1 7-point stencil TESTS times and reports the best
 * (minimum) wall-clock time.
 *
 * Usage: ./3d7pt Nx Ny Nz [Nt]   (each dimension gets +2 halo cells)
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults so the run is well-defined without arguments
   * (the original left all four uninitialized in that case). */
  Nx = Ny = Nz = 32 + 2;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes, indexed A[t%2][z][y][x]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 16;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize BOTH time planes including the index-0 halo layers, so
   * the stencil below never reads uninitialized memory (the original
   * started at index 1 and filled only plane 0, leaving the i-1/j-1/k-1
   * boundary reads and all of plane 1's boundary undefined). */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Was `min(...)`: only the MIN macro is defined in this file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return; /* kept for parity with the reference benchmarks */

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      }
      free(A[0][i]);
      free(A[1][i]);
      }
      free(A[0]);
      free(A[1]);
   */
  return 0;
}
|
gdcpp.h | /* gdcpp.h
*
* Author: Fabian Meyer
* Created On: 12 Jul 2019
* License: MIT
*/
#ifndef GDCPP_GDCPP_H_
#define GDCPP_GDCPP_H_
#include <Eigen/Geometry>

#include <cassert>
#include <cmath>
#include <functional>
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
namespace gdc
{
typedef long int Index;
/** Functor to compute forward differences.
* Computes the gradient of the objective f(x) as follows:
*
* grad(x) = (f(x + eps) - f(x)) / eps
*
* The computation requires len(x) evaluations of the objective.
*/
template<typename Scalar>
class ForwardDifferences
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::function<Scalar(const Vector &)> Objective;
private:
Scalar eps_;
Index threads_;
Objective objective_;
public:
ForwardDifferences()
: ForwardDifferences(
std::sqrt(std::numeric_limits<Scalar>::epsilon()))
{ }
ForwardDifferences(const Scalar eps)
: eps_(eps), threads_(1), objective_()
{ }
void setNumericalEpsilon(const Scalar eps)
{
eps_ = eps;
}
void setThreads(const Index threads)
{
threads_ = threads;
}
void setObjective(const Objective &objective)
{
objective_ = objective;
}
void operator()(const Vector &xval,
const Scalar fval,
Vector &gradient)
{
assert(objective_);
gradient.resize(xval.size());
#pragma omp parallel for num_threads(threads_)
for(Index i = 0; i < xval.size(); ++i)
{
Vector xvalN = xval;
xvalN(i) += eps_;
Scalar fvalN = objective_(xvalN);
gradient(i) = (fvalN - fval) / eps_;
}
}
};
/** Functor to compute backward differences.
* Computes the gradient of the objective f(x) as follows:
*
* grad(x) = (f(x) - f(x - eps)) / eps
*
* The computation requires len(x) evaluations of the objective.
*/
template<typename Scalar>
class BackwardDifferences
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::function<Scalar(const Vector &)> Objective;
private:
Scalar eps_;
Index threads_;
Objective objective_;
public:
BackwardDifferences()
: BackwardDifferences(
std::sqrt(std::numeric_limits<Scalar>::epsilon()))
{ }
BackwardDifferences(const Scalar eps)
: eps_(eps), threads_(1), objective_()
{ }
void setNumericalEpsilon(const Scalar eps)
{
eps_ = eps;
}
void setThreads(const Index threads)
{
threads_ = threads;
}
void setObjective(const Objective &objective)
{
objective_ = objective;
}
void operator()(const Vector &xval,
const Scalar fval,
Vector &gradient)
{
assert(objective_);
gradient.resize(xval.size());
#pragma omp parallel for num_threads(threads_)
for(Index i = 0; i < xval.size(); ++i)
{
Vector xvalN = xval;
xvalN(i) -= eps_;
Scalar fvalN = objective_(xvalN);
gradient(i) = (fval - fvalN) / eps_;
}
}
};
/** Functor to compute central differences.
 * Computes the gradient of the objective f(x) as follows:
 *
 * grad(x) = (f(x + 0.5 eps) - f(x - 0.5 eps)) / eps
 *
 * The computation requires 2 * len(x) evaluations of the objective.
 */
template<typename Scalar>
struct CentralDifferences
{
public:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef std::function<Scalar(const Vector &)> Objective;

private:
    Scalar eps_;          // finite-difference step
    Index threads_;       // OpenMP thread count for the evaluation loop
    Objective objective_; // objective whose gradient is approximated

public:
    // Default step: sqrt of the machine epsilon of Scalar.
    CentralDifferences()
        : CentralDifferences(
        std::sqrt(std::numeric_limits<Scalar>::epsilon()))
    { }

    CentralDifferences(const Scalar eps)
        : eps_(eps), threads_(1), objective_()
    { }

    /// Sets the numerical step eps used in the difference quotient.
    void setNumericalEpsilon(const Scalar eps)
    {
        eps_ = eps;
    }

    /// Sets the number of threads for the parallel evaluation loop.
    void setThreads(const Index threads)
    {
        threads_ = threads;
    }

    /// Sets the objective functor.
    void setObjective(const Objective &objective)
    {
        objective_ = objective;
    }

    /// Computes the central-difference gradient at xval. The second
    /// (fval) parameter is unused: central differences never need f(x).
    void operator()(const Vector &xval,
        const Scalar,
        Vector &gradient)
    {
        assert(objective_);

        // Two evaluations per coordinate, interleaved: even index i is
        // the +eps/2 shift of coordinate i/2, odd index the -eps/2 shift.
        Vector fvals(xval.size() * 2);

        #pragma omp parallel for num_threads(threads_)
        for(Index i = 0; i < fvals.size(); ++i)
        {
            Index idx = i / 2;
            Vector xvalN = xval;
            if(i % 2 == 0)
                xvalN(idx) += eps_ / 2;
            else
                xvalN(idx) -= eps_ / 2;

            fvals(i) = objective_(xvalN);
        }

        // Combine the paired evaluations into the gradient entries.
        gradient.resize(xval.size());
        for(Index i = 0; i < xval.size(); ++i)
            gradient(i) = (fvals(i * 2) - fvals(i * 2 + 1)) / eps_;
    }
};
/** Callback that ignores all of its arguments and always tells the
 * optimizer to continue. Used as the default Callback parameter. */
template<typename Scalar>
struct NoCallback
{
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    /** Always returns true (continue iterating). */
    bool operator()(const Index, const Vector &,
                    const Scalar, const Vector &) const
    {
        return true;
    }
};
/** Step size functor that always yields the same, fixed step size. */
template<typename Scalar>
class ConstantStepSize
{
public:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef std::function<Scalar(const Vector &, Vector &)> Objective;
    typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;

private:
    Scalar stepSize_;

public:
    /** Defaults to a step size of 0.7. */
    ConstantStepSize()
        : ConstantStepSize(0.7)
    { }

    ConstantStepSize(const Scalar stepSize)
        : stepSize_(stepSize)
    { }

    /** Set the step size returned by this functor.
     * @param stepSize step size returned by functor */
    void setStepSize(const Scalar stepSize)
    {
        stepSize_ = stepSize;
    }

    /** Unused; present so all step size functors share one interface. */
    void setObjective(const Objective &)
    { }

    /** Unused; present so all step size functors share one interface. */
    void setFiniteDifferences(const FiniteDifferences &)
    { }

    /** Ignores the iterate, value and gradient; returns the constant. */
    Scalar operator()(const Vector &, const Scalar, const Vector &)
    {
        return stepSize_;
    }
};
/** Step size functor to compute Barzilai-Borwein (BB) steps.
 * The functor can either compute the direct or inverse BB step.
 * The steps are computed as follows:
 *
 * s_k = x_k - x_k-1 k >= 1
 * y_k = grad_k - grad_k-1 k >= 1
 * Direct: stepSize = (s_k^T * s_k) / (y_k^T * s_k)
 * Inverse: stepSize = (y_k^T * s_k) / (y_k^T * y_k)
 *
 * The very first step is computed as a constant. */
template<typename Scalar>
class BarzilaiBorwein
{
public:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef std::function<Scalar(const Vector &, Vector &)> Objective;
    typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;

    /// Which BB formula to apply.
    enum class Method
    {
        Direct,
        Inverse
    };

private:
    Vector lastXval_;     // x_{k-1}; empty until the first call
    Vector lastGradient_; // grad_{k-1}
    Method method_;
    Scalar constStep_;    // step returned on the very first call

    // Step used before any history exists (currently unused; the
    // operator() reads constStep_ directly).
    Scalar constantStep() const
    {
        return constStep_;
    }

    /// Direct BB step |s^T s / s^T y|; falls back to 1 when the
    /// denominator is exactly zero.
    Scalar directStep(const Vector &xval,
        const Vector &gradient)
    {
        auto sk = xval - lastXval_;
        auto yk = gradient - lastGradient_;
        Scalar num = sk.dot(sk);
        Scalar denom = sk.dot(yk);

        if(denom == 0)
            return 1;
        else
            return std::abs(num / denom);
    }

    /// Inverse BB step |s^T y / y^T y|; falls back to 1 when the
    /// denominator is exactly zero.
    Scalar inverseStep(const Vector &xval,
        const Vector &gradient)
    {
        auto sk = xval - lastXval_;
        auto yk = gradient - lastGradient_;
        Scalar num = sk.dot(yk);
        Scalar denom = yk.dot(yk);

        if(denom == 0)
            return 1;
        else
            return std::abs(num / denom);
    }

public:
    BarzilaiBorwein()
        : BarzilaiBorwein(Method::Direct, 1e-4)
    { }

    BarzilaiBorwein(const Method method, const Scalar constStep)
        : lastXval_(), lastGradient_(), method_(method),
        constStep_(constStep)
    { }

    /// Unused; present so all step size functors share one interface.
    void setObjective(const Objective &)
    { }

    /// Unused; present so all step size functors share one interface.
    void setFiniteDifferences(const FiniteDifferences &)
    { }

    /// Selects the direct or inverse BB formula.
    void setMethod(const Method method)
    {
        method_ = method;
    }

    /// Sets the constant step used on the very first call.
    void setConstStepSize(const Scalar stepSize)
    {
        constStep_ = stepSize;
    }

    /// Returns the BB step for the current iterate and records
    /// (xval, gradient) as history for the next call.
    Scalar operator()(const Vector &xval,
        const Scalar,
        const Vector &gradient)
    {
        Scalar stepSize = 0;
        // No history yet: use the configured constant step.
        if(lastXval_.size() == 0)
        {
            stepSize = constStep_;
        }
        else
        {
            switch(method_)
            {
            case Method::Direct:
                stepSize = directStep(xval, gradient);
                break;
            case Method::Inverse:
                stepSize = inverseStep(xval, gradient);
                break;
            default:
                assert(false);
                break;
            }
        }

        lastGradient_ = gradient;
        lastXval_ = xval;

        return stepSize;
    }
};
/** Step size functor to perform Armijo Linesearch with backtracking.
 * The functor iteratively decreases the step size until the following
 * conditions are met:
 *
 * Armijo: f(x - stepSize * grad(x)) <= f(x) - cArmijo * stepSize * grad(x)^T * grad(x)
 *
 * If either condition does not hold the step size is decreased:
 *
 * stepSize = decrease * stepSize */
template<typename Scalar>
class ArmijoBacktracking
{
public:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef std::function<Scalar(const Vector &, Vector &)> Objective;
    typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;

protected:
    Scalar decrease_; ///< multiplicative backtracking factor, in (0, 1)
    Scalar cArmijo_;  ///< Armijo relaxation constant, in (0, 0.5)
    Scalar minStep_;  ///< lower bound for the returned step size
    Scalar maxStep_;  ///< upper bound (and starting point) of the search
    Index maxIt_;     ///< iteration budget; <= 0 means unbounded
    Objective objective_;
    FiniteDifferences finiteDifferences_;

    /// Evaluates the objective; if it did not supply a gradient, fills
    /// one in via finite differences.
    Scalar evaluateObjective(const Vector &xval, Vector &gradient)
    {
        gradient.resize(0);
        Scalar fval = objective_(xval, gradient);
        if(gradient.size() == 0)
            finiteDifferences_(xval, fval, gradient);
        return fval;
    }

    /// Hook for derived classes to impose a second linesearch condition
    /// (e.g. the Wolfe curvature condition). The base version always holds.
    virtual bool computeSecondCondition(const Scalar,
        const Scalar,
        const Scalar,
        const Vector &,
        const Vector &)
    {
        return true;
    }

public:
    ArmijoBacktracking()
        : ArmijoBacktracking(0.8, 1e-4, 1e-12, 1.0, 0)
    { }

    /** @param decrease backtracking factor in (0, 1)
     * @param cArmijo Armijo constant in (0, 0.5)
     * @param minStep minimum step size
     * @param maxStep maximum step size
     * @param iterations iteration budget; <= 0 for unbounded */
    ArmijoBacktracking(const Scalar decrease,
        const Scalar cArmijo,
        const Scalar minStep,
        const Scalar maxStep,
        const Index iterations)
        : decrease_(decrease), cArmijo_(cArmijo), minStep_(minStep),
        maxStep_(maxStep), maxIt_(iterations), objective_()
    {
        assert(decrease > 0);
        assert(decrease < 1);
        assert(cArmijo > 0);
        assert(cArmijo < 0.5);
        assert(minStep < maxStep);
    }

    // This class is a polymorphic base (computeSecondCondition is
    // overridden by WolfeBacktracking), so it needs a virtual destructor
    // for deletion through a base pointer to be well-defined. Declaring
    // it suppresses the implicit copies/moves, so default all of them.
    ArmijoBacktracking(const ArmijoBacktracking &) = default;
    ArmijoBacktracking(ArmijoBacktracking &&) = default;
    ArmijoBacktracking &operator=(const ArmijoBacktracking &) = default;
    ArmijoBacktracking &operator=(ArmijoBacktracking &&) = default;
    virtual ~ArmijoBacktracking() = default;

    /** Set the decreasing factor for backtracking.
     * Assure that decrease in (0, 1).
     * @param decrease decreasing factor */
    void setBacktrackingDecrease(const Scalar decrease)
    {
        assert(decrease > 0);
        assert(decrease < 1);
        decrease_ = decrease;
    }

    /** Set the relaxation constant for the Armijo condition (see class
     * description).
     * Assure cArmijo in (0, 0.5).
     * @param cArmijo armijo constant */
    void setArmijoConstant(const Scalar cArmijo)
    {
        assert(cArmijo > 0);
        assert(cArmijo < 0.5);
        cArmijo_ = cArmijo;
    }

    /** Set the bounds for the step size during linesearch.
     * The final step size is guaranteed to be in [minStep, maxStep].
     * @param minStep minimum step size
     * @param maxStep maximum step size */
    void setStepBounds(const Scalar minStep, const Scalar maxStep)
    {
        assert(minStep < maxStep);
        minStep_ = minStep;
        maxStep_ = maxStep;
    }

    /** Set the maximum number of iterations.
     * Set to 0 or negative for infinite iterations.
     * @param iterations maximum number of iterations */
    void setMaxIterations(const Index iterations)
    {
        maxIt_ = iterations;
    }

    /// Sets the objective used to probe candidate steps.
    void setObjective(const Objective &objective)
    {
        objective_ = objective;
    }

    /// Sets the finite-difference fallback for gradient evaluation.
    void setFiniteDifferences(const FiniteDifferences &finiteDifferences)
    {
        finiteDifferences_ = finiteDifferences;
    }

    /// Runs the backtracking loop and returns the accepted step size.
    Scalar operator()(const Vector &xval,
        const Scalar fval,
        const Vector &gradient)
    {
        assert(objective_);
        assert(finiteDifferences_);

        // Start one decrease above maxStep so the first probe is maxStep.
        Scalar stepSize = maxStep_ / decrease_;
        Vector gradientN;
        Vector xvalN;
        Scalar fvalN;
        bool armijoCondition = false;
        bool secondCondition = false;

        Index iterations = 0;
        while((maxIt_ <= 0 || iterations < maxIt_) &&
            stepSize * decrease_ >= minStep_ &&
            !(armijoCondition && secondCondition))
        {
            stepSize = decrease_ * stepSize;
            xvalN = xval - stepSize * gradient;
            fvalN = evaluateObjective(xvalN, gradientN);

            armijoCondition = fvalN <= fval - cArmijo_ * stepSize * gradient.dot(gradient);
            secondCondition = computeSecondCondition(stepSize, fval, fvalN, gradient, gradientN);

            ++iterations;
        }

        return stepSize;
    }
};
/** Step size functor to perform Wolfe Linesearch with backtracking.
 * The functor iteratively decreases the step size until the following
 * conditions are met:
 *
 * Armijo: f(x - stepSize * grad(x)) <= f(x) - cArmijo * stepSize * grad(x)^T * grad(x)
 * Wolfe: grad(x)^T grad(x - stepSize * grad(x)) <= cWolfe * grad(x)^T * grad(x)
 *
 * If either condition does not hold the step size is decreased:
 *
 * stepSize = decrease * stepSize */
template<typename Scalar>
class WolfeBacktracking : public ArmijoBacktracking<Scalar>
{
public:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef std::function<Scalar(const Vector &, Vector &)> Objective;
    typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;

protected:
    Scalar cWolfe_; // Wolfe curvature constant, in (cArmijo, 1)

    /// Second condition plugged into the Armijo backtracking loop:
    /// the Wolfe curvature condition on the candidate's gradient.
    virtual bool computeSecondCondition(const Scalar,
        const Scalar ,
        const Scalar ,
        const Vector &gradient,
        const Vector &gradientN)
    {
        return gradient.dot(gradientN) <= cWolfe_ * gradient.dot(gradient);
    }

public:
    WolfeBacktracking()
        : WolfeBacktracking(0.8, 1e-4, 0.9, 1e-12, 1.0, 0)
    { }

    WolfeBacktracking(const Scalar decrease,
        const Scalar cArmijo,
        const Scalar cWolfe,
        const Scalar minStep,
        const Scalar maxStep,
        const Index iterations)
        : ArmijoBacktracking<Scalar>(decrease, cArmijo, minStep, maxStep,
        iterations),cWolfe_(cWolfe)
    {
        assert(cWolfe < 1);
        assert(cArmijo < cWolfe);
    }

    /** Set the Wolfe constant for the curvature condition (see class
     * description).
     * Assure that cArmijo < cWolfe < 1.
     * @param cWolfe wolfe constant */
    void setWolfeConstant(const Scalar cWolfe)
    {
        assert(cWolfe < 1);
        cWolfe_ = cWolfe;
    }
};
/** Step size functor which searches for a step that reduces the function
* value.
* The functor iteratively decreases the step size until the following
* condition is met:
*
* f(x - stepSize * grad) < f(x)
*
* If this condition does not hold the step size is decreased:
*
* stepSize = decrease * stepSize
*
* This functor does not require to compute any gradients and does not use
* finite differences. */
template<typename Scalar>
class DecreaseBacktracking
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::function<Scalar(const Vector &, Vector &)> Objective;
typedef std::function<void(const Vector &, const Scalar, Vector &)> FiniteDifferences;
private:
Scalar decrease_;
Scalar minStep_;
Scalar maxStep_;
Index maxIt_;
Objective objective_;
public:
DecreaseBacktracking()
: DecreaseBacktracking(0.8, 1e-12, 1.0, 0)
{ }
DecreaseBacktracking(const Scalar decrease,
const Scalar minStep,
const Scalar maxStep,
const Index iterations)
: decrease_(decrease), minStep_(minStep),
maxStep_(maxStep), maxIt_(iterations), objective_()
{ }
/** Set the decreasing factor for backtracking.
* Assure that decrease in (0, 1).
* @param decrease decreasing factor */
void setBacktrackingDecrease(const Scalar decrease)
{
decrease_ = decrease;
}
/** Set the bounds for the step size during linesearch.
* The final step size is guaranteed to be in [minStep, maxStep].
* @param minStep minimum step size
* @param maxStep maximum step size */
void setStepBounds(const Scalar minStep, const Scalar maxStep)
{
assert(minStep < maxStep);
minStep_ = minStep;
maxStep_ = maxStep;
}
/** Set the maximum number of iterations.
* Set to 0 or negative for infinite iterations.
* @param iterations maximum number of iterations */
void setMaxIterations(const Index iterations)
{
maxIt_ = iterations;
}
void setObjective(const Objective &objective)
{
objective_ = objective;
}
void setFiniteDifferences(const FiniteDifferences &)
{ }
Scalar operator()(const Vector &xval,
const Scalar fval,
const Vector &gradient)
{
assert(objective_);
Scalar stepSize = maxStep_ / decrease_;
Vector xvalN;
Vector gradientN;
Scalar fvalN;
bool improvement = false;
Index iterations = 0;
while((maxIt_ <= 0 || iterations < maxIt_) &&
stepSize * decrease_ >= minStep_ &&
!improvement)
{
stepSize = decrease_ * stepSize;
xvalN = xval - stepSize * gradient;
fvalN = objective_(xvalN, gradientN);
improvement = fvalN < fval;
++iterations;
}
return stepSize;
}
};
/** Gradient descent optimizer with pluggable step-size strategy,
 * finite-difference gradient fallback, momentum and a per-iteration
 * callback.
 *
 * The Objective is called as Scalar(const Vector &xval, Vector &gradient);
 * if it leaves gradient empty, FiniteDifferences fills it in. */
template<typename Scalar,
    typename Objective,
    typename StepSize=BarzilaiBorwein<Scalar>,
    typename Callback=NoCallback<Scalar>,
    typename FiniteDifferences=CentralDifferences<Scalar>>
class GradientDescent
{
public:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    /// Outcome of minimize().
    struct Result
    {
        Index iterations; // iterations actually performed
        bool converged;   // gradient or step length fell below threshold
        Scalar fval;      // objective value at xval
        Vector xval;      // final iterate
    };

protected:
    Index maxIt_;           // iteration budget; <= 0 means unbounded
    Scalar minGradientLen_; // convergence threshold on ||gradient||
    Scalar minStepLen_;     // convergence threshold on ||step||
    Scalar momentum_;       // momentum weight on the previous step
    Index verbosity_;       // 0 = silent; higher levels print more
    Objective objective_;
    StepSize stepSize_;
    Callback callback_;
    FiniteDifferences finiteDifferences_;

    /// Evaluates the objective; falls back to finite differences when
    /// the objective leaves the gradient empty.
    Scalar evaluateObjective(const Vector &xval, Vector &gradient)
    {
        gradient.resize(0);
        Scalar fval = objective_(xval, gradient);
        if(gradient.size() == 0)
            finiteDifferences_(xval, fval, gradient);
        return fval;
    }

    /// Formats a vector as "[ ... ]" with fixed-point entries, for the
    /// verbose per-iteration log lines.
    std::string vector2str(const Vector &vec) const
    {
        std::stringstream ss1;
        ss1 << std::fixed << std::showpoint << std::setprecision(6);
        std::stringstream ss2;
        ss2 << '[';
        for(Index i = 0; i < vec.size(); ++i)
        {
            ss1 << vec(i);
            ss2 << std::setfill(' ') << std::setw(10) << ss1.str();
            if(i != vec.size() - 1)
                ss2 << ' ';
            ss1.str("");
        }
        ss2 << ']';

        return ss2.str();
    }

public:

    GradientDescent()
        : maxIt_(0), minGradientLen_(static_cast<Scalar>(1e-9)),
        minStepLen_(static_cast<Scalar>(1e-9)), momentum_(0),
        verbosity_(0), objective_(), stepSize_(), callback_(),
        finiteDifferences_()
    {
    }

    ~GradientDescent()
    {
    }

    /// Forwards the thread count to the finite-difference functor.
    void setThreads(const Index threads)
    {
        finiteDifferences_.setThreads(threads);
    }

    /// Forwards the numerical epsilon to the finite-difference functor.
    void setNumericalEpsilon(const Scalar eps)
    {
        finiteDifferences_.setNumericalEpsilon(eps);
    }

    /// Iteration budget; <= 0 means iterate until convergence.
    void setMaxIterations(const Index iterations)
    {
        maxIt_ = iterations;
    }

    /// Sets the objective to minimize.
    void setObjective(const Objective &objective)
    {
        objective_ = objective;
    }

    /// Callback invoked once per iteration; returning false stops the run.
    void setCallback(const Callback &callback)
    {
        callback_ = callback;
    }

    /// Convergence threshold on the gradient norm.
    void setMinGradientLength(const Scalar gradientLen)
    {
        minGradientLen_ = gradientLen;
    }

    /// Convergence threshold on the step norm.
    void setMinStepLength(const Scalar stepLen)
    {
        minStepLen_ = stepLen;
    }

    /// Replaces the step size functor (note: taken by value).
    void setStepSize(const StepSize stepSize)
    {
        stepSize_ = stepSize;
    }

    /// Momentum weight applied to the previous step.
    void setMomentum(const Scalar momentum)
    {
        momentum_ = momentum;
    }

    /// Logging level: 0 silent, 1+ per-iteration stats, 2+ xval,
    /// 3+ callback result and gradient, 4+ the step vector.
    void setVerbosity(const Index verbosity)
    {
        verbosity_ = verbosity;
    }

    /// Runs gradient descent from initialGuess until convergence, the
    /// iteration budget is exhausted, or the callback returns false.
    Result minimize(const Vector &initialGuess)
    {
        // Wire the objective into the helper functors before iterating.
        finiteDifferences_.setObjective(
            [this](const Vector &xval)
            { Vector tmp; return this->objective_(xval, tmp); });
        stepSize_.setObjective(
            [this](const Vector &xval, Vector &gradient)
            { return this->objective_(xval, gradient); });
        stepSize_.setFiniteDifferences(
            [this](const Vector &xval, const Scalar fval, Vector &gradient)
            { this->finiteDifferences_(xval, fval, gradient); });

        Vector xval = initialGuess;
        Vector gradient;
        Scalar fval;
        // Seed both lengths above their thresholds so the loop body runs
        // at least once (fval is therefore always assigned before use).
        Scalar gradientLen = minGradientLen_ + 1;
        Scalar stepSize;
        Vector step = Vector::Zero(xval.size());
        Scalar stepLen = minStepLen_ + 1;
        bool callbackResult = true;

        Index iterations = 0;
        while((maxIt_ <= 0 || iterations < maxIt_) &&
            gradientLen >= minGradientLen_ &&
            stepLen >= minStepLen_
            && callbackResult)
        {
            // Apply the previous step (zero on the first iteration).
            xval -= step;
            fval = evaluateObjective(xval, gradient);

            gradientLen = gradient.norm();
            // update step according to step size and momentum
            stepSize = stepSize_(xval, fval, gradient);
            step = momentum_ * step + (1 - momentum_) * stepSize * gradient;
            stepLen = step.norm();

            // evaluate callback an save its result
            callbackResult = callback_(iterations, xval, fval, gradient);

            if(verbosity_ > 0)
            {
                std::stringstream ss;
                ss << "it=" << std::setfill('0')
                    << std::setw(4) << iterations
                    << std::fixed << std::showpoint << std::setprecision(6)
                    << " gradlen=" << gradientLen
                    << " stepsize=" << stepSize
                    << " steplen=" << stepLen;

                if(verbosity_ > 2)
                    ss << " callback=" << (callbackResult ? "true" : "false");

                ss << " fval=" << fval;

                if(verbosity_ > 1)
                    ss << " xval=" << vector2str(xval);

                if(verbosity_ > 2)
                    ss << " gradient=" << vector2str(gradient);

                if(verbosity_ > 3)
                    ss << " step=" << vector2str(step);

                std::cout << ss.str() << std::endl;
            }

            ++iterations;
        }

        Result result;
        result.xval = xval;
        result.fval = fval;
        result.iterations = iterations;
        result.converged = gradientLen < minGradientLen_ ||
            stepLen < minStepLen_;

        return result;
    }
};
}
#endif
|
convolutiondepthwise_3x3_pack8_fp16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, on pack8 (8-wide interleaved) data.
// Kernel weights are stored as fp16 and expanded to 8 fp32 lanes with
// loadfp16(); activations and outputs are fp32 AVX vectors. Each group
// (= channel) is independent, so the group loop runs in parallel via OpenMP.
// NOTE(review): assumes the caller padded bottom_blob so that input rows
// i..i+2 and two trailing columns are readable for every output row --
// confirm at the call site.
static void convdw3x3s1_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias vector, or zero when no bias term is supplied
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const unsigned short* k0 = (const unsigned short*)kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows feed one output row (3x3 kernel)
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// expand the nine fp16 taps to fp32 once per group
__m256 _k00 = loadfp16(k0);
__m256 _k01 = loadfp16(k0 + 8);
__m256 _k02 = loadfp16(k0 + 16);
__m256 _k10 = loadfp16(k0 + 24);
__m256 _k11 = loadfp16(k0 + 32);
__m256 _k12 = loadfp16(k0 + 40);
__m256 _k20 = loadfp16(k0 + 48);
__m256 _k21 = loadfp16(k0 + 56);
__m256 _k22 = loadfp16(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: 8 output pixels per iteration; at stride 1 consecutive
// outputs reuse input loads shifted by one pack8 slot (8 floats)
for (; j + 7 < outw; j += 8)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
// store each finished pixel before accumulating the next one so the
// store overlaps with the following FMA chain
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);
__m256 _sum2 = _bias0;
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2);
__m256 _sum3 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3);
__m256 _sum4 = _bias0;
__m256 _r06 = _mm256_loadu_ps(r0 + 48);
__m256 _r16 = _mm256_loadu_ps(r1 + 48);
__m256 _r26 = _mm256_loadu_ps(r2 + 48);
_mm256_storeu_ps(outptr0 + 24, _sum3);
_sum4 = _mm256_comp_fmadd_ps(_k00, _r04, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r05, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k02, _r06, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k10, _r14, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k11, _r15, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k12, _r16, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k20, _r24, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k21, _r25, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k22, _r26, _sum4);
__m256 _sum5 = _bias0;
__m256 _r07 = _mm256_loadu_ps(r0 + 56);
__m256 _r17 = _mm256_loadu_ps(r1 + 56);
__m256 _r27 = _mm256_loadu_ps(r2 + 56);
_mm256_storeu_ps(outptr0 + 32, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k00, _r05, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r06, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k02, _r07, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k10, _r15, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k11, _r16, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k12, _r17, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k20, _r25, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k21, _r26, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k22, _r27, _sum5);
__m256 _sum6 = _bias0;
__m256 _r08 = _mm256_loadu_ps(r0 + 64);
__m256 _r18 = _mm256_loadu_ps(r1 + 64);
__m256 _r28 = _mm256_loadu_ps(r2 + 64);
_mm256_storeu_ps(outptr0 + 40, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k00, _r06, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r07, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k02, _r08, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k10, _r16, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k11, _r17, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k12, _r18, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k20, _r26, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k21, _r27, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k22, _r28, _sum6);
__m256 _sum7 = _bias0;
__m256 _r09 = _mm256_loadu_ps(r0 + 72);
__m256 _r19 = _mm256_loadu_ps(r1 + 72);
__m256 _r29 = _mm256_loadu_ps(r2 + 72);
_mm256_storeu_ps(outptr0 + 48, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k00, _r07, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r08, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k02, _r09, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k10, _r17, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k11, _r18, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k12, _r19, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k20, _r27, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k21, _r28, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k22, _r29, _sum7);
_mm256_storeu_ps(outptr0 + 56, _sum7);
// 8 outputs consumed 8 pack8 input positions (stride 1)
r0 += 64;
r1 += 64;
r2 += 64;
outptr0 += 64;
}
// 4-pixel tail, same scheme as above
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);
__m256 _sum2 = _bias0;
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2);
__m256 _sum3 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3);
_mm256_storeu_ps(outptr0 + 24, _sum3);
r0 += 32;
r1 += 32;
r2 += 32;
outptr0 += 32;
}
// 2-pixel tail
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 16;
}
// 1-pixel tail
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
// skip the 2 extra (padded) pack8 columns at the end of each input row
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
}
}
}
// Depthwise 3x3 convolution, stride 2, on pack8 (8-wide interleaved) data.
// Same weight layout as the stride-1 variant: fp16 taps expanded to fp32
// with loadfp16(), fp32 activations/outputs, one OpenMP task per group.
// At stride 2 consecutive outputs share only every other input column, so
// adjacent accumulators reuse loads offset by two pack8 slots.
static void convdw3x3s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// floats to skip after each output row: remainder of the current input row
// plus one full row, since stride 2 consumes two input rows per output row
const int tailstep = (w - 2 * outw + w) * 8;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias vector, or zero when no bias term is supplied
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const unsigned short* k0 = (const unsigned short*)kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// expand the nine fp16 taps to fp32 once per group
__m256 _k00 = loadfp16(k0);
__m256 _k01 = loadfp16(k0 + 8);
__m256 _k02 = loadfp16(k0 + 16);
__m256 _k10 = loadfp16(k0 + 24);
__m256 _k11 = loadfp16(k0 + 32);
__m256 _k12 = loadfp16(k0 + 40);
__m256 _k20 = loadfp16(k0 + 48);
__m256 _k21 = loadfp16(k0 + 56);
__m256 _k22 = loadfp16(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: 4 output pixels per iteration
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
// store each finished pixel before accumulating the next one
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);
__m256 _sum2 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
__m256 _r06 = _mm256_loadu_ps(r0 + 48);
__m256 _r16 = _mm256_loadu_ps(r1 + 48);
__m256 _r26 = _mm256_loadu_ps(r2 + 48);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k00, _r04, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r05, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k02, _r06, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k10, _r14, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k11, _r15, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k12, _r16, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k20, _r24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k21, _r25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k22, _r26, _sum2);
__m256 _sum3 = _bias0;
__m256 _r07 = _mm256_loadu_ps(r0 + 56);
__m256 _r17 = _mm256_loadu_ps(r1 + 56);
__m256 _r27 = _mm256_loadu_ps(r2 + 56);
__m256 _r08 = _mm256_loadu_ps(r0 + 64);
__m256 _r18 = _mm256_loadu_ps(r1 + 64);
__m256 _r28 = _mm256_loadu_ps(r2 + 64);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k00, _r06, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r07, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k02, _r08, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k10, _r16, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k11, _r17, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k12, _r18, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k20, _r26, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k21, _r27, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k22, _r28, _sum3);
_mm256_storeu_ps(outptr0 + 24, _sum3);
// 4 outputs consume 8 pack8 input positions (stride 2)
r0 += 2 * 32;
r1 += 2 * 32;
r2 += 2 * 32;
outptr0 += 32;
}
// 2-pixel tail
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 2 * 16;
r1 += 2 * 16;
r2 += 2 * 16;
outptr0 += 16;
}
// 1-pixel tail
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
outptr0 += 8;
}
// advance to the next pair of input rows (stride 2 in height)
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
GB_AxB_saxpy5_iso_or_pattern.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy5_iso_or_pattern.c: C+=A*B; C full, A bitmap/full and iso/pattern
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is as-if-full.
// A is bitmap or full, and either iso or pattern-only
// B is sparse or hypersparse.
{
// NOTE(review): this file is a template body that is #include'd by a
// wrapper which defines C, A, B, B_slice, ntasks, nthreads, and the GB_*
// configuration macros; it is not a standalone translation unit.
//--------------------------------------------------------------------------
// get C, A, and B
//--------------------------------------------------------------------------
const int64_t m = C->vlen ; // # of rows of C and A
#if GB_A_IS_BITMAP
// A bitmap: Ab [p] nonzero means the entry at position p is present
const int8_t *restrict Ab = A->b ;
#endif
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int64_t *restrict Bi = B->i ;
const bool B_iso = B->iso ;
#if !GB_A_IS_PATTERN
const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
#endif
#if !GB_B_IS_PATTERN
const GB_BTYPE *restrict Bx = (GB_BTYPE *) B->x ;
#endif
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
//--------------------------------------------------------------------------
// C += A*B where A is bitmap/full, and either iso-valued or pattern-only
//--------------------------------------------------------------------------
// tasks partition the columns of B; B_slice has ntasks+1 boundaries
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
#if !GB_A_IS_PATTERN
// get the iso value of A (all entries of an iso matrix share Ax [0])
const GB_ATYPE ax = Ax [0] ;
#endif
// get the task descriptor
const int64_t jB_start = B_slice [tid] ;
const int64_t jB_end = B_slice [tid+1] ;
// C(:,jB_start:jB_end-1) += A * B(:,jB_start:jB_end-1)
for (int64_t jB = jB_start ; jB < jB_end ; jB++)
{
// get B(:,j) and C(:,j); C is as-if-full so column j starts at j*m
const int64_t j = GBH (Bh, jB) ;
const int64_t pC = j * m ;
const int64_t pB_start = Bp [jB] ;
const int64_t pB_end = Bp [jB+1] ;
// C(:,j) += A*B(:,j)
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
// get B(k,j)
const int64_t k = Bi [pB] ;
#if GB_A_IS_BITMAP
// get A(:,k)
const int64_t pA = k * m ;
#endif
#if GB_IS_FIRSTI_MULTIPLIER
// s depends on i
#define s (i + GB_OFFSET)
#else
// s = ax * bkj, not dependent on i
GB_CTYPE s ;
GB_MULT (s, ax, GBX (Bx, pB, B_iso), ignore, k, j) ;
#endif
// C(:,j) += s
for (int64_t i = 0 ; i < m ; i++)
{
#if GB_A_IS_BITMAP
// skip entries not present in the A bitmap
if (!Ab [pA + i]) continue ;
#endif
// C(i,j) += s ;
GB_CIJ_UPDATE (pC + i, s) ;
}
}
}
}
}
#undef s
|
ten_tusscher_2004_epi_S1_6.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S1_6.h"
// Fills the requested fields of the cell-model descriptor: the resting
// potential (INITIAL_V) and/or the number of ODE state variables (NEQ).
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_neq) {
        cell_model->number_of_ode_equations = NEQ;
    }
    if(get_initial_v) {
        cell_model->initial_v = INITIAL_V;
    }
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default (paper) initial conditions, kept for reference:
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f; //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */
    // Elnaz's steady-state initial conditions (pre-paced state)
    const real steady_state[] = {-86.6064669642929,0.00127958647137661,0.780646393787312,0.780487891408514,0.000173584624633959,0.485487828596219,0.00293230969261734,0.999998360971933,1.92121849077563e-08,1.88145674866789e-05,0.999776948081716,1.00718539597045,0.999996533595373,4.30563502204742e-05,0.716390886105942,9.21744894085960,140.245419902480};
    uint32_t eq = 0;
    while (eq < NEQ) {
        sv[eq] = steady_state[eq];
        ++eq;
    }
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Performs a single explicit step of the model for one cell.
// NOTE: despite the name, RHS_cpu writes the UPDATED state values into its
// output array (the integration happens inside), so copying deriv back into
// sv completes the step.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ];
    real next_state[NEQ];
    int eq;
    for(eq = 0; eq < NEQ; ++eq) {
        state[eq] = sv[eq];
    }
    RHS_cpu(state, next_state, stim_current, dt);
    for(eq = 0; eq < NEQ; ++eq) {
        sv[eq] = next_state[eq];
    }
}
// Computes one explicit time step of the Ten Tusscher 2004 epicardial
// ventricular cell model (this S1_6 variant overrides the default
// conductances with a fitted parameter set below). Despite the name, rDY_
// receives the UPDATED state values, not derivatives: gating variables are
// advanced with the Rush-Larsen exponential scheme
// (INF - (INF - x) * exp(-dt/TAU)) and voltage/concentrations with forward
// Euler, all inside this function.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted parameter set for this S1_6 scenario; overrides the defaults above
// (conductances, pump/exchanger rates, and the SR release/leak parameters).
real parameters []={13.9565782218666,0.000287174371586985,0.000141340119238607,0.000581300894818177,0.247996276322519,0.183526744381808,0.0916439019365131,3.36936874118326,0.0142522777756354,2.50047611779782,1098.80622386062,0.000523336135399631,0.308744870110979,0.0177121653217909,0.00514911951229914,2.73381165333318e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// precomputed Rush-Larsen factors for the FCa and G gates
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents: Nernst/reversal potentials
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (forward Euler with analytic buffering solve)
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
// quadratic-formula solve for buffered SR calcium
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// transient-outward (Ito) gate kinetics depend on the cell type macro
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (Rush-Larsen exponential integration)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G may only decrease while depolarized (above -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (forward Euler; note rDY_ holds updated values, not rates)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
GB_binop__band_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__band_uint64
// A.*B function (eWiseMult): GB_AemultB__band_uint64
// A*D function (colscale): GB_AxD__band_uint64
// D*A function (rowscale): GB_DxB__band_uint64
// C+=B function (dense accum): GB_Cdense_accumB__band_uint64
// C+=b function (dense accum): GB_Cdense_accumb__band_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_uint64
// C=scalar+B GB_bind1st__band_uint64
// C=scalar+B' GB_bind1st_tran__band_uint64
// C=A+scalar GB_bind2nd__band_uint64
// C=A'+scalar GB_bind2nd_tran__band_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT64 || GxB_NO_BAND_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// GB_Cdense_ewise3_noaccum__band_uint64: C = A+B where C, A, and B are all
// dense, hard-coded for the BAND_UINT64 operator (cij = aij & bij).  The
// actual loop lives in the included template; this wrapper binds the
// type/operator macros defined at the top of this file.
GrB_Info GB_Cdense_ewise3_noaccum__band_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out via the GxB_NO_* flags;
// the caller must fall back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// GB_Cdense_accumB__band_uint64: C += B, accumulating a sparse matrix B into
// a dense matrix C with cij = cij & bij.  The slice arrays describe the
// parallel task partition of B (presumably produced by GB_ek_slice — see
// GB_ek_slice.h, included above; confirm against the caller).
GrB_Info GB_Cdense_accumB__band_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// GB_Cdense_accumb__band_uint64: C += b, accumulating a scalar b into every
// entry of the dense matrix C: cij = cij & b.
GrB_Info GB_Cdense_accumb__band_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): this second return appears unreachable (the block above
// ends in a return); harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// GB_AxD__band_uint64: C = A*D, column scale with a diagonal matrix D,
// using cij = aij & djj.  The *_is_pattern flags indicate the values of
// that input are to be ignored (pattern-only); the slice arrays carry the
// parallel task partition.
GrB_Info GB_AxD__band_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array, written by the template below
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// GB_DxB__band_uint64: C = D*B, row scale with a diagonal matrix D,
// using cij = dii & bij.
GrB_Info GB_DxB__band_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array, written by the template below
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// GB_AaddB__band_uint64: eWiseAdd, C = A+B or C<M> = A+B, with the BAND
// operator (cij = aij & bij).  M is an optional mask; Mask_struct /
// Mask_comp select structural / complemented use of it.  The C_to_* maps
// and TaskList drive the parallel schedule inside GB_add_template.c.
GrB_Info GB_AaddB__band_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slice arrays; released by GB_FREE_ALL (defined just above)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// GB_AemultB__band_uint64: eWiseMult, C = A.*B or C<M> = A.*B, with the
// BAND operator (cij = aij & bij).  Same parameter conventions as
// GB_AaddB__band_uint64 above, but the work is done by GB_emult_template.c.
GrB_Info GB_AemultB__band_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slice arrays; released by GB_FREE_ALL (defined before GB_AaddB)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// GB_bind1st__band_uint64: Cx = op (x, Bx), with the scalar bound as the
// first operand: Cx [p] = x & Bx [p] for every entry present in B.
// Cx and Bx may be aliased.  Bb is B's bitmap (NULL-tolerant via GBB).
GrB_Info GB_bind1st__band_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in B's bitmap are computed
        if (GBB (Bb, p))
        {
            uint64_t bij = Bx [p] ;
            Cx [p] = (x) & (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// GB_bind2nd__band_uint64: Cx = op (Ax, y), with the scalar bound as the
// second operand: Cx [p] = Ax [p] & y for every entry present in A.
// Cx and Ax may be aliased.  Ab is A's bitmap (NULL-tolerant via GBB).
GrB_Info GB_bind2nd__band_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in A's bitmap are computed
        if (GBB (Ab, p))
        {
            uint64_t aij = Ax [p] ;
            Cx [p] = (aij) & (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
// GB_bind1st_tran__band_uint64: C = op (x, A'), i.e. transpose A and apply
// the BAND operator with the scalar x bound as the first operand.  The
// transpose loop is in GB_unop_transpose.c, driven by GB_CAST_OP (defined
// just above).
GrB_Info GB_bind1st_tran__band_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this translation unit
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
// GB_bind2nd_tran__band_uint64: C = op (A', y), i.e. transpose A and apply
// the BAND operator with the scalar y bound as the second operand.  The
// transpose loop is in GB_unop_transpose.c, driven by GB_CAST_OP (defined
// just above).
GrB_Info GB_bind2nd_tran__band_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_fc64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_int64)
// op(A') function: GB (_unop_tran__identity_fc64_int64)
// C type: GxB_FC64_t
// A type: int64_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB (_unop_apply__identity_fc64_int64): Cx = identity (cast (Ax)).
// The identity op itself is a no-op; the work is the typecast of each
// int64_t entry of A to GxB_FC64_t (real part = the value, imaginary 0).
// Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__identity_fc64_int64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case: convert only the entries present in Ab.
        // A->b has already been memcpy'd into C->b by the caller.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                int64_t aij = Ax [p] ;
                Cx [p] = GxB_CMPLX ((double) (aij), 0) ;
            }
        }
    }
    else
    {
        // no bitmap: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            Cx [p] = GxB_CMPLX ((double) (aij), 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB (_unop_tran__identity_fc64_int64): C = identity (cast (A')), i.e.
// transpose A while typecasting int64_t -> GxB_FC64_t.  The transpose loop
// is in GB_unop_transpose.c, driven by the GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__identity_fc64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pair_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_uint32)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = 1
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// GB (_Cdense_ewise3_noaccum__pair_uint32): C = A+B, all three matrices
// dense, with the PAIR operator: every cij is simply set to 1 (the values
// of A and B are never read — see GB_BINOP above).
GrB_Info GB (_Cdense_ewise3_noaccum__pair_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out via the GxB_NO_* flags
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// GB (_Cdense_accumB__pair_uint32): C += B, accumulating a sparse matrix B
// into a dense C with the PAIR operator (cij = 1 wherever B has an entry).
// B_ek_slicing carries the parallel task partition of B.
GrB_Info GB (_Cdense_accumB__pair_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// GB (_Cdense_accumb__pair_uint32): C += b, accumulating a scalar into a
// dense matrix with the PAIR operator (every cij becomes 1).
GrB_Info GB (_Cdense_accumb__pair_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): this second return appears unreachable (the block above
// ends in a return); harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB (_AaddB__pair_uint32): eWiseAdd, C = A+B or C<M> = A+B, with the PAIR
// operator (cij = 1).  M is an optional mask; Mask_struct / Mask_comp
// select structural / complemented use of it.  Workspace is declared with
// GB_WERK_DECLARE and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__pair_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
image_pyramid.h | /*
*
* This file is part of the open-source SeetaFace engine, which includes three modules:
* SeetaFace Detection, SeetaFace Alignment, and SeetaFace Identification.
*
* This file is part of the SeetaFace Detection module, containing codes implementing the
* face detection method described in the following paper:
*
*
* Funnel-structured cascade for multi-view face detection with alignment awareness,
* Shuzhe Wu, Meina Kan, Zhenliang He, Shiguang Shan, Xilin Chen.
* In Neurocomputing (under review)
*
*
* Copyright (C) 2016, Visual Information Processing and Learning (VIPL) group,
* Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China.
*
* The codes are mainly developed by Shuzhe Wu (a Ph.D supervised by Prof. Shiguang Shan)
*
* As an open-source face recognition engine: you can redistribute SeetaFace source codes
* and/or modify it under the terms of the BSD 2-Clause License.
*
* You should have received a copy of the BSD 2-Clause License along with the software.
* If not, see < https://opensource.org/licenses/BSD-2-Clause>.
*
* Contact Info: you can send an email to SeetaFace@vipl.ict.ac.cn for any problems.
*
* Note: the above information must be kept whenever or wherever the codes are used.
*
*/
#ifndef SEETA_FD_UTIL_IMAGE_PYRAMID_H_
#define SEETA_FD_UTIL_IMAGE_PYRAMID_H_
#include <cstdint>
#include <string>
#include <cstring>
#include "common.h"
namespace seeta {
namespace fd {
// Resize a single-channel 8-bit image from src into dest using bilinear
// interpolation.  dest must already carry its target width/height and an
// allocated data buffer of dest->width * dest->height bytes.
//
// Fixes vs. the original: the 2x2 sample neighborhood is now clamped on
// both sides, so sources that are 1 pixel wide or tall no longer index
// src_data out of bounds (previously n_x_s could become -1 when
// src_width < 2).  Behavior is unchanged for all sources >= 2x2.
static void ResizeImage(const seeta::ImageData & src, seeta::ImageData* dest) {
  int32_t src_width = src.width;
  int32_t src_height = src.height;
  int32_t dest_width = dest->width;
  int32_t dest_height = dest->height;

  // Fast path: identical geometry, just copy the pixels.
  if (src_width == dest_width && src_height == dest_height) {
    std::memcpy(dest->data, src.data, src_width * src_height * sizeof(uint8_t));
    return;
  }

  double lf_x_scl = static_cast<double>(src_width) / dest_width;
  double lf_y_scl = static_cast<double>(src_height) / dest_height;
  const uint8_t* src_data = src.data;
  uint8_t* dest_data = dest->data;

#pragma omp parallel num_threads(SEETA_NUM_THREADS)
  {
#pragma omp for nowait
    for (int32_t y = 0; y < dest_height; y++) {
      for (int32_t x = 0; x < dest_width; x++) {
        // Source-space coordinate corresponding to dest pixel (x, y).
        double lf_x_s = lf_x_scl * x;
        double lf_y_s = lf_y_scl * y;

        // Top-left corner of the 2x2 neighborhood.  Clamp from above so the
        // +1 neighbor stays in range, and from below so degenerate sources
        // (width or height of 1) do not produce a negative index.
        int32_t x0 = static_cast<int>(lf_x_s);
        if (x0 > src_width - 2) x0 = src_width - 2;
        if (x0 < 0) x0 = 0;
        int32_t y0 = static_cast<int>(lf_y_s);
        if (y0 > src_height - 2) y0 = src_height - 2;
        if (y0 < 0) y0 = 0;
        int32_t x1 = (x0 + 1 < src_width) ? (x0 + 1) : x0;
        int32_t y1 = (y0 + 1 < src_height) ? (y0 + 1) : y0;

        double lf_weight_x = lf_x_s - x0;
        double lf_weight_y = lf_y_s - y0;

        // Bilinear blend of the four neighbors.
        double dest_val = (1 - lf_weight_y) * ((1 - lf_weight_x) *
            src_data[y0 * src_width + x0] +
            lf_weight_x * src_data[y0 * src_width + x1]) +
            lf_weight_y * ((1 - lf_weight_x) * src_data[y1 * src_width + x0] +
            lf_weight_x * src_data[y1 * src_width + x1]);
        dest_data[y * dest_width + x] = static_cast<uint8_t>(dest_val);
      }
    }
  }
}
// Multi-scale view over a single grayscale image. Owns two raw pixel buffers
// (`buf_img_` holds the 1x image, `buf_img_scaled_` the current scaled copy)
// that are grown on demand and released in the destructor.
class ImagePyramid {
 public:
  ImagePyramid()
      : max_scale_(1.0f), min_scale_(1.0f),
        scale_factor_(1.0f), scale_step_(0.8f),
        width1x_(0), height1x_(0),
        width_scaled_(0), height_scaled_(0),
        buf_img_width_(2), buf_img_height_(2),
        buf_scaled_width_(2), buf_scaled_height_(2) {
    buf_img_ = new uint8_t[buf_img_width_ * buf_img_height_];
    buf_img_scaled_ = new uint8_t[buf_scaled_width_ * buf_scaled_height_];
  }

  // BUG FIX: this class owns raw heap buffers and declares a destructor, but
  // the implicitly-generated copy operations would copy the raw pointers and
  // lead to a double delete (Rule of Five). Copying is therefore disabled.
  ImagePyramid(const ImagePyramid&) = delete;
  ImagePyramid& operator=(const ImagePyramid&) = delete;

  ~ImagePyramid() {
    delete[] buf_img_;
    buf_img_ = nullptr;

    buf_img_width_ = 0;
    buf_img_height_ = 0;

    delete[] buf_img_scaled_;
    buf_img_scaled_ = nullptr;

    buf_scaled_width_ = 0;
    buf_scaled_height_ = 0;

    img_scaled_.data = nullptr;
    img_scaled_.width = 0;
    img_scaled_.height = 0;
  }

  // Only steps in (0, 1] shrink the image; anything else is ignored.
  inline void SetScaleStep(float step) {
    if (step > 0.0f && step <= 1.0f)
      scale_step_ = step;
  }

  inline void SetMinScale(float min_scale) {
    min_scale_ = min_scale;
  }

  // Also resets the scan to start from the largest scale and re-allocates the
  // scaled buffer for the new maximum size.
  inline void SetMaxScale(float max_scale) {
    max_scale_ = max_scale;
    scale_factor_ = max_scale;
    UpdateBufScaled();
  }

  void SetImage1x(const uint8_t* img_data, int32_t width, int32_t height);

  inline float min_scale() const { return min_scale_; }
  inline float max_scale() const { return max_scale_; }

  // Returns a non-owning view of the original (1x) image; the pixel data is
  // still owned by this pyramid and must not outlive it.
  inline seeta::ImageData image1x() {
    seeta::ImageData img(width1x_, height1x_, 1);
    img.data = buf_img_;
    return img;
  }

  const seeta::ImageData* GetNextScaleImage(float* scale_factor = nullptr);

 private:
  void UpdateBufScaled();

  float max_scale_;
  float min_scale_;
  float scale_factor_;
  float scale_step_;
  int32_t width1x_;       // original image width
  int32_t height1x_;      // original image height
  int32_t width_scaled_;  // dimensions of the current scaled image
  int32_t height_scaled_;

  uint8_t* buf_img_;      // owned: 1x pixel buffer
  int32_t buf_img_width_;
  int32_t buf_img_height_;

  uint8_t* buf_img_scaled_;  // owned: scaled pixel buffer
  int32_t buf_scaled_width_;
  int32_t buf_scaled_height_;

  seeta::ImageData img_scaled_;  // non-owning view into buf_img_scaled_
};
} // namespace fd
} // namespace seeta
#endif // SEETA_FD_UTIL_IMAGE_PYRAMID_H_
|
loop_array_test.c | // RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Fill a heap-allocated array of 10 ints in parallel and print it.
 *
 * BUG FIX: `#pragma omp parallel for` must be followed directly by the
 * for statement. The original wrapped the loop in a compound block
 * (`{ for ... }`), which is not a valid OpenMP loop construct and fails
 * to compile. Also frees the array and returns an explicit status.
 */
int main()
{
    int* array = (int*)malloc(sizeof(int)*10);
    if (array == NULL)
        return 1;

#pragma omp parallel for
    for(int i = 0; i < 10; i++)
    {
        array[i] = i;
    }

    printf("[%d, %d, %d, %d, %d, %d, %d, %d, %d, %d]\n", array[0], array[1],array[2], array[3],array[4], array[5],array[6], array[7],array[8], array[9]);
    free(array);
    return 0;
}
|
for_firstprivate.c | #include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include <omp.h>
/*
 * OpenMP validation test: verifies that `firstprivate` on a worksharing
 * `for` initializes each thread's private copy of sum0 from the original
 * value (12345). Each thread accumulates its own iterations on top of
 * 12345 into sum0, publishes the final value through sum1 (firstprivate
 * on the parallel region), and the per-thread sums are combined under a
 * critical section.
 *
 * Returns nonzero (true) on success, i.e. when the combined sum equals
 * 12345 * threadsnum + LOOPCOUNT*(LOOPCOUNT+1)/2.
 */
int
check_for_firstprivate (FILE * logFile)
{
	int sum = 0;
	int sum0 = 12345; /*bug 162, Liao*/
	int sum1 = 0;
	int known_sum;
	int threadsnum;
	int i;
#pragma omp parallel firstprivate(sum1)
	{
		/* One thread records the team size used below in known_sum. */
#pragma omp single
		{
			threadsnum=omp_get_num_threads();
		}
		/*sum0=0; */
		/* firstprivate: each thread's sum0 starts at the original 12345. */
#pragma omp for firstprivate(sum0)
		for (i = 1; i <= LOOPCOUNT; i++)
		{
			sum0 = sum0 + i;
			sum1 = sum0;
		}	/*end of for */
		/* Combine per-thread results; sum1 holds this thread's final sum0. */
#pragma omp critical
		{
			sum = sum + sum1;
		}	/*end of critical */
	}	/* end of parallel */

	/* bug 162 , Liao*/
	/* Each of the threadsnum threads contributed the 12345 seed once. */
	known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
	return (known_sum == sum);
	/*return (known_sum == sum); */
}				/* end of check_for_fistprivate */
/*
 * Deliberate cross-check companion to check_for_firstprivate: uses
 * `private(sum0)` instead of `firstprivate(sum0)`, so each thread's sum0
 * starts UNINITIALIZED (reading it is undefined behavior on purpose).
 * The test suite expects this variant to generally NOT reproduce
 * known_sum; do not "fix" the private clause.
 */
int
crosscheck_for_firstprivate (FILE * logFile)
{
	int sum = 0;
	int sum0 = 12345;
	int sum1 = 0;
	int known_sum;
	int threadsnum;
	int i;
	/* All threads write the same team size; benign data race by design. */
#pragma omp parallel
	{
		threadsnum=omp_get_num_threads();
	}
#pragma omp parallel firstprivate(sum1)
	{
		/*sum0=0; */
		/* private: sum0 is NOT initialized from the outer 12345 here. */
#pragma omp for private(sum0)
		for (i = 1; i <= LOOPCOUNT; i++)
		{
			sum0 = sum0 + i;
			sum1 = sum0;
		}	/*end of for */
#pragma omp critical
		{
			sum = sum + sum1;
		}	/*end of critical */
	}	/* end of parallel */
	known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
	return (known_sum == sum);
}				/* end of check_for_fistprivate */
|
CSRMatrix.h | /*
* CSRMatrix.h
*
* Created on: May 6, 2015
* Author: Michael Wegner (michael.wegner@student.kit.edu)
*/
#ifndef CSRMATRIX_H_
#define CSRMATRIX_H_
#include <vector>
#include "../Globals.h"
#include "AlgebraicGlobals.h"
#include "Vector.h"
#include "../graph/Graph.h"
#include "../algebraic/SparseAccumulator.h"
#include "../auxiliary/Timer.h"
namespace NetworKit {
/**
* @ingroup algebraic
* The CSRMatrix class represents a sparse matrix stored in CSR-Format (i.e. compressed sparse row).
* If speed is important, use this CSRMatrix instead of the Matrix class.
*/
class CSRMatrix {
private:
	std::vector<index> rowIdx;
	std::vector<index> columnIdx;
	std::vector<double> nonZeros;

	count nRows;
	count nCols;
	bool isSorted;
	double zero;

	/**
	 * Quicksort algorithm on columnIdx between [@a left, @a right].
	 * @param left
	 * @param right
	 */
	void quicksort(index left, index right);

	/**
	 * Partitions columnIdx between [@a left, @a right] after selecting the pivot in the middle.
	 * @param left
	 * @param right
	 * @return The pivot.
	 */
	index partition(index left, index right);

	/**
	 * Binary search the sorted columnIdx vector between [@a left, @a right] for column @a j.
	 * If @a j is not present, the index that is immediately left of the place where @a j would be
	 * is returned. If
	 * @param left
	 * @param right
	 * @param j
	 * @return The position of column @a j in columnIdx or the element immediately to the left of the place where @a j
	 * would be.
	 */
	index binarySearchColumns(index left, index right, index j) const;

public:
	/** Default constructor */
	CSRMatrix();

	/**
	 * Constructs the CSRMatrix with size @a dimension x @a dimension.
	 * @param dimension Defines how many rows and columns this matrix has.
	 * @param zero The zero element (default = 0.0).
	 */
	CSRMatrix(const count dimension, const double zero = 0.0);

	/**
	 * Constructs the CSRMatrix with size @a nRows x @a nCols.
	 * @param nRows Number of rows.
	 * @param nCols Number of columns.
	 * @param zero The zero element (default = 0.0).
	 */
	CSRMatrix(const count nRows, const count nCols, const double zero = 0.0);

	/**
	 * Constructs the @a dimension x @a dimension Matrix from the elements at position @a positions with values @values.
	 * @param dimension Defines how many rows and columns this matrix has.
	 * @param triplets The nonzero elements.
	 * @param zero The zero element (default is 0.0).
	 * @param isSorted True, if the triplets are sorted per row. Default is false.
	 */
	CSRMatrix(const count dimension, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false);

	/**
	 * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values.
	 * @param nRows Defines how many rows this matrix has.
	 * @param nCols Defines how many columns this matrix has.
	 * @param triplets The nonzero elements.
	 * @param zero The zero element (default is 0.0).
	 * @param isSorted True, if the triplets are sorted per row. Default is false.
	 */
	CSRMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false);

	/**
	 * Constructs the @a nRows x @a nCols Matrix from the elements stored in @a columnIdx and @a values. @a columnIdx and @a values store the colums and values by row.
	 * @param nRows
	 * @param nCols
	 * @param columnIdx
	 * @param values
	 * @param zero The zero element (default is 0.0).
	 * @param isSorted True if the column indices in @a columnIdx are sorted in every row.
	 */
	CSRMatrix(const count nRows, const count nCols, const std::vector<std::vector<index>> &columnIdx, const std::vector<std::vector<double>> &values, const double zero = 0.0, bool isSorted = false);

	/**
	 * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values.
	 * @param nRows Defines how many rows this matrix has.
	 * @param nCols Defines how many columns this matrix has.
	 * @param rowIdx The rowIdx vector of the CSR format.
	 * @param columnIdx The columnIdx vector of the CSR format.
	 * @param nonZeros The nonZero vector of the CSR format. Should be as long as the @a columnIdx vector.
	 * @param zero The zero element (default is 0.0).
	 * @param isSorted True, if the triplets are sorted per row. Default is false.
	 */
	CSRMatrix(const count nRows, const count nCols, const std::vector<index>& rowIdx, const std::vector<index>& columnIdx, const std::vector<double>& nonZeros, const double zero = 0.0, bool isSorted = false);

	/** Default copy constructor */
	CSRMatrix (const CSRMatrix &other) = default;

	/** Default move constructor */
	CSRMatrix (CSRMatrix &&other) = default;

	/** Default destructor */
	virtual ~CSRMatrix() = default;

	/** Default move assignment operator */
	CSRMatrix& operator=(CSRMatrix &&other) = default;

	/** Default copy assignment operator */
	CSRMatrix& operator=(const CSRMatrix &other) = default;

	/**
	 * Compares this matrix to @a other and returns true if the shape and zero element are the same as well as
	 * all entries, otherwise returns false.
	 * @param other
	 */
	bool operator==(const CSRMatrix& other) const {
		bool equal = nRows == other.nRows && nCols == other.nCols && zero == other.zero;
		if (equal) {
			forNonZeroElementsInRowOrder([&](index i, index j, double value) {
				if (other(i,j) != value) {
					equal = false;
					return;
				}
			});
		}
		// BUG FIX: also compare the entries stored only in @a other. The
		// original single pass over this matrix's nonzeros never visited
		// positions where @a other stores a nonzero but this matrix does not,
		// so such unequal matrices compared equal (and == was asymmetric).
		if (equal) {
			other.forNonZeroElementsInRowOrder([&](index i, index j, double value) {
				if ((*this)(i,j) != value) {
					equal = false;
					return;
				}
			});
		}

		return equal;
	}

	/**
	 * Compares this matrix to @a other and returns false if the shape and zero element are the same as well as
	 * all entries, otherwise returns true.
	 * @param other
	 */
	bool operator!=(const CSRMatrix& other) const {
		return !((*this) == other);
	}

	/**
	 * @return Number of rows.
	 */
	inline count numberOfRows() const {
		return nRows;
	}

	/**
	 * @return Number of columns.
	 */
	inline count numberOfColumns() const {
		return nCols;
	}

	/**
	 * Returns the zero element of the matrix.
	 */
	inline double getZero() const {
		return zero;
	}

	/**
	 * @param i The row index.
	 * @return Number of non-zeros in row @a i.
	 */
	count nnzInRow(const index i) const;

	/**
	 * @return Number of non-zeros in this matrix.
	 */
	count nnz() const;

	/**
	 * @return Value at matrix position (i,j).
	 */
	double operator()(const index i, const index j) const;

	/**
	 * Set the matrix at position (@a i, @a j) to @a value.
	 * @note This operation can be linear in the number of non-zeros due to vector element movements
	 */
	void setValue(const index i, const index j, const double value);

	/**
	 * Sorts the column indices in each row for faster access.
	 */
	void sort();

	/**
	 * @return True if the matrix is sorted, otherwise false.
	 */
	bool sorted() const;

	/**
	 * @return Row @a i of this matrix as vector.
	 */
	Vector row(const index i) const;

	/**
	 * @return Column @a j of this matrix as vector.
	 */
	Vector column(const index j) const;

	/**
	 * @return The main diagonal of this matrix.
	 */
	Vector diagonal() const;

	/**
	 * Adds this matrix to @a other and returns the result.
	 * @return The sum of this matrix and @a other.
	 */
	CSRMatrix operator+(const CSRMatrix &other) const;

	/**
	 * Adds @a other to this matrix.
	 * @return Reference to this matrix.
	 */
	CSRMatrix& operator+=(const CSRMatrix &other);

	/**
	 * Subtracts @a other from this matrix and returns the result.
	 * @return The difference of this matrix and @a other.
	 *
	 */
	CSRMatrix operator-(const CSRMatrix &other) const;

	/**
	 * Subtracts @a other from this matrix.
	 * @return Reference to this matrix.
	 */
	CSRMatrix& operator-=(const CSRMatrix &other);

	/**
	 * Multiplies this matrix with a scalar specified in @a scalar and returns the result.
	 * @return The result of multiplying this matrix with @a scalar.
	 */
	CSRMatrix operator*(const double &scalar) const;

	/**
	 * Multiplies this matrix with a scalar specified in @a scalar.
	 * @return Reference to this matrix.
	 */
	CSRMatrix& operator*=(const double &scalar);

	/**
	 * Multiplies this matrix with @a vector and returns the result.
	 * @return The result of multiplying this matrix with @a vector.
	 */
	Vector operator*(const Vector &vector) const;

	/**
	 * Multiplies this matrix with @a other and returns the result in a new matrix.
	 * @return The result of multiplying this matrix with @a other.
	 */
	CSRMatrix operator*(const CSRMatrix &other) const;

	/**
	 * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix.
	 * @return The result of dividing this matrix by @a divisor.
	 */
	CSRMatrix operator/(const double &divisor) const;

	/**
	 * Divides this matrix by a divisor specified in @a divisor.
	 * @return Reference to this matrix.
	 */
	CSRMatrix& operator/=(const double &divisor);

	/**
	 * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B.
	 * @param A Sorted CSRMatrix.
	 * @param B Sorted CSRMatrix.
	 * @param binaryOp Function handling (double, double) -> double
	 * @return @a A @a binaryOp @a B.
	 * @note @a A and @a B must have the same dimensions and must be sorted.
	 */
	template<typename L> static CSRMatrix binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp);

	/**
	 * Computes @a A^T * @a B.
	 * @param A
	 * @param B
	 * @return @a A^T * @a B.
	 * @note The number of rows of @a A must be equal to the number of rows of @a B.
	 */
	static CSRMatrix mTmMultiply(const CSRMatrix &A, const CSRMatrix &B);

	/**
	 * Computes @a A * @a B^T.
	 * @param A
	 * @param B
	 * @return @a A * @a B^T.
	 * @note The number of columns of @a A must be equal to the number of columns of @a B.
	 */
	static CSRMatrix mmTMultiply(const CSRMatrix &A, const CSRMatrix &B);

	/**
	 * Computes @a matrix^T * @a vector.
	 * @param matrix
	 * @param vector
	 * @return @a matrix^T * @a vector.
	 * @note The number of rows of @a matrix must be equal to the dimension of @a vector.
	 */
	static Vector mTvMultiply(const CSRMatrix &matrix, const Vector &vector);

	/**
	 * Transposes this matrix and returns it.
	 */
	CSRMatrix transpose() const;

	/**
	 * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix.
	 * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also
	 * possible to specify a row or column more than once to get duplicates.
	 * @param rowIndices
	 * @param columnIndices
	 */
	CSRMatrix extract(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices) const;

	/**
	 * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and
	 * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of
	 * this matrix. Note that the dimensions of @rowIndices and @a columnIndices must coincide with the number of rows
	 * and columns of @a source.
	 * @param rowIndices
	 * @param columnIndices
	 * @param source
	 */
	void assign(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices, const CSRMatrix& source);

	/**
	 * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the
	 * function applied to the zero element of this matrix returns the zero element.
	 * @param unaryElementFunction
	 */
	template<typename F>
	void apply(const F unaryElementFunction);

	/**
	 * Compute the (weighted) adjacency matrix of the (weighted) Graph @a graph.
	 * @param graph
	 */
	static CSRMatrix adjacencyMatrix(const Graph& graph, double zero = 0.0);

	/**
	 * Creates a diagonal matrix with dimension equal to the dimension of the Vector @a diagonalElements. The values on
	 * the diagonal are the ones stored in @a diagonalElements (i.e. D(i,i) = diagonalElements[i]).
	 * @param diagonalElements
	 */
	static CSRMatrix diagonalMatrix(const Vector& diagonalElements, double zero = 0.0);

	/**
	 * Returns the (weighted) incidence matrix of the (weighted) Graph @a graph.
	 * @param graph
	 */
	static CSRMatrix incidenceMatrix(const Graph& graph, double zero = 0.0);

	/**
	 * Compute the (weighted) Laplacian of the (weighted) Graph @a graph.
	 * @param graph
	 */
	static CSRMatrix laplacianMatrix(const Graph& graph, double zero = 0.0);

	/**
	 * Returns the (weighted) normalized Laplacian matrix of the (weighted) Graph @a graph
	 * @param graph
	 */
	static CSRMatrix normalizedLaplacianMatrix(const Graph& graph, double zero = 0.0);

	/**
	 * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
	 */
	template<typename L> void forNonZeroElementsInRow(index row, L handle) const;

	/**
	 * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
	 */
	template<typename L> void parallelForNonZeroElementsInRow(index row, L handle) const;

	/**
	 * Iterate over all elements in row @a i in the matrix and call handle(index column, double value)
	 */
	template<typename L> void forElementsInRow(index i, L handle) const;

	/**
	 * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure).
	 */
	template<typename L> void forNonZeroElementsInRowOrder(L handle) const;

	/**
	 * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.
	 */
	template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const;
};
/**
 * Element-wise combination of @a A and @a B with @a binaryOp. If both inputs
 * are sorted, a three-pass parallel merge builds the result structure first
 * and fills values second; otherwise a sequential sparse-accumulator
 * (linked-list) scheme is used.
 *
 * @note In the sorted branch, entries present only in A are copied verbatim
 * rather than combined as binaryOp(A_value, B.zero); this is only equivalent
 * for operators with binaryOp(x, 0) == x (such as + and -).
 */
template<typename L> inline CSRMatrix NetworKit::CSRMatrix::binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp) {
	assert(A.nRows == B.nRows && A.nCols == B.nCols);

	if (A.sorted() && B.sorted()) {
		// Pass 1: merge the sorted column lists row by row to determine the
		// result structure and the per-row nonzero counts (stored in rowIdx[i+1]).
		std::vector<index> rowIdx(A.nRows+1);
		std::vector<std::vector<index>> columns(A.nRows);

		rowIdx[0] = 0;
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) {
			index k = A.rowIdx[i];
			index l = B.rowIdx[i];
			while (k < A.rowIdx[i+1] && l < B.rowIdx[i+1]) {
				if (A.columnIdx[k] < B.columnIdx[l]) {
					columns[i].push_back(A.columnIdx[k]);
					++k;
				} else if (A.columnIdx[k] > B.columnIdx[l]) {
					columns[i].push_back(B.columnIdx[l]);
					++l;
				} else { // A.columnIdx[k] == B.columnIdx[l]
					columns[i].push_back(A.columnIdx[k]);
					++k;
					++l;
				}
				++rowIdx[i+1];
			}

			// Drain whichever row still has trailing entries.
			while (k < A.rowIdx[i+1]) {
				columns[i].push_back(A.columnIdx[k]);
				++k;
				++rowIdx[i+1];
			}

			while (l < B.rowIdx[i+1]) {
				columns[i].push_back(B.columnIdx[l]);
				++l;
				++rowIdx[i+1];
			}
		}

		// Prefix-sum the per-row counts into CSR row pointers.
		for (index i = 0; i < A.nRows; ++i) {
			rowIdx[i+1] += rowIdx[i];
		}

		count nnz = rowIdx[A.nRows];
		std::vector<index> columnIdx(nnz);
		std::vector<double> nonZeros(nnz, A.zero);

		// Pass 2: flatten the per-row column lists and release them eagerly.
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) {
			for (index cIdx = rowIdx[i], j = 0; cIdx < rowIdx[i+1]; ++cIdx, ++j) {
				columnIdx[cIdx] = columns[i][j];
			}
			columns[i].clear();
			columns[i].resize(0);
			columns[i].shrink_to_fit();
		}

		// Pass 3: fill in the values by walking A and B alongside the result.
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) {
			index k = A.rowIdx[i];
			index l = B.rowIdx[i];
			for (index cIdx = rowIdx[i]; cIdx < rowIdx[i+1]; ++cIdx) {
				if (k < A.rowIdx[i+1] && columnIdx[cIdx] == A.columnIdx[k]) {
					nonZeros[cIdx] = A.nonZeros[k];
					++k;
				}

				if (l < B.rowIdx[i+1] && columnIdx[cIdx] == B.columnIdx[l]) {
					nonZeros[cIdx] = binaryOp(nonZeros[cIdx], B.nonZeros[l]);
					++l;
				}
			}
		}

		return CSRMatrix(A.nRows, A.nCols, rowIdx, columnIdx, nonZeros, A.zero, true);
	} else { // A or B not sorted
		std::vector<int64_t> columnPointer(A.nCols, -1); // per-column link of the current row's occupied-column list
		std::vector<double> Arow(A.nCols, A.zero);
		std::vector<double> Brow(A.nCols, B.zero);

		std::vector<Triplet> triplets;

		for (index i = 0; i < A.nRows; ++i) {
			index listHead = 0;
			count nnz = 0;

			// search for nonZeros in our own matrix
			for (index k = A.rowIdx[i]; k < A.rowIdx[i+1]; ++k) {
				index j = A.columnIdx[k];
				Arow[j] = A.nonZeros[k];

				columnPointer[j] = listHead;
				listHead = j;
				nnz++;
			}

			// search for nonZeros in the other matrix
			for (index k = B.rowIdx[i]; k < B.rowIdx[i+1]; ++k) {
				index j = B.columnIdx[k];
				Brow[j] = B.nonZeros[k];

				if (columnPointer[j] == -1) { // our own matrix does not have a nonZero entry in column j
					columnPointer[j] = listHead;
					listHead = j;
					nnz++;
				}
			}

			// apply operator on the found nonZeros in A and B
			for (count k = 0; k < nnz; ++k) {
				double value = binaryOp(Arow[listHead], Brow[listHead]);
				if (value != A.zero) {
					triplets.push_back({i,listHead,value});
				}

				index temp = listHead;
				listHead = columnPointer[listHead];

				// reset for next row
				columnPointer[temp] = -1;
				Arow[temp] = A.zero;
				Brow[temp] = B.zero;
			}

			nnz = 0;
		}

		// BUG FIX: forward A.zero to the result. The original call relied on the
		// constructor's default (0.0), silently discarding a non-default zero
		// element — inconsistent with the sorted branch above.
		return CSRMatrix(A.numberOfRows(), A.numberOfColumns(), triplets, A.zero);
	}
}
template<typename F>
void CSRMatrix::apply(const F unaryElementFunction) {
#pragma omp parallel for
for (omp_index k = 0; k < static_cast<omp_index>(nonZeros.size()); ++k) {
nonZeros[k] = unaryElementFunction(nonZeros[k]);
}
}
} /* namespace NetworKit */
template<typename L>
inline void NetworKit::CSRMatrix::forNonZeroElementsInRow(index i, L handle) const {
	// Walk the stored entries of row i; rowIdx[i]..rowIdx[i+1] delimits the
	// row's slice of columnIdx/nonZeros.
	const index rowEnd = rowIdx[i+1];
	for (index pos = rowIdx[i]; pos != rowEnd; ++pos) {
		handle(columnIdx[pos], nonZeros[pos]);
	}
}
/**
 * Parallel iteration over the stored entries of row @a i. The loop variable
 * is declared as (signed) omp_index so the loop satisfies the OpenMP
 * canonical-loop requirements; it is initialized from the (unsigned) index
 * rowIdx[i].
 */
template<typename L>
inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRow(index i, L handle) const {
#pragma omp parallel for
	for (omp_index k = rowIdx[i]; k < static_cast<omp_index>(rowIdx[i+1]); ++k) {
		handle(columnIdx[k], nonZeros[k]);
	}
}
template<typename L>
inline void NetworKit::CSRMatrix::forElementsInRow(index i, L handle) const {
	// Materialize the full (dense) row, then hand every entry — including the
	// zero element — to the callback together with its running column position.
	Vector denseRow = row(i);
	index column = 0;
	denseRow.forElements([&](double entry) {
		handle(column, entry);
		++column;
	});
}
template<typename L>
inline void NetworKit::CSRMatrix::forNonZeroElementsInRowOrder(L handle) const {
for (index i = 0; i < nRows; ++i) {
for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {
handle(i, columnIdx[k], nonZeros[k]);
}
}
}
/**
 * Row-parallel iteration over all stored entries: rows are distributed across
 * OpenMP threads, so the handler may be invoked concurrently for different
 * rows (entries within one row stay sequential on one thread).
 */
template<typename L>
inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) const {
#pragma omp parallel for
	for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) {
		for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {
			handle(i, columnIdx[k], nonZeros[k]);
		}
	}
}
#endif /* CSRMATRIX_H_ */
|
cf_cilk.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
//#include <omp.h>
#include <cilk/cilk.h>
#include <cilk/reducer_opadd.h>
#include <time.h>
#include <sys/time.h>
#define MICRO_IN_SEC 1000000.00
// the item number and user number should be start from 1, if it start from 0, our program in get_*_data should be modified
#define USER_COUNT 384546
//#define USER_COUNT 3500
#define ITEM_COUNT 1019318
#define TEST_COUNT 19315653
#define RECORD_COUNT 29057933
#define ITEM_BLOCK 3500
//#define ITEM_BLOCK 384546
#define ITEM_ROUND (ITEM_COUNT/ITEM_BLOCK)
#define ITEM_LAST (ITEM_COUNT%ITEM_BLOCK)
#define USER_BLOCK 6000
#define USER_ROUND (USER_COUNT/USER_BLOCK)
#define USER_LAST (USER_COUNT%USER_BLOCK)
#define K_SORT 100
#define BLOCK_MINI 100
/* One training-file record: a user's rating of an item. Per the file-level
 * note, ids in the data set are expected to start from 1. */
typedef struct record_item_struct_define
{
	int item_id;   /* item id as read from the file (1-based per file note) */
	int user_id;   /* user id as read from the file (1-based per file note) */
	float rating;  /* rating value for this (item, user) pair */
}record_item_struct;
/* One slot of a per-item top-K similarity list (K_SORT entries per item). */
typedef struct k_similarity_struct_define
{
	int k_index;          /* id of the similar item; -1 marks an unused slot
	                         (see the commented ranking code below) */
	double k_similarity;  /* similarity score for that item */
}k_similarity_struct;
/*
 * Current wall-clock time in seconds with microsecond resolution
 * (gettimeofday-based).
 *
 * Cleanup: removed the unused locals `int tv_sec, tv_usec;` and
 * `double time;` (the latter also shadowed the libc time() symbol).
 */
double microtime(){
	struct timeval tv;
	struct timezone tz;
	gettimeofday(&tv,&tz);
	return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC;
}
float compute_multiply(float* rating_m_block, float* rating_n_block)
{
float sum_multiply;
return sum_multiply = __sec_reduce_add(rating_m_block[0:BLOCK_MINI] * rating_n_block[0:BLOCK_MINI]);
}
// Here, we assume that the item in {user,item,predict_rating} of the test dataset will cover all the items.
void get_pearson_similarity(float* rating_m, float* rating_n, double* average_matrix, k_similarity_struct * k_similarity_matrix, int start_index_m, int start_index_n, int flag){
printf(" in pearson_similarity start_index_m=%d, start_index_n=%d\n", start_index_m, start_index_n);
int nPadded = ( USER_COUNT%8 == 0 ? USER_COUNT : USER_COUNT + (8-USER_COUNT%8) );
double similarity;
// int i,j,m,n,s,k;
int ua,ub,uc;
/*
double sum_numerator=0;
double sum_denominator_square_m=0;
double sum_denominator_square_n=0;
*/
double sum_denominator=0;
double* sum_numerator_matrix =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
double* sum_denominator_matrix_m =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
double* sum_denominator_matrix_n =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
memset(sum_numerator_matrix,0,sizeof(double)*USER_COUNT);
memset(sum_denominator_matrix_m,0,sizeof(double)*USER_COUNT);
memset(sum_denominator_matrix_n,0,sizeof(double)*USER_COUNT);
// float * simi_temp = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
int numthreads;
int item_index;
// int block_m,block_n;
int end_m=(ITEM_COUNT<(start_index_m+ITEM_BLOCK) ? ITEM_COUNT:(start_index_m+ITEM_BLOCK));
int end_n=(ITEM_COUNT<(start_index_n+ITEM_BLOCK) ? ITEM_COUNT:(start_index_n+ITEM_BLOCK));
//printf("the number of threads is %d\n", omp_get_num_threads());
int block_mini_int = BLOCK_MINI;
printf(" end_m = %d , end_n = %d,...BLOCK_MINI = %d\n",end_m,end_n,block_mini_int);
/*
cilk::reducer_opadd<unsigned long> kk(0);
cilk_for (int k=0; k<1000; k++)
{
kk+=k;
}
*/
double a = microtime();
//compute the pearson similarity
//#pragma omp parallel for collapse(2) private(i,j,k,m,n) reduction(+:sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
//#pragma omp parallel for collapse(2) private(i,j,k,m,n)
//#pragma omp parallel
// #pragma cilk grainsize = ((end_m-start_index_m)*(end_n-start_index_n))/244
// cilk_for (unsigned int mn = 0; mn < (((end_m-start_index_m)*(end_n-start_index_n))/16)*16; mn++)
cilk_for (unsigned int mn = 0; mn < (end_m-start_index_m)*(end_n-start_index_n); mn++)
{
// if (m%100==0) printf ("m = %d, percent= %f/%\n",m,(float)m/ITEM_COUNT*100);
// for(int n=start_index_n; n< end_n; n++)
// {
int m = start_index_m + mn / ( end_n - start_index_n );
int n = start_index_n + mn % ( end_n - start_index_n );
//float* block_m = rating_m + (m - start_index_m)*USER_COUNT;
//float* block_n = rating_n + (n - start_index_n)*USER_COUNT;
int block_m = m - start_index_m;
int block_n = n - start_index_n;
//similarity = cilk_spawn compute_kernel(rating_m,rating_n,block_m,block_n);
// int block_m=1,block_n=1;
float sum_numerator=0;
float sum_denominator_square_m=0;
float sum_denominator_square_n=0;
// cilk::reducer_opadd<float> sum_numerator(0);
// cilk::reducer_opadd<float> sum_denominator_square_m(0);
// cilk::reducer_opadd<float> sum_denominator_square_n(0);
// cilk::reducer_opadd<double> sum_denominator(0);
float * rating_m_block = rating_m+block_m*USER_COUNT;
float * rating_n_block = rating_n+block_n*USER_COUNT;
//#pragma omp for schedule(static) nowait
// #pragma omp parallel for private(i)
// #pragma simd reduction(+:sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
// #pragma vector aligned
// #pragma simd vecremainder vectorlength(16) private(sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
// #pragma simd vecremainder
// #pragma vector aligned
// #pragma ivdep
// #pragma vector always
// #pragma cilk grainsize=(USER_COUNT/1)
// for (int i=0;i < USER_COUNT/BLOCK_MINI; i++)
for (int i=0;i < USER_COUNT; i++)
{
//compute numerator
/* // simi_temp[block_m*USER_COUNT+i] = rating_m[block_m*USER_COUNT+i];
float sum_numerator_tmp = cilk_spawn compute_multiply(rating_m_block + i*BLOCK_MINI, rating_n_block + i*BLOCK_MINI);
sum_numerator += sum_numerator_tmp;
float sum_denominator_square_m_tmp = cilk_spawn compute_multiply(rating_m_block + i*BLOCK_MINI, rating_m_block + i*BLOCK_MINI);
sum_denominator_square_m += sum_denominator_square_m_tmp;
float sum_denominator_square_n_tmp = cilk_spawn compute_multiply(rating_n_block + i*BLOCK_MINI, rating_n_block + i*BLOCK_MINI);
sum_denominator_square_n += sum_denominator_square_n_tmp;
*/
sum_numerator += (rating_m[block_m*USER_COUNT+i])*(rating_n[block_n*USER_COUNT+i]);
//compute the squre in denominator
// sum_denominator_square_m += powf(rating_m[block_m*USER_COUNT+i],2.0);
sum_denominator_square_m += rating_m[block_m*USER_COUNT+i]*rating_m[block_m*USER_COUNT+i];
// sum_denominator_square_n += powf(rating_n[block_n*USER_COUNT+i],2.0);
sum_denominator_square_n += rating_n[block_n*USER_COUNT+i]*rating_n[block_n*USER_COUNT+i];
// if( m==100 && n==100)
// printf("m=%d,n=%d,i=%d, running on thread %d\n",m,n,i, omp_get_thread_num());
}
/*
float * rating_m_block = rating_m+block_m*USER_COUNT;
float * rating_n_block = rating_n+block_n*USER_COUNT;
// #pragma vector aligned (rating_m_block,rating_n_block)
float sum_numerator = __sec_reduce_add(rating_m_block[0:USER_COUNT] * rating_n_block[0:USER_COUNT]);
// float sum_numerator = cilk_spawn compute_numerator(rating_m_block, rating_n_block);
// #pragma vector aligned (rating_m_block)
float sum_denominator_square_m = __sec_reduce_add(rating_m_block[0:USER_COUNT]*rating_m_block[0:USER_COUNT]);
// #pragma vector aligned (rating_n_block)
float sum_denominator_square_n = __sec_reduce_add(rating_n_block[0:USER_COUNT]*rating_n_block[0:USER_COUNT]);
*/
//compute the denominator
sum_denominator = sqrt (sum_denominator_square_m*sum_denominator_square_n);
if(sum_denominator!=0)
similarity = sum_numerator/sum_denominator;
else
similarity = 0;
/*
for (j=0;j<K_SORT;j++)
{
item_index = k_similarity_matrix[m*K_SORT+j].k_index;
if(item_index==-1 || similarity > k_similarity_matrix[m*K_SORT+j].k_similarity)
{
for (s=K_SORT-1;s>j;s--)
{
k_similarity_matrix[m*K_SORT+s].k_index = k_similarity_matrix[m*K_SORT+s-1].k_index;
k_similarity_matrix[m*K_SORT+s].k_similarity = k_similarity_matrix[m*K_SORT+s-1].k_similarity;
}
k_similarity_matrix[m*K_SORT+j].k_index = n;
k_similarity_matrix[m*K_SORT+j].k_similarity = similarity;
break;
}
else if( similarity == k_similarity_matrix[m*K_SORT+j].k_similarity && item_index == n)
{
break;
}
}
if(flag==0) continue;
for (k=0;k<K_SORT;k++)
{
item_index = k_similarity_matrix[n*K_SORT+k].k_index;
if(item_index==-1 || similarity > k_similarity_matrix[n*K_SORT+k].k_similarity)
{
for (s=K_SORT-1;s>j;s--)
{
k_similarity_matrix[n*K_SORT+s].k_index = k_similarity_matrix[n*K_SORT+s-1].k_index;
k_similarity_matrix[n*K_SORT+s].k_similarity = k_similarity_matrix[n*K_SORT+s-1].k_similarity;
}
k_similarity_matrix[n*K_SORT+k].k_index = m;
k_similarity_matrix[n*K_SORT+k].k_similarity = similarity;
break;
}
else if( similarity == k_similarity_matrix[n*K_SORT+k].k_similarity && item_index == m)
{
break;
}
}
*/
// }
}
double b = microtime();
double duration = b-a;
printf(" time consumed: %fs\n", duration);
exit(0);
}
/*
 * Predict ratings for the test set from the top-K item similarity lists.
 * The entire implementation is currently commented out (kept below for
 * reference), so the function is a no-op stub.
 *
 * BUG FIX: the function is declared to return int but previously fell off
 * the end without a return statement — undefined behavior whenever the
 * caller used the result. It now returns 0 explicitly.
 */
int get_predict_rating(double* predict_rating, float* rating, k_similarity_struct* index_matrix, int* test, double* average_index, int user_start_index,int test_start_index)
{
	/* Suppress unused-parameter warnings while the body is disabled. */
	(void)predict_rating;
	(void)rating;
	(void)index_matrix;
	(void)test;
	(void)average_index;
	(void)user_start_index;
	(void)test_start_index;

// Firstly, we should find the union set between rating&index_matrix for the users in test[];
	/*
	printf(" in predict_rating ...........user_start_index=%d, test_start_index=%d\n", user_start_index, test_start_index);
	int m,n,i,j;
	int user_id,item_id,k_id;
	double sum_rating = 0;
	int sum_rating_index = 0;
	int sum_no_rating_index=0;
	double numerator = 0;
	double denominator = 0;
	int block_user_id;
	//	#pragma omp parallel for private(m,i) reduction(+:numerator,denominator)
	for (m = test_start_index; m < TEST_COUNT; m++)
	{
		user_id = test[m*2+0];
		item_id = test[m*2+1];
		numerator =0;
		denominator = 0;
		if( user_id < user_start_index) printf(" error__________________+++++++++++++++\n");
		if( user_id > ((user_start_index+USER_BLOCK) > USER_COUNT ? USER_COUNT:(user_start_index+USER_BLOCK))) break;
		block_user_id = user_id - user_start_index;
		for (i = 0; i < K_SORT; i++)
		{
			k_id = index_matrix[item_id*K_SORT+i].k_index;
			if ( rating[block_user_id*ITEM_COUNT+k_id] !=0 )
			{
				numerator += index_matrix[item_id*K_SORT+i].k_similarity*(rating[block_user_id*ITEM_COUNT+k_id]-average_index[k_id]);
				denominator += index_matrix[item_id*K_SORT+i].k_similarity;
			}
		}
		if(denominator !=0)
			predict_rating[m] = average_index[item_id] + numerator/ denominator;
		else
			predict_rating[m] = average_index[item_id];
	}
	//	return predict_rating;
	return m;
	*/

	return 0;
}
/*
 * Diagnostic helper — currently disabled (no-op). The original scan, which
 * counted zero entries in a row-major `row` x `column` matrix, is preserved
 * below for reference.
 */
void print_matrix_double(double * matrix,int row,int column)
{
	(void)matrix;
	(void)row;
	(void)column;
	/*
	int i,j;
	int sum_0=0;
	for (i=0;i<row;i++)
	{
		for(j=0;j<column;j++)
			if (matrix[i*column+j]==0) sum_0++;
	//			printf("%lf  ",matrix[i*column+j]);
	//		printf("\n");
	}
	printf("sum_0 is %d in a whole %d\n",sum_0,row);
	*/
}
/*
 * Load every "<item> <user> <rating> <timestamp>" record from @filename into
 * the item_data array (which must be large enough for all records; the
 * timestamp field is read and discarded). Exits the process if the file
 * cannot be opened.
 *
 * BUG FIXES: the fourth fscanf argument had been corrupted to an invalid
 * token (a mangled `&timestamp`), and the loop condition `!= EOF` would spin
 * forever on a malformed line, since fscanf then returns 0 without consuming
 * input. The loop now requires all four fields to match.
 */
void get_item_data(record_item_struct* item_data, char* filename)
{
	FILE *fp;
	if ((fp=fopen(filename,"r")) == NULL)
	{
		printf("cannot open this file");
		exit(0);
	}
	int user_id, item_id, timestamp;
	float rating=0;
	int i=0;
	while (fscanf(fp,"%d %d %f %d", &item_id, &user_id, &rating, &timestamp) == 4)
	{
		item_data[i].item_id = item_id;
		item_data[i].user_id = user_id;
		item_data[i].rating = rating;
		i++;
	}
	fclose(fp);
}
/*
 * Scatter block number @round of the pre-loaded item records into a dense
 * ITEM_BLOCK x USER_COUNT rating matrix (@data), starting the scan at
 * @file_start_index in @item_data. Ids in item_data are 1-based and are
 * shifted into the block-local, 0-based matrix.
 *
 * Returns the number of records consumed, so the caller can pass
 * file_start_index + return value for the next block.
 *
 * NOTE(review): the scan loop has no upper bound on i — it stops only when
 * it meets a record whose item_id lies beyond this block. If the last block
 * has no such trailing record, the loop reads past the end of item_data;
 * confirm the caller guarantees a terminator.
 */
int get_item_block_data(int round, float* data, int file_start_index, record_item_struct* item_data)
{
	//	int i=0;
	int p=0;
	//	#pragma omp parallel for
	/* Zero the whole block matrix in parallel before scattering ratings. */
	cilk_for (int i = 0; i<ITEM_BLOCK*USER_COUNT;i++)
	{
		data[i]=0;
	}
	//	memset(data, 0, sizeof(float)*ITEM_BLOCK*USER_COUNT);
	int user_id, item_id;
	float rating=0;
	for(int i=file_start_index; ;i++)
	{
		item_id = item_data[i].item_id;
		user_id = item_data[i].user_id;
		rating = item_data[i].rating;
		/* First record belonging to the next block terminates this one. */
		if ((item_id-1) >= (round+1)*ITEM_BLOCK) break;
		data[(item_id-1-(round*ITEM_BLOCK))*USER_COUNT + user_id-1] = rating;
		p++;
	}
	return p;
}
/*
 * Load user block `round` (users [round*USER_BLOCK, (round+1)*USER_BLOCK))
 * from "r1.train.raw" into the dense USER_BLOCK x ITEM_COUNT buffer `data`,
 * resuming the read at byte offset file_start_index. Returns the file
 * offset just past the last record stored, for the next call.
 * NOTE(review): if no record falls in this block the function returns 0,
 * which would rewind the next round to the start of the file -- confirm
 * blocks are never empty with the configured USER_BLOCK.
 */
long get_user_block_data(int round, float* data, long file_start_index)
{
	FILE *fp;

	memset(data, 0, sizeof(float)*USER_BLOCK*ITEM_COUNT);
	if ((fp = fopen("r1.train.raw", "r")) == NULL)
	{
		printf("cannot open this file");
		exit(0);
	}

	int user_id, item_id, timestamp;
	float rating = 0;
	long file_offset = 0;

	/* Use the named SEEK_SET constant rather than a bare 0. */
	fseek(fp, file_start_index, SEEK_SET);
	/* BUG FIX: require 4 conversions; "!= EOF" spins forever on a
	 * malformed line because fscanf then returns 0, not EOF. */
	while (fscanf(fp, "%d %d %f %d", &user_id, &item_id, &rating, &timestamp) == 4)
	{
		if ((user_id-1) < round*USER_BLOCK) continue;     /* before this block */
		if ((user_id-1) >= (round+1)*USER_BLOCK) break;   /* past this block */
		data[(user_id-1-(round*USER_BLOCK))*ITEM_COUNT + item_id-1] = rating;
		file_offset = ftell(fp);
	}
	fclose(fp);
	return file_offset;
}
/*
 * Load the test set from "r1.test": for record i, data[2i]/data[2i+1]
 * receive the 0-based user/item ids and rating[i] the true rating.
 */
void get_test_data(int* data, float* rating)
{
	FILE *fp;
	int i = 0;

	/* NOTE(review): this clears only the first int -- it looks like it
	 * was meant to clear the whole array, but every slot written below
	 * overwrites it anyway, so behavior is preserved as-is. */
	memset(data, 0, sizeof(int));
	if ((fp = fopen("r1.test", "r")) == NULL)
	{
		printf("cannot open this file");
		exit(0);
	}

	int user_id, item_id, timestamp;
	float rating_temp;

	/* BUG FIX: require 4 conversions; "!= EOF" loops forever on a
	 * malformed line (fscanf returns 0, not EOF). */
	while (fscanf(fp, "%d %d %f %d", &user_id, &item_id, &rating_temp, &timestamp) == 4)
	{
		data[i*2+0] = user_id-1;   /* convert to 0-based ids */
		data[i*2+1] = item_id-1;
		rating[i] = rating_temp;
		i++;
	}
	fclose(fp);
}
/* NOTE(review): currently a stub that always returns 0, so main() always
 * reports rmse=0. The disabled body below computes the MEAN SQUARED error
 * over TEST_COUNT pairs -- it never takes sqrt(), so even re-enabled it
 * would be MSE, not RMSE; add sqrt() when restoring it. */
double get_rmse(float* test_rating, double* predict_data)
{
	/*
	int m,n,i,j;
	double numerator = 0;
	double denominator = TEST_COUNT;
	#pragma omp parallel for private(i) reduction(+:numerator)
	for(i=0;i<TEST_COUNT;i++)
	{
		numerator += pow ((test_rating[i] - predict_data[i]),2.0);
	}
	return (numerator/denominator);
	*/
	return 0;
}
/*
 * For each item row of the block starting at start_index:
 *   pass 1 -- compute the mean of its non-zero (observed) ratings into
 *             average_matrix[m];
 *   pass 2 -- subtract that mean from every entry of the row
 *             (mean-centering for the Pearson similarity step).
 * NOTE(review): the reducers are int, so fractional ratings truncate on
 * each add -- preserved as-is; confirm ratings are integral.
 */
void get_item_average_matrix(float* rating,double* average_matrix, int start_index)
{
	/* Pass 1: per-item mean over observed ratings only. */
	cilk_for (int m = start_index; m<(ITEM_COUNT < (start_index+ITEM_BLOCK) ? ITEM_COUNT:(start_index+ITEM_BLOCK)); m++ )
	{
		cilk::reducer_opadd<int> average_sum(0);
		cilk::reducer_opadd<int> average_index(0);
		int block_m = m-start_index;
		#pragma simd
		#pragma vector aligned
		#pragma vector always
		#pragma ivdep
		for(int n=0;n<USER_COUNT;n++)
		{
			if(rating[block_m*USER_COUNT+n] !=0)
			{
				average_sum += rating[block_m*USER_COUNT+n];
				average_index += 1;
			}
		}
		if(average_index.get_value()!=0)
		{
			average_matrix[m]=(double)average_sum.get_value()/(double)average_index.get_value();
		}
		else
		{
			average_matrix[m]=0;  /* item with no observed ratings */
		}
	}
	/* Pass 2: mean-center each row. */
	cilk_for (int m = start_index; m<(ITEM_COUNT < (start_index+ITEM_BLOCK) ? ITEM_COUNT:(start_index+ITEM_BLOCK)); m++ )
	{
		int block_m = m-start_index;
		/* BUG FIX: average_item was a single function-scope variable
		 * written by every cilk_for iteration concurrently (a data
		 * race); it is now local to each iteration. */
		double average_item = average_matrix[m];
		#pragma simd
		for(int n=0;n<USER_COUNT;n++)
		{
			rating[block_m*USER_COUNT+n] -= (float)average_item;
		}
	}
}
int main(int argc, char ** argv){
//first, read the data in files into an array in order to process it more efficiently.
record_item_struct * item_data = (record_item_struct *) _mm_malloc(sizeof(record_item_struct)*RECORD_COUNT,64);
memset(item_data, 0, sizeof(record_item_struct)*RECORD_COUNT);
get_item_data(item_data, argv[1]);
float * item_block_data_i = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
float * item_block_data_j = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
if (item_block_data_i==NULL || item_block_data_j == NULL)
{
printf(" malloc of base_data failed\n");
exit(1);
}
double * item_average_matrix = (double*)_mm_malloc(sizeof(double)*ITEM_COUNT,64);
memset(item_average_matrix,0,sizeof(double)*ITEM_COUNT);
k_similarity_struct * item_index_matrix = (k_similarity_struct *) _mm_malloc (sizeof(k_similarity_struct)*K_SORT*ITEM_COUNT,64);
memset (item_index_matrix,-1,sizeof(k_similarity_struct));
int item_start_index_i=0;
int item_start_index_j=0;
// int i,j;
for (int i=0; i <= ITEM_ROUND; i++)
{
printf("round %d ================== with ITEM_BLOCK %d\n",i,ITEM_BLOCK);
//block_data
printf("get item_block_data begins\n");
item_start_index_i = get_item_block_data(i, item_block_data_i,item_start_index_i, item_data);
printf("get_item_block_data ends\n");
//average matrix
printf("get_item_average_matrix begins\n");
get_item_average_matrix(item_block_data_i, item_average_matrix,i*ITEM_BLOCK);
printf("get_item_average_matrix ends\n");
item_start_index_j = 0;
for(int j=0; j<= i;j++)
{
if( i==j)
{
//the index of item after sorting the similarity matrix
printf("get k_similarity_begins\n");
get_pearson_similarity(item_block_data_i,item_block_data_i,item_average_matrix,item_index_matrix,i*ITEM_BLOCK,i*ITEM_BLOCK,0);
printf("get k_similarity_ends\n");
continue;
}
//block_data
printf("get item_block_data begins\n");
item_start_index_j = get_item_block_data(j, item_block_data_j, item_start_index_j, item_data);
printf("get_item_block_data ends\n");
//the index of item after sorting the similarity matrix
printf("get k_similarity_begins\n");
get_pearson_similarity(item_block_data_i,item_block_data_j,item_average_matrix,item_index_matrix,i*ITEM_BLOCK,j*ITEM_BLOCK,1);
printf("get k_similarity_ends\n");
}
}
_mm_free(item_block_data_i);
_mm_free(item_block_data_j);
int *test_data;
float *test_rating;
test_data = (int*)_mm_malloc (sizeof(int)*2*TEST_COUNT,64);
test_rating= (float*)_mm_malloc(sizeof(float)*TEST_COUNT,64);
printf("get_test_data begins\n");
get_test_data(test_data,test_rating);
printf("get_test_data ends\n");
long user_file_start_index = 0;
float * user_block_data = (float*)_mm_malloc(sizeof(float)*USER_BLOCK*ITEM_COUNT,64);
if (user_block_data==NULL)
{
printf(" malloc of base_data failed\n");
exit(1);
}
int test_start_index = 0;
double * item_predict_rating = (double*)_mm_malloc (sizeof(double)*TEST_COUNT,64);
for(int i=0;i<=USER_ROUND;i++)
{
user_file_start_index = get_user_block_data(i,user_block_data, user_file_start_index);
printf("get_predict_rating begins\n");
test_start_index=get_predict_rating(item_predict_rating, user_block_data, item_index_matrix, test_data,item_average_matrix,i*USER_BLOCK, test_start_index);
printf("get_predict_rating ends\n");
if ( test_start_index == TEST_COUNT)
break;
}
_mm_free (user_block_data);
double rmse;
printf("get_rmse begins\n");
rmse = get_rmse(test_rating,item_predict_rating);
printf("ge_rmse ends\n");
printf("rmse= %f\n", rmse);
return 0;
}
|
FloydsAlgorithm.c | /*
Algorithm 11 - Floyd's Algorithm
Implement All-Pairs Shortest Paths Problem using Floyd's algorithm. Parallelize this algorithm, implement it using OpenMP and determine the speed-up achieved.
*/
#include <stdio.h>
#include <time.h>
#include <omp.h>
int total_threads;
int minimum(int a, int b);
void floydsAlgorithm(int a[10][10], int n);
/*
 * Read an n-node cost adjacency matrix (1-indexed), run Floyd's algorithm,
 * print the all-pairs shortest-path matrix and the elapsed CPU time.
 */
int main() {
	int a[10][10], n, i, j;
	double time_taken;
	clock_t begin_clock, end_clock;

	printf("Enter the number of nodes: ");
	/* BUG FIX: a[][] is 10x10 and indexed from 1, so at most 9 nodes
	 * fit; the original neither validated n nor checked scanf. */
	if (scanf("%d", &n) != 1 || n < 1 || n > 9) {
		printf("Invalid number of nodes (must be 1..9)\n");
		return 1;
	}
	printf("Enter the cost adjacency matrix:\n");
	for (i = 1; i <= n; i++)
		for (j = 1; j <= n; j++)
			scanf("%d", &a[i][j]);
	begin_clock = clock();
	floydsAlgorithm(a, n);
	end_clock = clock();
	printf("All-Pairs Shortest Paths is as follows:\n");
	for (i = 1; i <= n; i++) {
		for (j = 1; j <= n; j++)
			printf("%d ", a[i][j]);   /* BUG FIX: was "d " -- missing % */
		printf("\n");
	}
	time_taken = (end_clock - begin_clock) / (double) CLOCKS_PER_SEC;
	printf("\nThe time taken to perform Floyd's Algorithm is: %f\n", time_taken);
	return 0;
}
/* Return the smaller of two integers. */
int minimum(int a, int b) {
	if (a < b)
		return a;
	return b;
}
/*
 * Floyd-Warshall all-pairs shortest paths over the 1-indexed n x n
 * submatrix of a, parallelized with OpenMP.
 *
 * BUG FIX: the original wrapped the triple loop in "#pragma omp sections"
 * with no "section" directives, so exactly one thread executed the whole
 * computation while i/j/k were shared across threads (a data race on the
 * loop counters). The k stages are inherently sequential; each stage's
 * row loop is independent, so we parallelize over i inside each k.
 * The artificial 10000x10000 busy-wait delay loop was removed.
 */
void floydsAlgorithm(int a[10][10], int n) {
	int i, j, k;

	for (k = 1; k <= n; k++) {
		#pragma omp parallel for private(j) shared(a, n, k)
		for (i = 1; i <= n; i++)
			for (j = 1; j <= n; j++)
				a[i][j] = minimum(a[i][j], a[i][k] + a[k][j]);
	}
	/* Report the team size from inside a parallel region, as before. */
	#pragma omp parallel
	{
		if (omp_get_thread_num() == 0) {
			int no_of_threads = omp_get_num_threads();
			if (total_threads < no_of_threads)
				total_threads = no_of_threads;
			printf("\nTotal Threads Used are: %d\n", no_of_threads);
		}
	}
}
/*
Output
------
Enter the number of nodes: 4
Enter the cost adjacency matrix:
0 1 2 999
3 0 1 999
2 5 0 4
999 6 2 0
Total Threads Used are: 8
All-Pairs Shortest Paths is as follows:
0 1 2 6
3 0 1 5
2 3 0 4
4 5 2 0
The time taken to perform Floyd's Algorithm is: 0.217000
*/
|
private.c | #include <stdio.h>
/* Demo of the OpenMP private() clause: each thread gets its own copy of
 * x inside the loop; writes there never reach the outer x. */
int main(int argc, char *argv[]){
	int n=20000;
	double x;
	#pragma omp parallel for private(x)
	for (int i=0; i < n; i++){
		x = 1.0;                 // writes the thread-private copy only
		double y = x*2.0;
	}
	// NOTE(review): the outer x was never assigned, so this reads an
	// indeterminate value -- presumably intentional, to demonstrate that
	// private(x) leaves the original variable untouched.
	double z = x;
	printf("Outside parallel loop x is %lf z is %lf\n",x,z);
}
|
thread_scale.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See COPYRIGHT in top-level directory.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <omp.h>
#include <sched.h>
#include <unistd.h>
#include <pthread.h>
#include "zmtest_abslock.h"
#define TEST_NITER (1<<22)
#define WARMUP_ITER 128
#define CACHELINE_SZ 64
#define ARRAY_LEN 10
char cache_lines[CACHELINE_SZ*ARRAY_LEN] = {0};
#if ARRAY_LEN == 10
int indices [] = {3,6,1,7,0,2,9,4,8,5};
#elif ARRAY_LEN == 4
int indices [] = {2,1,3,0};
#endif
zm_abslock_t lock;
#if defined (ZM_BIND_MANUAL)
/* Pin the calling thread to the core whose id equals its OpenMP thread
 * id (compact/linear placement); warn via perror() on failure. */
void bind_compact(){
	int core = omp_get_thread_num();   /* target core == thread id */
	cpu_set_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(core, &cpus);
	if (pthread_setaffinity_np(pthread_self(), sizeof(cpus), &cpus) < 0)
		perror("pthread_setaffinity_np");
}
#else
#define bind_compact()
#endif
/* Benchmark the abstract lock: for 1,2,4,6,... up to max threads, time
 * TEST_NITER lock acquisitions (split statically across the team) and
 * print one CSV row of nthreads, throughput (acq/s) and latency (ns). */
static void test_thruput()
{
	unsigned nthreads = omp_get_max_threads();
	zm_abslock_init(&lock);
	int cur_nthreads;
	/* Throughput = lock acquisitions per second */
	printf("nthreads,thruput,lat\n");
	for(cur_nthreads=1; cur_nthreads <= nthreads; cur_nthreads+= ((cur_nthreads==1) ? 1 : 2)) {
		double start_time, stop_time;
#pragma omp parallel num_threads(cur_nthreads)
		{
			bind_compact();
			int tid = omp_get_thread_num();
			/* Warmup */
			for(int iter=0; iter < WARMUP_ITER; iter++) {
				zm_abslock_acquire(&lock);
				/* Computation */
				for(int i = 0; i < ARRAY_LEN; i++)
					cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN-1-i]];
				zm_abslock_release(&lock);
			}
			/* All threads finish warmup before the clock starts; the
			 * implied barrier on exiting "single" keeps every thread
			 * out of the timed loop until start_time is taken. */
#pragma omp barrier
#pragma omp single
			{
				start_time = omp_get_wtime();
			}
#pragma omp for schedule(static)
			for(int iter = 0; iter < TEST_NITER; iter++) {
				zm_abslock_acquire(&lock);
				/* Computation */
				for(int i = 0; i < ARRAY_LEN; i++)
					cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN-1-i]];
				zm_abslock_release(&lock);
			}
		}
		/* Implicit barrier at the end of the parallel region makes it
		 * safe to stop the clock here on the master thread. */
		stop_time = omp_get_wtime();
		double elapsed_time = stop_time - start_time;
		double thruput = (double)TEST_NITER/elapsed_time;
		double latency = elapsed_time*1e9/TEST_NITER; // latency in nanoseconds
		printf("%d,%.2lf,%.2lf\n", cur_nthreads, thruput, latency);
	}
}
/* Entry point: run the lock throughput benchmark; arguments are unused. */
int main(int argc, char **argv)
{
	(void)argc;
	(void)argv;
	test_thruput();
	return 0;
}
|
openmp.c | #include <stdio.h>
#include <omp.h>
// COMPILE WITH -fopenmp flag
/* Demo of private vs shared: tid is per-thread, gid is shared, so the
 * printed gid is racy and may not match the printing thread's tid. */
int main() {
	int tid;
	int gid = 1;

	#pragma omp parallel private(tid) shared(gid)
	{
		tid = omp_get_thread_num();
		gid = tid;   /* unsynchronized write to the shared variable */
		printf("Hello World %d %d\n", tid, gid);
	}
}
bitcoin_fmt_plug.c | /* bitcoin-qt (bitcoin) wallet cracker patch for JtR. Hacked together during
* April of 2013 by Dhiru Kholia <dhiru at openwall dot com>.
*
* Also works for Litecoin-Qt (litecoin) wallet files!
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru at openwall dot com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* This cracks password protected bitcoin (bitcoin-qt) "wallet" files.
*
* bitcoin => https://github.com/bitcoin/bitcoin
*
* Thanks to Solar for asking to add support for bitcoin wallet files.
*
* Works fine with bitcoin-core-0.14.0 from March, 2017.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_bitcoin;
#elif FMT_REGISTERS_H
john_register_one(&fmt_bitcoin);
#else
#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
static int omp_t = 1;
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sha2.h"
#include "aes.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_LABEL "Bitcoin"
#define FORMAT_NAME "Bitcoin Core"
#define FORMAT_TAG "$bitcoin$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "SHA512 AES " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 AES 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#if !defined (SHA512_DIGEST_LENGTH)
#define SHA512_DIGEST_LENGTH 64
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SZ 128
/* Self-test vectors: {ciphertext, known plaintext} pairs covering bitcoin,
 * litecoin and bitcoin-core-0.14.0 wallets, incl. a max-length password. */
static struct fmt_tests bitcoin_tests[] = {
	/* bitcoin wallet hashes */
	{"$bitcoin$96$169ce74743c260678fbbba92e926198702fd84e46ba555190f6f3d82f6852e4adeaa340d2ac065288e8605f13d1d7c86$16$26049c64dda292d5$177864$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "openwall"},
	{"$bitcoin$96$bd97a08e00e38910550e76848949285b9702fe64460f70d464feb2b63f83e1194c745e58fa4a0f09ac35e5777c507839$16$26049c64dda292d5$258507$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "password"},
	{"$bitcoin$96$4eca412eeb04971428efec70c9e18fb9375be0aa105e7eec55e528d0ba33a07eb6302add36da86736054dee9140ec9b8$16$26049c64dda292d5$265155$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "strongpassword"},
	/* litecoin wallet hash */
	{"$bitcoin$96$54401984b32448917b6d18b7a11debe91d62aaa343ab62ed98e1d3063f30817832c744360331df94cbf1dcececf6d00e$16$bfbc8ee2c07bbb4b$194787$96$07a206d5422640cfa65a8482298ad8e8598b94d99e2c4ce09c9d015b734632778cb46541b8c10284b9e14e5468b654b9$66$03fe6587bf580ee38b719f0b8689c80d300840bbc378707dce51e6f1fe20f49c20", "isyourpasswordstronger"},
	/* bitcoin-core-0.14.0 wallet */
	{"$bitcoin$96$8e7be42551c822c7e55a384e15b4fbfec69ceaed000925870dfb262d3381ed4405507f6c94defbae174a218eed0b5ce8$16$b469e6dbd76926cf$244139$96$ec03604094ada8a5d76bbdb455d260ac8b202ec475d5362d334314c4e7012a2f4b8f9cf8761c9862cd20892e138cd29e$66$03fdd0341a72d1a119ea1de51e477f0687a2bf601c07c032cc87ef82e0f8f49b19", "password@12345"},
	/* bitcoin-core-0.14.0 wallet */
	{"$bitcoin$96$2559c50151aeec013a9820c571fbee02e5892a3ead07607ee8de9d0ff55798cff6fe60dbd71d7873cb794a03e0d63b70$16$672204f8ab168ff6$136157$96$a437e8bd884c928603ee00cf85eaaf9245a071efa763db03ab485cb757f155976edc7294a6a731734f383850fcac4316$66$03ff84bb48f454662b91a6e588af8752da0674efa5dae82e7340152afcc38f4ba4", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"},
	{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int any_cracked, *cracked;
static size_t cracked_size;
/* Parsed fields of one "$bitcoin$..." ciphertext; hex strings from the
 * input line are decoded to raw bytes, lengths are in bytes. */
static struct custom_salt {
	unsigned char cry_master[SZ];   /* encrypted master key (AES-CBC) */
	int cry_master_length;
	unsigned char cry_salt[SZ];     /* KDF salt fed into the SHA-512 chain */
	int cry_salt_length;
	int cry_rounds;                 /* SHA-512 KDF iteration count */
	unsigned char ckey[SZ];         /* parsed but unused by crypt_all() */
	int ckey_length;
	unsigned char public_key[SZ];   /* parsed but unused by crypt_all() */
	int public_key_length;
} *cur_salt;
/* Allocate per-candidate buffers, scaled for OpenMP: min keys gets the
 * raw thread count, max additionally gets OMP_SCALE so each thread is
 * handed several candidates per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;   /* note: deliberately applied between min and max */
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_align(sizeof(*cracked), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Release the buffers allocated in init(); MEM_FREE NULLs its argument. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
// #define BTC_DEBUG
#ifdef BTC_DEBUG
/* Debug helper: dump len bytes as lowercase hex plus a trailing newline. */
static void print_hex(unsigned char *str, int len)
{
	int i = 0;

	while (i < len) {
		printf("%02x", str[i]);
		++i;
	}
	printf("\n");
}
#endif
/* Sanity-check a "$bitcoin$..." ciphertext line field by field.
 * Layout: $bitcoin$<mlen>$<master>$<slen>$<salt>$<rounds>$<klen>$<ckey>$<plen>$<pubkey>
 * where each <..len> is the decimal character count of the following
 * lowercase-hex string (capped at SZ*2). Returns 1 iff well-formed. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p = NULL;
	int res;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);   /* strtokm mutates, so work on a copy */
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* cry_master_length (of the hex string) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_master */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and cry_master */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_salt_length (length of hex string) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_salt */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and cry_salt */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_rounds */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* ckey_length (of hex) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* ckey */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and ckey */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* public_key_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* public_key */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and public_key */
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a valid()-checked ciphertext into a custom_salt. Each length
 * field is halved (hex chars -> bytes) and each hex string decoded via
 * the atoi16 nibble table. Returns a pointer to static storage. */
static void *get_salt(char *ciphertext)
{
	int i;
	char *p;
	char *ctcopy = strdup(ciphertext);   /* strtokm mutates its input */
	char *keeptr = ctcopy;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "$");
	cs.cry_master_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.cry_master_length; i++)
		cs.cry_master[i] = atoi16[ARCH_INDEX(p[i * 2])]
			* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.cry_salt_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.cry_salt_length; i++)
		cs.cry_salt[i] = atoi16[ARCH_INDEX(p[i * 2])]
			* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.cry_rounds = atoi(p);
	p = strtokm(NULL, "$");
	cs.ckey_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.ckey_length; i++)
		cs.ckey[i] = atoi16[ARCH_INDEX(p[i * 2])]
			* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.public_key_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.public_key_length; i++)
		cs.public_key[i] = atoi16[ARCH_INDEX(p[i * 2])]
			* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Install the salt crypt_all() will use for the next batch. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* For each candidate password: derive key||iv = SHA512(password||salt)
 * iterated cry_rounds times, then AES-256-CBC-decrypt cry_master with
 * key = first 32 bytes and iv = next 16; a candidate is flagged cracked
 * when the PKCS padding of the decrypted master checks out to 32.
 * With SIMD, MAX_KEYS_PER_CRYPT lanes are hashed at once. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char output[SZ];
		SHA512_CTX sha_ctx;
		int i;
#ifdef SIMD_COEF_64
		char unaligned_buf[MAX_KEYS_PER_CRYPT*SHA_BUF_SIZ*sizeof(uint64_t)+MEM_ALIGN_SIMD];
		uint64_t *key_iv = (uint64_t*)mem_align(unaligned_buf, MEM_ALIGN_SIMD);
		JTR_ALIGN(8) unsigned char hash1[SHA512_DIGEST_LENGTH]; // 512 bits
		int index2;
		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			// The first hash for this password
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, saved_key[index+index2], strlen(saved_key[index+index2]));
			SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
			SHA512_Final(hash1, &sha_ctx);
			// Now copy and convert hash1 from flat into SIMD_COEF_64 buffers.
			for (i = 0; i < SHA512_DIGEST_LENGTH/sizeof(uint64_t); ++i) {
#if COMMON_DIGEST_FOR_OPENSSL
				key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = sha_ctx.hash[i]; // this is in BE format
#else
				key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = sha_ctx.h[i];
#endif
			}
			// We need to set ONE time, the upper half of the data buffer. We put the 0x80 byte (in BE format), at offset
			// 512-bits (SHA512_DIGEST_LENGTH) multiplied by the SIMD_COEF_64 (same as MAX_KEYS_PER_CRYPT), then zero
			// out the rest of the buffer, putting 512 (#bits) at the end. Once this part of the buffer is set up, we never
			// touch it again, for the rest of the crypt. We simply overwrite the first half of this buffer, over and over
			// again, with BE results of the prior hash.
			key_iv[ SHA512_DIGEST_LENGTH/sizeof(uint64_t) * SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64 ] = 0x8000000000000000ULL;
			for (i = (SHA512_DIGEST_LENGTH/sizeof(uint64_t)+1); i < 15; i++)
				key_iv[i*SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = 0;
			key_iv[15*SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (SHA512_DIGEST_LENGTH << 3);
		}
		for (i = 1; i < cur_salt->cry_rounds; i++) // start at 1; the first iteration is already done
			SIMDSHA512body(key_iv, key_iv, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			AES_KEY aes_key;
			unsigned char key[32];
			unsigned char iv[16];
			// Copy and convert from SIMD_COEF_64 buffers back into flat buffers, in little-endian
#if ARCH_LITTLE_ENDIAN==1
			for (i = 0; i < sizeof(key)/sizeof(uint64_t); i++)	// the derived key
				((uint64_t *)key)[i] = JOHNSWAP64(key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]);
			for (i = 0; i < sizeof(iv)/sizeof(uint64_t); i++)	// the derived iv
				((uint64_t *)iv)[i] = JOHNSWAP64(key_iv[SIMD_COEF_64*(sizeof(key)/sizeof(uint64_t) + i) + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]);
#else
			for (i = 0; i < sizeof(key)/sizeof(uint64_t); i++)	// the derived key
				((uint64_t *)key)[i] = key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
			for (i = 0; i < sizeof(iv)/sizeof(uint64_t); i++)	// the derived iv
				((uint64_t *)iv)[i] = key_iv[SIMD_COEF_64*(sizeof(key)/sizeof(uint64_t) + i) + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
#endif
			AES_set_decrypt_key(key, 256, &aes_key);
			AES_cbc_encrypt(cur_salt->cry_master, output, cur_salt->cry_master_length, &aes_key, iv, AES_DECRYPT);
			if (check_pkcs_pad(output, cur_salt->cry_master_length, 16) == 32) {
				cracked[index + index2] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
#else
		AES_KEY aes_key;
		unsigned char key_iv[SHA512_DIGEST_LENGTH];  // buffer for both the derived key and iv
		SHA512_Init(&sha_ctx);
		SHA512_Update(&sha_ctx, saved_key[index], strlen(saved_key[index]));
		SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
		SHA512_Final(key_iv, &sha_ctx);
		for (i = 1; i < cur_salt->cry_rounds; i++) {  // start at 1; the first iteration is already done
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, key_iv, SHA512_DIGEST_LENGTH);
			SHA512_Final(key_iv, &sha_ctx);
		}
		AES_set_decrypt_key(key_iv, 256, &aes_key);
		AES_cbc_encrypt(cur_salt->cry_master, output, cur_salt->cry_master_length, &aes_key, key_iv + 32, AES_DECRYPT);
		if (check_pkcs_pad(output, cur_salt->cry_master_length, 16) == 32) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
#endif
	}
	return count;
}
/* Any hit in the last crypt_all() batch? (flags set there) */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Was this particular candidate flagged cracked? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Full verification already happened in crypt_all() (padding check),
 * so just report the flag. */
static int cmp_exact(char *source, int index)
{
	return cracked[index];
}
/* Store a candidate password (NUL-terminated, truncated to the buffer). */
static void bitcoin_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for reporting. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Tunable-cost hook: report the salt's KDF iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *cs = (struct custom_salt *)salt;

	return (unsigned int)cs->cry_rounds;
}
/* John the Ripper format descriptor: static parameters followed by the
 * method table wired to the functions above. */
struct fmt_main fmt_bitcoin = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		bitcoin_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		bitcoin_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
ssha512_fmt_plug.c | /*
* ssha512 support for LDAP style password storage
*
* This software is Copyright (c) 2013 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha2);
#else
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "johnswap.h"
#include "common.h"
#include "sha2.h"
#include "base64_convert.h"
#include "simd-intrinsics.h"
#include <string.h>
#include "rawSHA512_common.h"
#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "SSHA512"
#define FORMAT_NAME "LDAP"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define PLAINTEXT_LENGTH (111-NSLDAP_SALT_LEN)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Decoded SSHA512 salt: raw bytes plus length; w32 aliases the first
 * 32 bits for salt_hash() bucketing. */
struct s_salt
{
	unsigned int len;
	union {
		unsigned char c[NSLDAP_SALT_LEN];
		uint32_t w32;
	} data;
};
static struct s_salt *saved_salt;
#ifdef SIMD_COEF_64
#define FMT_IS_64BIT
#define FMT_IS_BE
#include "common-simd-getpos.h"
static uint64_t (*saved_key)[SHA_BUF_SIZ*SIMD_COEF_64];
static uint64_t (*crypt_out)[8*SIMD_COEF_64];
static uint64_t (**len_ptr64);
static int max_count;
#else
static uint32_t (*crypt_out)[DIGEST_SIZE / 4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static int *saved_len;
/* Allocate key/output buffers (OMP-scaled); with SIMD, also precompute
 * len_ptr64[k] = address of SHA-512 input word 15 (the message bit-length
 * slot) of lane k, so crypt_all() can patch lengths in place. */
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_64
	unsigned int i, j;
#endif
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
#ifndef SIMD_COEF_64
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
#else
	len_ptr64 = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*len_ptr64), MEM_ALIGN_SIMD);
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt /
	                             SIMD_COEF_64,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt /
	                             SIMD_COEF_64,
	                             sizeof(*crypt_out), MEM_ALIGN_SIMD);
	for (i = 0; i < self->params.max_keys_per_crypt; i += SIMD_COEF_64) {
		uint64_t *keybuffer = &((uint64_t *)saved_key)[(i&(SIMD_COEF_64-1)) + (i/SIMD_COEF_64)*SHA_BUF_SIZ*SIMD_COEF_64];
		for (j = 0; j < SIMD_COEF_64; ++j) {
			len_ptr64[i+j] = &keybuffer[15*SIMD_COEF_64];
			++keybuffer;
		}
	}
	max_count = self->params.max_keys_per_crypt;
#endif
}
/* Release everything init() allocated (len_ptr64 exists only for SIMD). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
#ifdef SIMD_COEF_64
	MEM_FREE(len_ptr64);
#endif
	MEM_FREE(saved_len);
}
#define SET_SAVED_LEN
#include "common-simd-setkey64.h"
/* Decode the {SSHA512} base64 blob (digest||salt) and return the salt
 * part -- the bytes after the DIGEST_SIZE-byte digest. Static storage. */
static void * get_salt(char * ciphertext)
{
	static struct s_salt cursalt;
	char *p;
	char realcipher[CIPHERTEXT_LENGTH];
	int len;
	ciphertext += NSLDAP_TAG_LENGTH;
	memset(realcipher, 0, sizeof(realcipher));
	memset(&cursalt, 0, sizeof(struct s_salt));
	len = strlen(ciphertext);
	base64_convert(ciphertext, e_b64_mime, len, realcipher, e_b64_raw, sizeof(realcipher), flg_Base64_DONOT_NULL_TERMINATE, 0);
	// We now support any salt length up to NSLDAP_SALT_SIZE
	/* Decoded bytes = 3 per 4 base64 chars, minus one per trailing '='
	 * pad (subtracted in the loop below); salt = decoded - digest.
	 * NOTE(review): assumes MIME-padded input (len % 4 == 0) -- confirm
	 * upstream validation guarantees that. */
	cursalt.len = (len + 3) / 4 * 3 - DIGEST_SIZE;
	p = &ciphertext[len];
	while (*--p == '=')
		cursalt.len--;
	memcpy(cursalt.data.c, realcipher+DIGEST_SIZE, cursalt.len);
	return &cursalt;
}
/* Fast reject: compare only the first 64 (SIMD) / 32 (scalar) bits of
 * the target digest against every computed hash in the batch. */
static int cmp_all(void *binary, int count) {
	unsigned int index;
	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((uint64_t *) binary)[0] == crypt_out[index/SIMD_COEF_64][index&(SIMD_COEF_64-1)])
#else
		if ( ((uint32_t*)binary)[0] == crypt_out[index][0] )
#endif
			return 1;
	return 0;
}
/* Full-digest comparison for one candidate; in SIMD mode the digest
 * words of lane `index` are interleaved with stride SIMD_COEF_64. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	int i;
	for (i = 0; i < DIGEST_SIZE/sizeof(uint64_t); i++)
		if (((uint64_t *) binary)[i] != crypt_out[index/SIMD_COEF_64][(index&(SIMD_COEF_64-1))+i*SIMD_COEF_64])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_out[index], DIGEST_SIZE);
#endif
}
/* cmp_one() already compares the entire digest, so nothing remains. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Install the salt used by the next crypt_all() batch. */
static void set_salt(void *salt) {
	saved_salt = salt;
}
/* Compute SHA512(password || salt) for every candidate. In SIMD mode the
 * salt, the 0x80 terminator and the bit length are spliced directly into
 * each lane's interleaved input buffer (stale bytes from a longer
 * previous key are zeroed) before one SIMD SHA-512 pass per group. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT) {
#ifndef SIMD_COEF_64
		SHA512_CTX ctx;
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, saved_key[index], saved_len[index]);
		SHA512_Update(&ctx, saved_salt->data.c, saved_salt->len);
		SHA512_Final((unsigned char*)crypt_out[index], &ctx);
#else
		// We have to append salt (and re-clean buffer if it is dirty),
		// then append final length of password.salt
		int i, j;
		unsigned char *sk = (unsigned char*)saved_key;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			int idx = i+index;
			int x = saved_len[idx];
			for (j = 0; j < saved_salt->len; ++j)
				sk[GETPOS(x+j,idx)] = saved_salt->data.c[j];
			x += j;
			sk[GETPOS(x,idx)] = 0x80;   /* SHA-512 padding byte */
			++x;
			/* zero any leftover bytes from a previous, longer key */
			while (sk[GETPOS(x,idx)]) {
				sk[GETPOS(x,idx)] = 0;
				++x;
			}
			/* message length in bits, written via the precomputed pointer */
			*(len_ptr64[idx]) = (saved_len[idx]+saved_salt->len)<<3;
		}
		SIMDSHA512body(&saved_key[index/SIMD_COEF_64], crypt_out[index/SIMD_COEF_64], NULL, SSEi_MIXED_IN);
#endif
	}
	return count;
}
/* Hash-table index functions: low bits of the first 64-bit word of each
   computed digest (SIMD build accounts for the interleaved lane layout). */
#ifdef SIMD_COEF_64
static int get_hash_0 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
/* Matching index functions over a loaded binary digest: low bits of its
   first 64-bit word, mirroring the get_hash_* functions above. */
static int binary_hash_0 (void *p) { return *((uint64_t*)p) & PH_MASK_0; }
static int binary_hash_1 (void *p) { return *((uint64_t*)p) & PH_MASK_1; }
static int binary_hash_2 (void *p) { return *((uint64_t*)p) & PH_MASK_2; }
static int binary_hash_3 (void *p) { return *((uint64_t*)p) & PH_MASK_3; }
static int binary_hash_4 (void *p) { return *((uint64_t*)p) & PH_MASK_4; }
static int binary_hash_5 (void *p) { return *((uint64_t*)p) & PH_MASK_5; }
static int binary_hash_6 (void *p) { return *((uint64_t*)p) & PH_MASK_6; }
/* Bucket a salt into the framework's salt hash table using the first
   32 bits of its raw data. */
static int salt_hash(void *salt)
{
	struct s_salt * mysalt = salt;
	return mysalt->data.w32 & (SALT_HASH_SIZE - 1);
}
/*
  Format descriptor registering this salted SHA-512 implementation with the
  John the Ripper framework: the parameter block first, then the method
  table wiring in the functions defined above.
*/
struct fmt_main fmt_saltedsha2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		NSLDAP_BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		DIGEST_SIZE,
		BINARY_ALIGN,
		NSLDAP_SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ NSLDAP_FORMAT_TAG },
		sha512_common_tests_ssha512
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		sha512_common_valid_nsldap,
		fmt_default_split,
		sha512_common_binary_nsldap,
		get_salt,
		{ NULL },
		fmt_default_source,
		/* binary hash functions (loaded-hash side) */
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		/* computed-hash index functions, matching the table above */
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
matrixmultiply-ompacc2.c | /*
Naive matrix-matrix multiplication(mmm)
multiple GPUs, standard OpenMP 4.0 directives
By C. Liao
*/
#include <stdio.h>
#include <assert.h>
#include <omp.h>
#define N 1024
#define M 1024
#define K 1024
#define REAL float
int i,j,k;
REAL a[N][M],b[M][K],c[N][K], c2[N][K];
int init();
int mmm();
int mmm2();
int verify();
//#define MAX_GPU_COUNT 4
/* Driver: fill the inputs, run the offloaded multiply (mmm) and the host
   reference multiply (mmm2), then compare their results via verify(). */
int main(void)
{
	init();
	mmm();
	mmm2();
	return verify();
}
/* Deterministically initialize a and b and zero both result matrices.
   Note: iterates with the file-scope indices i,j declared at the top. */
int init()
{
	for (i=0;i<N;i++)
		for(j=0;j<M;j++)
			a[i][j]=3.0*i*j/N/M;
	for (i=0;i<M;i++)
		for(j=0;j<K;j++)
			b[i][j]=5.0*j*i/N/M;
	for (i=0;i<N;i++)
		for(j=0;j<K;j++)
		{
			c[i][j]=0.0;
			c2[i][j]=0.0;
		}
	return 0;
}
/*
TODO: try different i,j,k orders
a b e f a*e+ b*g , a*f+ b*h
c d x g h = c*e+ d*g, c*f+ d*h
*/
/*
 * Offloaded multiply: c = a * b, splitting the rows of c evenly across the
 * available devices -- one OpenMP host thread per device, each mapping its
 * row slice with `omp target` and running the inner loops in parallel.
 */
int mmm()
{
	int GPU_N , idev;
	int n = N;
	// GPU_N = xomp_get_num_devices();
	GPU_N = 1;
	printf("CUDA-capable device count: %i\n", GPU_N);
#if 0
	if (GPU_N > MAX_GPU_COUNT)
	{
		GPU_N = MAX_GPU_COUNT;
	}
	assert (GPU_N>0 && GPU_N<=MAX_GPU_COUNT);
#endif
	omp_set_num_threads(GPU_N);
#pragma omp parallel shared (GPU_N, a, b, c, n) private(idev)
	// for (idev = 0; idev < GPU_N; idev++)
	{
		int tid = omp_get_thread_num();
		// cudaSetDevice(tid);
		xomp_set_default_device (tid);
		long size ;
		long offset;
#if 0
		int size = n / GPU_N;
		int offset = size * tid;
		if(tid < n%GPU_N)
		{
			size++;
		}
		if(tid >= n%GPU_N)
			offset += n%GPU_N;
		else
			offset += tid;
#endif
		/* Divide rows [0,n) evenly among GPU_N workers. */
		XOMP_static_even_divide (0, n, GPU_N, tid, &offset, &size);
		/* size/offset are long: %ld (the previous %d is undefined behavior
		   on LP64 targets where long is 64-bit). */
		printf("thread %d working on GPU devices %d with size %ld copying data from y_ompacc with offset %ld\n",tid, tid, size,offset);
		int i, j, k;
#pragma omp target device (tid) map(tofrom:c[offset:size][0:n]), map(to:a[offset:size][0:n],b[0:n][0:n], offset,size,n)
#pragma omp parallel for private(i,j,k) shared (a,b,c, n, offset, size)
		for (i = offset; i < offset + size; i++)
			for (j = 0; j < M; j++)
				for (k = 0; k < K; k++)
					c[i][j]= c[i][j]+a[i][k]*b[k][j];
	}
	return 0;
}
/* Sequential host reference multiply into c2, used to validate mmm().
   Uses the file-scope loop indices i,j,k. */
int mmm2()
{
	for (i = 0; i < N; i++)
		for (j = 0; j < M; j++)
			for (k = 0; k < K; k++)
				c2[i][j]= c2[i][j]+a[i][k]*b[k][j];
	return 0;
}
/*
 * Compare the offloaded result (c) with the host reference (c2) by summing
 * each and checking the sums agree.  The two sums are accumulated in
 * different orders (device slices vs. sequential host loop), so exact
 * floating-point equality is too strict; use a relative tolerance instead.
 */
int verify()
{
	REAL sum=0.0, sum2=0.0, diff;
	for (i=0;i<N;i++)
		for(j=0;j<K;j++)
		{
			sum+=c[i][j];
			sum2+=c2[i][j];
		}
	printf("sum of c[i][j] is %f\n",sum);
	printf("sum of c2[i][j] is %f\n",sum2);
	diff = sum - sum2;
	if (diff < 0) diff = -diff;
	/* scale the tolerance by the magnitude of the sums (+1 guards the
	   all-zero case) */
	assert (diff <= 1e-4f * (sum > 0 ? sum : -sum) + 1e-4f);
	return 0;
}
|
pgl_ext.c |
//Raw draw functions that bypass the OpenGL pipeline and draw
//points/lines/triangles directly to the framebuffer, modify as needed.
//
//Example modifications:
//add the blending part of OpenGL to put_pixel
//change them to take vec4's instead of Color's
//change put_triangle to draw all one color or have a separate path/function
//that draws a single color triangle faster (no need to blend)
//
//pass the framebuffer in instead of drawing to c->back_buffer so
//you can use it elsewhere, independently of a glContext
//etc.
//
// Fill the entire back buffer with 0xFF in every byte of every 4-byte
// pixel (opaque white), bypassing glClearColor/glClear state.
void pglClearScreen()
{
	memset(c->back_buffer.buf, 255, c->back_buffer.w * c->back_buffer.h * 4);
}
// Set the number of vertex-shader output floats and their per-component
// interpolation modes for the current program, and make sure the shared
// output buffer can hold them for a full batch of vertices.
void pglSetInterp(GLsizei n, GLenum* interpolation)
{
	c->programs.a[c->cur_program].vs_output_size = n;
	c->vs_output.size = n;

	memcpy(c->programs.a[c->cur_program].interpolation, interpolation, n*sizeof(GLenum));
	cvec_reserve_float(&c->vs_output.output_buf, n * MAX_VERTICES);

	//vs_output.interpolation would be already pointing at current program's array
	//unless the programs array was realloced since the last glUseProgram because
	//they've created a bunch of programs.  Unlikely they'd be changing a shader
	//before creating all their shaders but whatever.
	c->vs_output.interpolation = c->programs.a[c->cur_program].interpolation;
}
//TODO
//pglDrawRect(x, y, w, h)
//pglDrawPoint(x, y)
// Full-screen fragment pass: run the current program's fragment shader
// once per back-buffer pixel (no vertex stage), honoring `discard`.
// Only gl_FragCoord.x/y are set; depth is passed as 0 and unused here.
void pglDrawFrame()
{
	frag_func frag_shader = c->programs.a[c->cur_program].fragment_shader;

	Shader_Builtins builtins;
	#pragma omp parallel for private(builtins)
	for (int y=0; y<c->back_buffer.h; ++y) {
		for (int x=0; x<c->back_buffer.w; ++x) {

			//ignore z and w components
			builtins.gl_FragCoord.x = x + 0.5f;
			builtins.gl_FragCoord.y = y + 0.5f;

			builtins.discard = GL_FALSE;
			frag_shader(NULL, &builtins, c->programs.a[c->cur_program].uniform);
			if (!builtins.discard)
				draw_pixel(builtins.gl_FragColor, x, y, 0.0f);  //depth isn't used for pglDrawFrame
		}
	}

}
// Map a caller-owned memory block as the data store of the currently bound
// buffer.  No copy is made and PGL will never free it (user_owned).  Error
// behavior mirrors glBufferData: bad target -> GL_INVALID_ENUM, no buffer
// bound -> GL_INVALID_OPERATION, NULL data -> GL_INVALID_VALUE.
void pglBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage)
{
	// Remember the unadjusted enum: `target` becomes an array index below,
	// so comparing it against GL_ELEMENT_ARRAY_BUFFER after the subtraction
	// could never match (latent bug: the VAO's element_buffer was never
	// updated on this path).
	GLenum bind_target = target;

	if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	//check for usage later

	target -= GL_ARRAY_BUFFER;
	if (c->bound_buffers[target] == 0) {
		if (!c->error)
			c->error = GL_INVALID_OPERATION;
		return;
	}

	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	// TODO Should I change this in spec functions too?  Or just say don't mix them
	// otherwise bad things/undefined behavior??
	if (!c->buffers.a[c->bound_buffers[target]].user_owned) {
		free(c->buffers.a[c->bound_buffers[target]].data);
	}

	// user_owned buffer, just assign the pointer, will not free
	c->buffers.a[c->bound_buffers[target]].data = (u8*)data;
	c->buffers.a[c->bound_buffers[target]].user_owned = GL_TRUE;
	c->buffers.a[c->bound_buffers[target]].size = size;

	if (bind_target == GL_ELEMENT_ARRAY_BUFFER) {
		c->vertex_arrays.a[c->cur_vertex_array].element_buffer = c->bound_buffers[target];
	}
}
// 1D analogue of pglTexImage2D: map caller-owned pixel data as the bound
// 1D texture's storage without copying or taking ownership.  Only
// GL_UNSIGNED_BYTE data is supported.
void pglTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	if (target != GL_TEXTURE_1D) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	//ignore level for now

	int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];

	c->textures.a[cur_tex].w = width;

	if (type != GL_UNSIGNED_BYTE) {
		// Report the unsupported type instead of returning silently, for
		// consistency with pglTexImage2D.
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// TODO see pglBufferData
	if (!c->textures.a[cur_tex].user_owned)
		free(c->textures.a[cur_tex].data);

	//TODO support other internal formats? components should be of internalformat not format
	c->textures.a[cur_tex].data = (u8*)data;
	c->textures.a[cur_tex].user_owned = GL_TRUE;

	//TODO
	//assume for now always RGBA coming in and that's what I'm storing it as
}
// Map caller-owned pixel data as the bound 2D/rectangle texture's storage
// (no copy, no ownership transfer).  Cube-map faces are accepted by the
// target check but the mapping code for them is currently commented out.
// Only GL_UNSIGNED_BYTE data is supported.
void pglTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	//GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP.
	//will add others as they're implemented
	if (target != GL_TEXTURE_2D &&
	    target != GL_TEXTURE_RECTANGLE &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_X &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_X &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_Y &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Y &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_Z &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	//ignore level for now

	//TODO support other types?
	if (type != GL_UNSIGNED_BYTE) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// TODO I don't actually support anything other than GL_RGBA for input or
	// internal format ... so I should probably make the others errors and
	// I'm not even checking internalFormat currently..
	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	int cur_tex;
	if (target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE) {
		cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];

		c->textures.a[cur_tex].w = width;
		c->textures.a[cur_tex].h = height;

		// TODO see pglBufferData
		if (!c->textures.a[cur_tex].user_owned)
			free(c->textures.a[cur_tex].data);

		//TODO support other internal formats? components should be of internalformat not format
		// If you're using these pgl mapped functions, it assumes you are respecting
		// your own current unpack alignment settings already
		c->textures.a[cur_tex].data = (u8*)data;
		c->textures.a[cur_tex].user_owned = GL_TRUE;

	} else {  //CUBE_MAP
		/*
		 * TODO, doesn't make sense to call this six times when mapping, you'd set
		 * them all up beforehand and set the pointer once...so change this or
		 * make a pglCubeMapData() function?
		 *
		cur_tex = c->bound_textures[GL_TEXTURE_CUBE_MAP-GL_TEXTURE_UNBOUND-1];

		// TODO see pglBufferData
		if (!c->textures.a[cur_tex].user_owned)
			free(c->textures.a[cur_tex].data);

		if (width != height) {
			//TODO spec says INVALID_VALUE, man pages say INVALID_ENUM ?
			if (!c->error)
				c->error = GL_INVALID_VALUE;
			return;
		}

		int mem_size = width*height*6 * components;
		if (c->textures.a[cur_tex].w == 0) {
			c->textures.a[cur_tex].w = width;
			c->textures.a[cur_tex].h = width; //same cause square
		} else if (c->textures.a[cur_tex].w != width) {
			//TODO spec doesn't say all sides must have same dimensions but it makes sense
			//and this site suggests it http://www.opengl.org/wiki/Cubemap_Texture
			if (!c->error)
				c->error = GL_INVALID_VALUE;
			return;
		}

		target -= GL_TEXTURE_CUBE_MAP_POSITIVE_X; //use target as plane index

		c->textures.a[cur_tex].data = (u8*)data;
		c->textures.a[cur_tex].user_owned = GL_TRUE;
		*/

	} //end CUBE_MAP
}
// 3D analogue of pglTexImage2D: map caller-owned pixel data as the bound
// 3D / 2D-array texture's storage without copying or taking ownership.
// Only GL_UNSIGNED_BYTE data is supported.
void pglTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	if (target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	//ignore level for now

	int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];

	c->textures.a[cur_tex].w = width;
	c->textures.a[cur_tex].h = height;
	c->textures.a[cur_tex].d = depth;

	if (type != GL_UNSIGNED_BYTE) {
		// Report the unsupported type instead of returning silently, for
		// consistency with pglTexImage2D.
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// TODO add error? only support GL_RGBA for now
	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// TODO see pglBufferData
	if (!c->textures.a[cur_tex].user_owned)
		free(c->textures.a[cur_tex].data);

	//TODO support other internal formats? components should be of internalformat not format
	c->textures.a[cur_tex].data = (u8*)data;
	c->textures.a[cur_tex].user_owned = GL_TRUE;

	//TODO
	//assume for now always RGBA coming in and that's what I'm storing it as
}
// Return (via *data) the raw data pointer of buffer object `buffer`.
// Buffer 0 and deleted/out-of-range buffers set GL_INVALID_OPERATION;
// a NULL output pointer sets GL_INVALID_VALUE.
void pglGetBufferData(GLuint buffer, GLvoid** data)
{
	// why'd you even call it?
	if (!data) {
		if (!c->error) {
			c->error = GL_INVALID_VALUE;
		}
		return;
	}

	if (buffer && buffer < c->buffers.size && !c->buffers.a[buffer].deleted) {
		*data = c->buffers.a[buffer].data;
	} else if (!c->error) {
		c->error = GL_INVALID_OPERATION;  // matching error code of binding invalid buffer
	}
}
// Return (via *data) the raw data pointer of texture object `texture`.
// Unlike pglGetBufferData, index 0 is allowed here (the default texture);
// out-of-range or deleted textures set GL_INVALID_OPERATION.
void pglGetTextureData(GLuint texture, GLvoid** data)
{
	// why'd you even call it?
	if (!data) {
		if (!c->error) {
			c->error = GL_INVALID_VALUE;
		}
		return;
	}

	if (texture < c->textures.size && !c->textures.a[texture].deleted) {
		*data = c->textures.a[texture].data;
	} else if (!c->error) {
		c->error = GL_INVALID_OPERATION;  // matching error code of binding invalid buffer
	}
}
// Write one packed pixel directly into the back buffer, bypassing the
// pipeline (no blending, no depth).  Rows are addressed from lastrow
// upward, so y is negated when indexing.
void put_pixel(Color color, int x, int y)
{
	u32* dest = &((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x];
	*dest = color.a << c->Ashift | color.r << c->Rshift | color.g << c->Gshift | color.b << c->Bshift;
}
//Should I have it take a glFramebuffer as paramater?
// Midpoint (implicit-form) line rasterizer, clipped to the back buffer.
// The segment is expressed as A*x + B*y + C = 0 with A = y1-y2, B = x2-x1;
// each step, the sign of that function at the next midpoint decides
// whether the minor axis advances.  Four cases cover the slope ranges.
void put_line(Color the_color, float x1, float y1, float x2, float y2)
{
	float tmp;

	//always draw from left to right
	if (x2 < x1) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
		tmp = y1;
		y1 = y2;
		y2 = tmp;
	}

	//calculate slope and implicit line parameters once
	float m = (y2-y1)/(x2-x1);
	float A = y1 - y2;
	float B = x2 - x1;
	float C = x1*y2 -x2*y1;

	int x, y;

	// clip the iteration range to the framebuffer
	float x_min = MAX(0, MIN(x1, x2));
	float x_max = MIN(c->back_buffer.w-1, MAX(x1, x2));
	float y_min = MAX(0, MIN(y1, y2));
	float y_max = MIN(c->back_buffer.h-1, MAX(y1, y2));

	//4 cases based on slope
	if (m <= -1) {     //(-infinite, -1]  : steep downhill, step in -y
		x = x1;
		for (y=y_max; y>=y_min; --y) {
			put_pixel(the_color, x, y);
			if (A*(x+0.5f) + B*(y-1) + C < 0)
				x++;
		}
	} else if (m <= 0) {     //(-1, 0]  : shallow downhill, step in +x
		y = y1;
		for (x=x_min; x<=x_max; ++x) {
			put_pixel(the_color, x, y);
			if (A*(x+1) + B*(y-0.5f) + C > 0)
				y--;
		}
	} else if (m <= 1) {     //(0, 1]  : shallow uphill, step in +x
		y = y1;
		for (x=x_min; x<=x_max; ++x) {
			put_pixel(the_color, x, y);
			if (A*(x+1) + B*(y+0.5f) + C < 0)
				y++;
		}
	} else {                 //(1, +infinite)  : steep uphill, step in +y
		x = x1;
		for (y=y_min; y<=y_max; ++y) {
			put_pixel(the_color, x, y);
			if (A*(x+0.5f) + B*(y+1) + C > 0)
				x++;
		}
	}
}
// Rasterize a triangle directly to the back buffer with barycentric
// coordinates, interpolating the three vertex colors.  Bypasses the
// pipeline entirely (no depth, no blending).
void put_triangle(Color c1, Color c2, Color c3, vec2 p1, vec2 p2, vec2 p3)
{
	// Pixel-aligned bounding box of the triangle, clamped to the buffer.
	float x_min = MAX(0, MIN(MIN(floor(p1.x), floor(p2.x)), floor(p3.x)));
	float x_max = MIN(c->back_buffer.w-1, MAX(MAX(ceil(p1.x), ceil(p2.x)), ceil(p3.x)));
	float y_min = MAX(0, MIN(MIN(floor(p1.y), floor(p2.y)), floor(p3.y)));
	float y_max = MIN(c->back_buffer.h-1, MAX(MAX(ceil(p1.y), ceil(p2.y)), ceil(p3.y)));

	//form implicit lines
	Line l12 = make_Line(p1.x, p1.y, p2.x, p2.y);
	Line l23 = make_Line(p2.x, p2.y, p3.x, p3.y);
	Line l31 = make_Line(p3.x, p3.y, p1.x, p1.y);

	// named `col` rather than `c` to avoid shadowing the global context
	Color col;
	float px, py;

	//y += 0.5f; //center of pixel
	// TODO(rswinkle): floor( + 0.5f) like draw_triangle?
	for (py=y_min; py<=y_max; ++py) {
		for (px=x_min; px<=x_max; ++px) {
			float gamma = line_func(&l12, px, py)/line_func(&l12, p3.x, p3.y);
			float beta = line_func(&l31, px, py)/line_func(&l31, p2.x, p2.y);
			float alpha = 1 - beta - gamma;

			if (alpha < 0 || beta < 0 || gamma < 0)
				continue;

			// A pixel exactly on an edge (coordinate == 0) is drawn only if
			// the opposite vertex lies on the same side of that edge as the
			// arbitrary off-screen point (-1,-1): a deterministic rule so
			// triangles sharing an edge never both claim the pixel.
			if ((alpha > 0 || line_func(&l23, p1.x, p1.y) * line_func(&l23, -1, -1) > 0) &&
			    (beta > 0  || line_func(&l31, p2.x, p2.y) * line_func(&l31, -1, -1) > 0) &&
			    (gamma > 0 || line_func(&l12, p3.x, p3.y) * line_func(&l12, -1, -1) > 0)) {
				// interpolate the vertex colors at this pixel
				col.r = alpha*c1.r + beta*c2.r + gamma*c3.r;
				col.g = alpha*c1.g + beta*c2.g + gamma*c3.g;
				col.b = alpha*c1.b + beta*c2.b + gamma*c3.b;
				put_pixel(col, px, py);
			}
		}
	}
}
|
common.h | #pragma once
#include <iostream>
#include <chrono>
#include <iomanip>
#include <string>
#include <ctime>
#include <vector>
#include <math.h>
#include <random>
#include <omp.h>
#include <mpi.h>
#include <stdexcept> // exceptions
#include <algorithm>
#include <iterator>
#include <unordered_map>
#define _USE_MATH_DEFINES
#include<cmath>
#include <comparison.h>
// Parse the command-line argument at position `idx` as an int.
// Throws std::invalid_argument / std::out_of_range like std::stoi.
int getArg(char *argv[],int idx){
  std::string raw(argv[idx]);
  return std::stoi(raw);
}
// Parse the command-line argument at position `idx` as a double.
// Throws std::invalid_argument / std::out_of_range like std::stod.
double getArgD(char *argv[],int idx){
  std::string raw(argv[idx]);
  return std::stod(raw);
}
/* double payoff_call(double St,double E){ */
/* if(St-E < 0) return 0; */
/* else return St-E; */
/* } */
/* double payoff_put(double St,double E){ */
/* if(E-St < 0) return 0; */
/* else return E-St; */
/* } */
/* double payoff(double St,double E,std::string payoff_fun){ */
/* if(payoff_fun == "call") return payoff_call(St,E); */
/* if(payoff_fun == "put") return payoff_put(St,E); */
/* if(payoff_fun != "call" && payoff_fun != "put") throw std::invalid_argument("Unknown payoff function"); */
/* } */
// Vanilla option payoff at price St with strike E.
// payoff_fun: +1 -> call payoff max(St-E, 0); -1 -> put payoff max(E-St, 0).
double payoff(double St,double E,int payoff_fun){
  double intrinsic = payoff_fun * (St - E);
  return intrinsic > 0.0 ? intrinsic : 0.0;
}
// for binom embar
// Natural log of the binomial coefficient C(N,i), computed as
// sum(log n, n=i+1..N) - sum(log j, j=2..N-i) to avoid factorial overflow.
// Fast paths: C(N,0)=C(N,N)=1 (log 0), C(N,1)=C(N,N-1)=N.
double comb(int N,int i){
  if (i == 0 || i == N) return 0;
  if (i == 1 || i == N - 1) return log((double)N);
  double log_c = 0;
  for (int n = i + 1; n <= N; ++n)
    log_c += log((double)n);
  for (int j = 2; j <= N - i; ++j)
    log_c -= log((double)j);
  return log_c;
}
// print vector
// Print the elements of `vec` on one line, space-separated, to stdout.
// Takes the vector by const reference (the old by-value parameter copied
// it on every call) and uses a range-for, avoiding the signed/unsigned
// loop comparison.
void vecprinter(const std::vector<double>& vec)
{
  for (const double& v : vec) {
    std::cout << v << " ";
  }
  std::cout << std::endl;
}
// print matrix
// Print each row of `mat` on its own line, elements space-separated, to
// stdout.  Takes the matrix by const reference (the old by-value parameter
// deep-copied every row on each call) and uses range-fors.
void matprinter(const std::vector<std::vector<double>>& mat)
{
  for (const auto& row : mat) {
    for (const double& v : row) {
      std::cout << v << " ";
    }
    std::cout << std::endl;
  }
}
// for mc amer
// in my case it is always 3x3 or 2x2 matrix
// Invert a matrix via the cofactor/adjugate formula.
// NOTE(review): the %3 index wrapping hard-codes a 3x3 layout; despite the
// comment above, size=2 would read x[..][2] / x[2][..] out of range --
// confirm callers only ever pass 3x3 data.
std::vector<std::vector<double>> inverse
(
  std::vector<std::vector<double>> x
  ,int size=3
)
{
  std::vector<std::vector<double>> inversed(size);
  for(int i=0;i<size;++i){
    inversed[i].resize(size);
  };

  double determinant=0;

  //finding determinant of the matrix
  // expansion along the first row, with cyclic (%3) column indices
  for(int i=0; i<size;++i){
    determinant += (x[0][i] * (x[1][(i+1)%3] * x[2][(i+2)%3] - x[1][(i+2)%3] * x[2][(i+1)%3]));
  };

  //Condition to check if the derterminat is zero or not if zero than inverse dont exists
  // NOTE(review): this also rejects matrices with a *negative* determinant,
  // which are invertible -- verify that restriction is intended here.
  if(determinant<=0){
    throw std::invalid_argument("Detereminant is not > 0");
  };

  // adjugate divided by the determinant; note the [j][i] assignment
  // performs the transpose of the cofactor matrix
  for(int i=0;i<size;++i){
    for(int j=0;j<size;++j){
      inversed[j][i] = ((x[(j+1)%3][(i+1)%3] * x[(j+2)%3][(i+2)%3]) - (x[(j+1)%3][(i+2)%3] * x[(j+2)%3][(i+1)%3]))/determinant;
    };
  };

  return inversed;
}
// for mc amer
// matrix/vector multiplicationi function for current solution.
std::vector<double> mat_vec_mul
(
std::vector<std::vector<double>> x
,std::vector<double> y
)
{
std::vector<double> mat(x.size());
for(int i=0;i<x.size();++i){
for(int j=0;j<y.size();++j){
mat[i]+=x[i][j]*y[j];
};
};
return mat;
}
// for mc_amer
// Return the transpose of y: result[j][i] == y[i][j].  Assumes y is
// rectangular and non-empty; columns are filled in parallel when OpenMP
// is enabled.
std::vector<std::vector<double>> transpose
(
  std::vector<std::vector<double>> y
)
{
  const int rows = (int)y.size();
  const int cols = (int)y[0].size();
  std::vector<std::vector<double>> flipped(cols, std::vector<double>(rows));
  #pragma omp parallel
  {
    #pragma omp for schedule(dynamic,1000) nowait
    for (int col = 0; col < cols; ++col) {
      for (int row = 0; row < rows; ++row) {
        flipped[col][row] = y[row][col];
      }
    }
  }
  return flipped;
}
// for mc amer
// Simulate N geometric-Brownian-motion price paths of M time steps each
// (matrix is (M+1) x N, row 0 == S0).  Antithetic variates: path n and
// path n+N/2 share the same normal draw with opposite signs, hence the
// requirement that N be even.  Seeded from wall-clock time (+100*parallel
// to decorrelate workers), so results are NOT reproducible across runs.
std::vector<std::vector<double>> pathsfinder
(
  double S0
  ,double E
  ,double r
  ,double sigma
  ,double T
  ,int N
  ,int M
  ,int parallel=0
)
{
  if (N%2!=0) throw std::invalid_argument("N needs to be divisible by 2 for finding paths");
  double dt = T/M;
  // matrix to store paths
  std::vector<std::vector<double>> paths(M+1);
  for(int i=0;i<M+1;++i){
    paths[i].resize(N);
  };

  // prepare generator.
  // norm draws Brownian increments with variance dt
  time_t cur_time;
  std::random_device rd{};
  std::mt19937 gen{rd()};
  std::normal_distribution<> norm{0,sqrt(dt)};
  // re-seed deterministically per worker (overrides the rd() seed above)
  gen.seed(time(&cur_time)+100*parallel);

  // generate paths
  for(int n=0;n<N/2;++n){
    // init new path
    paths[0][n] = S0;
    paths[0][n+N/2] = S0;
    // fill path: exact GBM step exp((r - sigma^2/2) dt +/- sigma w)
    for(int m=1;m<M+1;++m){
      double w = norm(gen);
      paths[m][n] = paths[m-1][n]*exp((r-0.5*sigma*sigma)*dt+sigma*w);
      paths[m][n+N/2] = paths[m-1][n+N/2]*exp((r-0.5*sigma*sigma)*dt-sigma*w);
    };
  };
  return paths;
}
// output calculation results
// Emit one CSV line with the run parameters, timings, the computed price,
// and its absolute / signed difference from the reference value.
void reporting
(
  std::string method
  ,std::string payoff_fun
  ,double S0
  ,double E
  ,double r
  ,double sigma
  ,double T
  ,double time_overall
  ,double time
  ,double result
  ,double comparison
  ,int N
  ,int parallel=0
  ,int M=0
  ,int assets=1
)
{
  std::cout << std::setprecision(10) \
    << method << "," \
    << payoff_fun << "," \
    << S0 << "," \
    << E << "," \
    << r << "," \
    << sigma << "," \
    << T << "," \
    << N << "," \
    << M << "," \
    << parallel << ","\
    << assets << ","\
    << time_overall << "," \
    << time << "," \
    << result << "," \
    /* std::fabs: unqualified abs may bind the int overload from <cstdlib>
       and truncate the error to 0 (implementation-dependent) */ \
    << std::fabs(result-comparison) << "," \
    << result-comparison/* << ","*/ \
    << std::endl;
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/feature.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel scratch record for the Canny filter: gradient magnitude, the
  intensity kept after non-maxima suppression, the quantized gradient
  orientation (0 = N/S, 1 = NW/SE, 2 = E/W, 3 = NE/SW per the switch in
  the suppression pass), and -- when row 0 of the matrix is reused as the
  edge-tracing work queue -- the pixel coordinates.
*/
typedef struct _CannyInfo
{
  double
    magnitude,
    intensity;

  int
    orientation;

  ssize_t
    x,
    y;
} CannyInfo;
/*
  True only when (x,y) lies inside the image bounds.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x >= 0) && (x < (ssize_t) image->columns) &&
      (y >= 0) && (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Hysteresis step of the Canny filter: mark the strong edge at (x,y) white
  in the edge image, then grow the edge through 8-connected neighbors whose
  cached gradient intensity is at or above lower_threshold.  Row 0 of
  canny_cache is reused as a LIFO work queue of pending pixels (i is the
  stack top).  Returns MagickFalse on any pixel-cache or matrix failure.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register PixelPacket
    *q;

  register ssize_t
    i;

  /* Mark the seed pixel as an edge (white). */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  q->red=QuantumRange;
  q->green=QuantumRange;
  q->blue=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Push the seed coordinates onto the work queue. */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /* Pop the most recently pushed pixel. */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            /* Unvisited neighbor above the weak threshold: mark it white
               and push it.  Note edge.x/edge.y are updated here, so the
               remaining u,v offsets of this pass are applied relative to
               the newly pushed pixel's coordinates. */
            q->red=QuantumRange;
            q->green=QuantumRange;
            q->blue=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
const double sigma,const double lower_percent,const double upper_percent,
ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"
CacheView
*edge_view;
CannyInfo
pixel;
char
geometry[MaxTextExtent];
double
lower_threshold,
max,
min,
upper_threshold;
Image
*edge_image;
KernelInfo
*kernel_info;
MagickBooleanType
status;
MagickOffsetType
progress;
MatrixInfo
*canny_cache;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
Filter out noise.
*/
(void) FormatLocaleString(geometry,MaxTextExtent,
"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel_info=AcquireKernelInfo(geometry);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
edge_image=MorphologyApply(image,DefaultChannels,ConvolveMorphology,1,
kernel_info,UndefinedCompositeOp,0.0,exception);
kernel_info=DestroyKernelInfo(kernel_info);
if (edge_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageColorspace(edge_image,GRAYColorspace) == MagickFalse)
{
edge_image=DestroyImage(edge_image);
return((Image *) NULL);
}
/*
Find the intensity gradient of the image.
*/
canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
sizeof(CannyInfo),exception);
if (canny_cache == (MatrixInfo *) NULL)
{
edge_image=DestroyImage(edge_image);
return((Image *) NULL);
}
status=MagickTrue;
edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(edge_image,edge_image,edge_image->rows,1)
#endif
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
CannyInfo
pixel;
double
dx,
dy;
register const PixelPacket
*restrict kernel_pixels;
ssize_t
v;
static double
Gx[2][2] =
{
{ -1.0, +1.0 },
{ -1.0, +1.0 }
},
Gy[2][2] =
{
{ +1.0, +1.0 },
{ -1.0, -1.0 }
};
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
dx=0.0;
dy=0.0;
kernel_pixels=p;
for (v=0; v < 2; v++)
{
ssize_t
u;
for (u=0; u < 2; u++)
{
double
intensity;
intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
dx+=0.5*Gx[v][u]*intensity;
dy+=0.5*Gy[v][u]*intensity;
}
kernel_pixels+=edge_image->columns+1;
}
pixel.magnitude=hypot(dx,dy);
pixel.orientation=0;
if (fabs(dx) > MagickEpsilon)
{
double
slope;
slope=dy/dx;
if (slope < 0.0)
{
if (slope < -2.41421356237)
pixel.orientation=0;
else
if (slope < -0.414213562373)
pixel.orientation=1;
else
pixel.orientation=2;
}
else
{
if (slope > 2.41421356237)
pixel.orientation=0;
else
if (slope > 0.414213562373)
pixel.orientation=3;
else
pixel.orientation=2;
}
}
if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
continue;
p++;
}
}
edge_view=DestroyCacheView(edge_view);
/*
Non-maxima suppression, remove pixels that are not considered to be part
of an edge.
*/
progress=0;
(void) GetMatrixElement(canny_cache,0,0,&pixel);
max=pixel.intensity;
min=pixel.intensity;
edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(edge_image,edge_image,edge_image->rows,1)
#endif
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
CannyInfo
alpha_pixel,
beta_pixel,
pixel;
(void) GetMatrixElement(canny_cache,x,y,&pixel);
switch (pixel.orientation)
{
case 0:
default:
{
/*
0 degrees, north and south.
*/
(void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
(void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
break;
}
case 1:
{
/*
45 degrees, northwest and southeast.
*/
(void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
(void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
break;
}
case 2:
{
/*
90 degrees, east and west.
*/
(void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
(void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
break;
}
case 3:
{
/*
135 degrees, northeast and southwest.
*/
(void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
(void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
break;
}
}
pixel.intensity=pixel.magnitude;
if ((pixel.magnitude < alpha_pixel.magnitude) ||
(pixel.magnitude < beta_pixel.magnitude))
pixel.intensity=0;
(void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CannyEdgeImage)
#endif
{
if (pixel.intensity < min)
min=pixel.intensity;
if (pixel.intensity > max)
max=pixel.intensity;
}
q->red=0;
q->green=0;
q->blue=0;
q++;
}
if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CannyEdgeImage)
#endif
proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
edge_view=DestroyCacheView(edge_view);
/*
Estimate hysteresis threshold.
*/
lower_threshold=lower_percent*(max-min)+min;
upper_threshold=upper_percent*(max-min)+min;
/*
Hysteresis threshold.
*/
edge_view=AcquireAuthenticCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
CannyInfo
pixel;
register const PixelPacket
*restrict p;
/*
Edge if pixel gradient higher than upper threshold.
*/
p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
status=GetMatrixElement(canny_cache,x,y,&pixel);
if (status == MagickFalse)
continue;
if ((GetPixelIntensity(edge_image,p) == 0.0) &&
(pixel.intensity >= upper_threshold))
status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
exception);
}
}
edge_view=DestroyCacheView(edge_view);
/*
Free resources.
*/
canny_cache=DestroyMatrixInfo(canny_cache);
return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageChannelFeatures(image,1,exception);
% contrast=channel_features[RedChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageChannelFeatures method is:
%
% ChannelFeatures *GetImageChannelFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  /*
    Return the absolute value of a signed size; used for |y-x| gray-level
    differences when indexing the difference-density arrays.
  */
  return(x >= 0 ? x : -x);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
DoublePixelPacket
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
LongPixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i;
size_t
length;
ssize_t
y;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=CompositeChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (LongPixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].opacity=(~0U);
grays[i].index=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(p))].red=
ScaleQuantumToMap(GetPixelRed(p));
grays[ScaleQuantumToMap(GetPixelGreen(p))].green=
ScaleQuantumToMap(GetPixelGreen(p));
grays[ScaleQuantumToMap(GetPixelBlue(p))].blue=
ScaleQuantumToMap(GetPixelBlue(p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index=
ScaleQuantumToMap(GetPixelIndex(indexes+x));
if (image->matte != MagickFalse)
grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity=
ScaleQuantumToMap(GetPixelOpacity(p));
p++;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) ResetMagickMemory(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[(ssize_t) gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[(ssize_t) gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[(ssize_t) gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].index != ~0U)
grays[(ssize_t) gray.index++].index=grays[i].index;
if (image->matte != MagickFalse)
if (grays[i].opacity != ~0U)
grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.index > number_grays)
number_grays=gray.index;
if (image->matte != MagickFalse)
if (gray.opacity > number_grays)
number_grays=gray.opacity;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) ResetMagickMemory(&correlation,0,sizeof(correlation));
(void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) ResetMagickMemory(&mean,0,sizeof(mean));
(void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
(void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x));
(void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy));
(void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1));
(void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2));
(void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y));
(void) ResetMagickMemory(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) ResetMagickMemory(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
ssize_t
i,
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+
2*distance,distance+2,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
p+=distance;
indexes+=distance;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset)))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset)))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p)))
u++;
while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x)))
u++;
while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset)))
v++;
cooccurrence[u][v].direction[i].index++;
cooccurrence[v][u].direction[i].index++;
}
if (image->matte != MagickFalse)
{
u=0;
v=0;
while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p)))
u++;
while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity))
v++;
cooccurrence[u][v].direction[i].opacity++;
cooccurrence[v][u].direction[i].opacity++;
}
}
p++;
}
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].index*=normalize;
if (image->matte != MagickFalse)
cooccurrence[x][y].direction[i].opacity*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BlueChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].index*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].opacity*
cooccurrence[x][y].direction[i].opacity;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].index+=x*y*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
correlation.direction[i].opacity+=x*y*
cooccurrence[x][y].direction[i].opacity;
/*
Inverse Difference Moment.
*/
channel_features[RedChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BlueChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[y+x+2].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Entropy.
*/
channel_features[RedChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BlueChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].entropy[i]-=
cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].entropy[i]-=
cooccurrence[x][y].direction[i].opacity*
MagickLog10(cooccurrence[x][y].direction[i].opacity);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_x[x].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_y[y].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].index+=y*sum[y].direction[i].index;
sum_squares.direction[i].index+=y*y*sum[y].direction[i].index;
}
if (image->matte != MagickFalse)
{
mean.direction[i].opacity+=y*sum[y].direction[i].opacity;
sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BlueChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].correlation[i]=
(correlation.direction[i].index-mean.direction[i].index*
mean.direction[i].index)/(sqrt(sum_squares.direction[i].index-
(mean.direction[i].index*mean.direction[i].index))*sqrt(
sum_squares.direction[i].index-(mean.direction[i].index*
mean.direction[i].index)));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].correlation[i]=
(correlation.direction[i].opacity-mean.direction[i].opacity*
mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity-
(mean.direction[i].opacity*mean.direction[i].opacity))*sqrt(
sum_squares.direction[i].opacity-(mean.direction[i].opacity*
mean.direction[i].opacity)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_average[i]+=
x*density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_average[i]+=
x*density_xy[x].direction[i].opacity;
/*
Sum entropy.
*/
channel_features[RedChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Sum variance.
*/
channel_features[RedChannel].sum_variance[i]+=
(x-channel_features[RedChannel].sum_entropy[i])*
(x-channel_features[RedChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_variance[i]+=
(x-channel_features[GreenChannel].sum_entropy[i])*
(x-channel_features[GreenChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_variance[i]+=
(x-channel_features[BlueChannel].sum_entropy[i])*
(x-channel_features[BlueChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_variance[i]+=
(x-channel_features[IndexChannel].sum_entropy[i])*
(x-channel_features[IndexChannel].sum_entropy[i])*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_variance[i]+=
(x-channel_features[OpacityChannel].sum_entropy[i])*
(x-channel_features[OpacityChannel].sum_entropy[i])*
density_xy[x].direction[i].opacity;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=(y-mean.direction[i].index+1)*
(y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)*
(y-mean.direction[i].opacity+1)*
cooccurrence[x][y].direction[i].opacity;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
entropy_xy.direction[i].opacity-=
cooccurrence[x][y].direction[i].opacity*MagickLog10(
cooccurrence[x][y].direction[i].opacity);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].index-=(
cooccurrence[x][y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy1.direction[i].opacity-=(
cooccurrence[x][y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(
density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(
density_x[x].direction[i].green*density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(
density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].index-=(density_x[x].direction[i].index*
density_y[y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
}
}
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BlueChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].index;
if (image->matte != MagickFalse)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].opacity;
}
/*
Compute more texture features.
*/
(void) ResetMagickMemory(&variance,0,sizeof(variance));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=density_xy[x].direction[i].opacity;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].index+=density_xy[x].direction[i].index*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity*
density_xy[x].direction[i].opacity;
/*
Difference entropy.
*/
channel_features[RedChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].index-=(density_x[x].direction[i].index*
MagickLog10(density_x[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity*
MagickLog10(density_x[x].direction[i].opacity));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].index-=(density_y[x].direction[i].index*
MagickLog10(density_y[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity*
MagickLog10(density_y[x].direction[i].opacity));
}
/*
Difference variance.
*/
channel_features[RedChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BlueChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].opacity)-
(variance.direction[i].opacity*variance.direction[i].opacity))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].index)-
(variance.direction[i].index*variance.direction[i].index))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BlueChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/
(entropy_x.direction[i].index > entropy_y.direction[i].index ?
entropy_x.direction[i].index : entropy_y.direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/
(entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ?
entropy_x.direction[i].opacity : entropy_y.direction[i].opacity);
channel_features[RedChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BlueChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index-
entropy_xy.direction[i].index)))));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity-
entropy_xy.direction[i].opacity)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
pixel.direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
*/
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/
density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index*
cooccurrence[y][x].direction[i].index/
density_x[z].direction[i].index/density_y[x].direction[i].index;
if (image->matte != MagickFalse)
Q[z][y].direction[i].opacity+=
cooccurrence[z][x].direction[i].opacity*
cooccurrence[y][x].direction[i].opacity/
density_x[z].direction[i].opacity/
density_y[x].direction[i].opacity;
}
}
channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red;
channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green;
channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].contrast[i]+=z*z*
pixel.direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].contrast[i]+=z*z*
pixel.direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BlueChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator matrix
% of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next
% it searches this space for peaks in counts and converts the locations of the
% peaks to slope and intercept in the normal x,y input image space. Use the
% slope/intercepts to find the endpoints clipped to the bounds of the image. The
% lines are then drawn. The counts are a measure of the length of the lines.
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag  "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MaxTextExtent],
    path[MaxTextExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: 180 columns (one per degree of orientation)
    by 2*hough_height rows, where hough_height is half the diagonal of
    the bounding square of the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: every sufficiently bright pixel (intensity
    above QuantumRange/2, i.e. "white" in a binary edge image) votes once
    for each of the 180 candidate orientations.  The signed distance
    radius is offset by hough_height so the matrix row index is
    non-negative.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2))
        {
          register ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            /*
              Normal-form line parameterization:
              radius = x'*cos(theta)+y'*sin(theta), relative to center.
            */
            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HoughLineImage)
#endif
        proceed=SetImageProgress(image,HoughLineImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: peaks are written to a
    temporary MVG (vector-graphics) file, later rendered via ReadImage().
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MaxTextExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if ((size_t)write(file,message,strlen(message)) != strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,"viewbox 0 0 %.20g %.20g\n",
    (double) image->columns,(double) image->rows);
  if ((size_t)write(file,message,strlen(message)) != strlen(message))
    status=MagickFalse;
  /*
    Default vote threshold is a quarter of the larger image dimension
    unless the caller supplied an explicit threshold.
  */
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima within the width x height
            neighborhood?  NOTE(review): x+u/y+v may fall outside the
            accumulator near its borders; presumably GetMatrixElement()
            tolerates out-of-range indices -- confirm against the
            MatrixInfo implementation.
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            /* If the inner loop broke early a larger neighbor was found;
               stop scanning the neighborhood. */
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          /*
            Convert the (angle x, distance y) peak to a segment clipped
            to the image bounds.  Angles near vertical (45..135 degrees)
            solve for y at the left/right edges; otherwise solve for x at
            the top/bottom edges to avoid dividing by a small sin(t).
          */
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MaxTextExtent,
            "line %g,%g %g,%g # %g\n",line.x1,line.y1,line.x2,line.y2,maxima);
          if ((size_t)write(file,message,strlen(message)) != strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas by reading the MVG file through the
    "mvg:" coder; drawing options are forwarded from image artifacts.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MaxTextExtent,"mvg:%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=ReadImage(image_info,exception);
  /*
    Optionally append an image of the raw accumulator when the
    "hough-lines:accumulator" artifact is set to a true value.
  */
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsMagickTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  mean_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&mean_image->exception);
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  /*
    Two virtual views over the source: image_view walks rows in order,
    pixel_view fetches arbitrary pixels while the window wanders.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status,progress) \
    magick_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      MagickPixelPacket
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Seed the mean with the current pixel's color and coordinates,
        then iterate the mean-shift update until convergence or the
        iteration cap.
      */
      GetMagickPixelPacket(image,&mean_pixel);
      SetMagickPixelPacket(image,p,indexes+x,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        MagickPixelPacket
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetMagickPixelPacket(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Scan the width x height window centered at the current mean
          location; the (v*v+u*u) test restricts the window to an
          elliptical mask of semi-axes width/2 and height/2.
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelPacket
                  pixel;

                status=GetOneCacheViewVirtualPixel(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                /* Squared RGB distance from the current mean color. */
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.opacity+=pixel.opacity;
                    count++;
                  }
              }
          }
        }
        /*
          NOTE(review): divides by count without a zero guard; presumably
          count >= 1 because the window center matches itself on the
          first iteration (distance 0), but on later iterations that is
          not obviously guaranteed -- confirm.
        */
        gamma=1.0/count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.opacity=gamma*sum_pixel.opacity;
        /*
          Convergence metric: squared shift in position plus squared
          color change scaled to a 0..255 range; stop once it falls to
          3.0 or below.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /* The converged mean replaces the original window-center pixel. */
      q->red=ClampToQuantum(mean_pixel.red);
      q->green=ClampToQuantum(mean_pixel.green);
      q->blue=ClampToQuantum(mean_pixel.blue);
      q->opacity=ClampToQuantum(mean_pixel.opacity);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MeanShiftImage)
#endif
        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
7759.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  /* Deterministic fill: A[row][col] = (row+col)/nj. */
  for (row = 0; row < ni; row++)
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  /* Dump every element to stderr; insert a newline every 20 values
     (keyed on the linear index i*NJ+j). */
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
	if (((i * NJ + j) % 20) == 0)
	  fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  /*
    2D convolution of A with a fixed 3x3 stencil, written into B.  Only
    interior cells are computed (i,j run 1.._PB_N?-2), so B's one-cell
    border is left untouched; callers must not rely on border values.
    The summation order is part of the benchmark's reference output --
    do not reassociate the floating-point terms.
  */
#pragma scop
#pragma omp parallel for private(j) collapse(2) schedule(dynamic, 1) num_threads(1)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time defaults from the header). */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel; the timed region includes the call and return. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
time_multi_omp.c | #include "multi_bspline.h"
#include "bspline.h"
#include "multi_nubspline.h"
#include "nubspline.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
/* drand48 is POSIX, not ISO C; declare it explicitly in case a strict
   standard mode hides it in <stdlib.h>. */
double drand48();
/*
  Return elapsed time in seconds: OpenMP wall-clock time when available,
  otherwise process CPU time from clock().  Note the fallback is not
  wall-clock time and sums across threads, so multi-threaded timings are
  only meaningful when _OPENMP is defined.

  Declared `static inline`: a bare C99 `inline` function has no external
  definition unless one is provided in some translation unit, which this
  file never does -- that is an undefined-reference link error waiting to
  happen.  `static` gives the definition internal linkage and fixes it.
*/
static inline double get_time()
{
#ifdef _OPENMP
  return omp_get_wtime();
#else
  return (double)clock() / (double)CLOCKS_PER_SEC;
#endif
}
void
time_3d_real_double_omp()
{
// int avail = numa_available();
#ifdef _OPENMP
int nthr = omp_get_max_threads();
#else
int nthr = 1;
#endif
// int nnodes = numa_max_node();
// fprintf (stderr, "Performing test with %d NUMA nodes.\n",
// avail, nnodes);
// if (!nnodes)
// nnodes++;
int nnodes = nthr;
fprintf (stderr, "Using %d threads.\n", nnodes);
int Nx=63; int Ny=61; int Nz = 69;
int num_splines = 256;
Ugrid x_grid, y_grid, z_grid;
x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx;
y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny;
z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz;
BCtype_d xBC, yBC, zBC;
xBC.lCode = xBC.rCode = PERIODIC;
yBC.lCode = yBC.rCode = PERIODIC;
zBC.lCode = zBC.rCode = PERIODIC;
// First, create splines the normal way
UBspline_3d_d* norm_splines[num_splines];
multi_UBspline_3d_d *multi_spline[nnodes];
// First, create multispline
#pragma omp parallel for
for (int node=0; node<nnodes; node++)
{
// nodemask_t mask;
// nodemask_zero(&mask);
// nodemask_set (&mask, node);
// numa_set_membind (&mask);
multi_spline[node] = create_multi_UBspline_3d_d
(x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines);
}
double data[Nx*Ny*Nz];
// Now, create normal splines and set multispline data
for (int i=0; i<num_splines; i++) {
for (int j=0; j<Nx*Ny*Nz; j++)
data[j] = (drand48()-0.5);
norm_splines[i] = create_UBspline_3d_d
(x_grid, y_grid, z_grid, xBC, yBC, zBC, data);
#pragma omp parallel for
for (int node=0; node<nnodes; node++) {
// nodemask_t mask;
// nodemask_zero(&mask);
// nodemask_set (&mask, node);
// numa_set_membind (&mask);
set_multi_UBspline_3d_d (multi_spline[node], i, data);
}
}
// Now, test random values
double rand_start, rand_end, norm_start[nthr], norm_end[nthr], multi_start[nthr], multi_end[nthr];
int num_vals = 10000;
double multi_vals[nthr][num_splines], norm_vals[nthr][num_splines];
double multi_grads[nthr][3*num_splines], norm_grads[nthr][3*num_splines];
double multi_lapl[nthr][num_splines], norm_lapl[nthr][num_splines];
double multi_hess[nthr][9*num_splines], norm_hess[nthr][9*num_splines];
rand_start = get_time();
for (int i=0; i<num_vals; i++) {
double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
}
rand_end = get_time();
///////////////////////
// Check value routine //
///////////////////////
double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
int thr_per_node = nthr/nnodes;
#pragma omp parallel for
for (int thr=0; thr<nthr; thr++) {
int node = thr/thr_per_node;
multi_start[thr] = get_time();
for (int i=0; i<num_vals; i++) {
double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
eval_multi_UBspline_3d_d (multi_spline[node], x, y, z, multi_vals[thr]);
}
multi_end[thr] = get_time();
}
// #pragma omp parallel for
// for (int thr=0; thr<nthr; thr++) {
// norm_start[thr] = get_time();
// for (int i=0; i<num_vals; i++) {
// double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
// double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
// double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
// for (int j=0; j<num_splines; j++)
// eval_UBspline_3d_d (norm_splines[j], x, y, z, &(norm_vals[thr][j]));
// }
// norm_end[thr] = get_time();
// }
double norm_avg=0.0, multi_avg=0.0;
for (int thr=0; thr<nthr; thr++) {
double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end);
double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end);
norm_avg += norm_time;
multi_avg += multi_time;
}
norm_avg /= nthr;
multi_avg /= nthr;
double norm_speed = (double) num_vals*num_splines / norm_avg;
double multi_speed = (double) num_vals*num_splines / multi_avg;
// fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n",
// norm_speed);
fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n",
multi_speed);
fprintf (stderr, "Aggregate bandwidth = %1.3f GB/s per socket\n", multi_speed * 64.0*8.0 * 8 * 1.0e-9);
///////////////////////
// Check VGH routine //
///////////////////////
#pragma omp parallel for
for (int thr=0; thr<nthr; thr++) {
int node = thr/thr_per_node;
multi_start[thr] = get_time();
for (int i=0; i<num_vals; i++) {
double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
eval_multi_UBspline_3d_d_vgh
(multi_spline[node], x, y, z, multi_vals[thr],
multi_grads[thr], multi_hess[thr]);
}
multi_end[thr] = get_time();
}
// #pragma omp parallel for
// for (int thr=0; thr<nthr; thr++) {
// norm_start[thr] = get_time();
// for (int i=0; i<num_vals; i++) {
// double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
// double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
// double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
// for (int j=0; j<num_splines; j++)
// eval_UBspline_3d_d_vgh (norm_splines[j], x, y, z, &(norm_vals[thr][j]),
// &(norm_grads[thr][3*j]), &(norm_hess[thr][9*j]));
// }
// norm_end[thr] = get_time();
// }
norm_avg = multi_avg = 0.0;
for (int thr=0; thr<nthr; thr++) {
double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end);
double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end);
norm_avg += norm_time;
multi_avg += multi_time;
}
norm_avg /= nthr;
multi_avg /= nthr;
norm_speed = (double) num_vals*num_splines / norm_avg;
multi_speed = (double) num_vals*num_splines / multi_avg;
// fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n",
// norm_speed);
fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n",
multi_speed);
fprintf (stderr, "%1.3f GFLOPS per socket\n", multi_speed * 64.0*2.0*10.0 * 8 * 1.0e-9);
// destroy_Bspline (multi_spline);
// for (int i=0; i<num_splines; i++)
// destroy_Bspline(norm_splines[i]);
}
/* Benchmark evaluation of complex double-precision 3D B-splines, with one
 * private multi-spline copy per OpenMP thread (first-touch NUMA placement).
 * Reports evaluations/second for the value and value/gradient/Hessian (VGH)
 * routines, plus derived bandwidth and GFLOPS figures, to stderr.
 * The measured cost of generating the random evaluation points with
 * drand48() (rand_start..rand_end) is subtracted from each thread's time. */
void
time_3d_complex_double_omp()
{
#ifdef _OPENMP
  int nthr = omp_get_max_threads();
#else
  int nthr = 1;
#endif
  /* Treat each thread as its own "node": each gets a private spline copy. */
  int nnodes = nthr;
  fprintf (stderr, "Using %d threads.\n", nthr);

  int Nx=32; int Ny=32; int Nz = 32;
  int num_splines = 256;

  Ugrid x_grid, y_grid, z_grid;
  x_grid.start = 3.1; x_grid.end = 9.1;  x_grid.num = Nx;
  y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny;
  z_grid.start = 4.5; z_grid.end = 9.3;  z_grid.num = Nz;

  BCtype_z xBC, yBC, zBC;
  xBC.lCode = xBC.rCode = PERIODIC;
  yBC.lCode = yBC.rCode = PERIODIC;
  zBC.lCode = zBC.rCode = PERIODIC;

  // First, create splines the normal way
  UBspline_3d_z* norm_splines[num_splines];
  multi_UBspline_3d_z *multi_spline[nthr];

  // Create one multispline per thread; each thread allocates its own copy
  // so the pages land in that thread's local memory (first touch).
#pragma omp parallel for
  for (int node=0; node<nnodes; node++)
    multi_spline[node] = create_multi_UBspline_3d_z
      (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines);

  double data[Nx*Ny*Nz*2];
  // Now, create normal splines and set multispline data.
  for (int i=0; i<num_splines; i++) {
    // BUGFIX: data[] holds interleaved complex values (2 doubles per point);
    // the original loop filled only the first Nx*Ny*Nz doubles, leaving the
    // imaginary halves indeterminate.  Fill the whole buffer.
    for (int j=0; j<Nx*Ny*Nz*2; j++)
      data[j] = (drand48()-0.5);
    norm_splines[i] = create_UBspline_3d_z
      (x_grid, y_grid, z_grid, xBC, yBC, zBC, (complex_double*)data);
#pragma omp parallel for
    for (int node=0; node<nnodes; node++)
      set_multi_UBspline_3d_z (multi_spline[node], i, data);
  }

  // Now, test random values
  double rand_start, rand_end, norm_start[nthr], norm_end[nthr],
         multi_start[nthr], multi_end[nthr];
  int num_vals = 10000;

  // BUGFIX: the "normal spline" timing loops below are commented out, so
  // norm_start/norm_end were read uninitialized by the averaging code.
  // Zero them so the (unused) norm averages are at least well-defined.
  for (int thr=0; thr<nthr; thr++)
    norm_start[thr] = norm_end[thr] = 0.0;

  complex_double multi_vals[nthr][num_splines],    norm_vals[nthr][num_splines];
  complex_double multi_grads[nthr][3*num_splines], norm_grads[nthr][3*num_splines];
  complex_double multi_lapl[nthr][num_splines],    norm_lapl[nthr][num_splines];
  complex_double multi_hess[nthr][9*num_splines],  norm_hess[nthr][9*num_splines];

  // Time the random-point generation alone so it can be subtracted from
  // the evaluation timings below.  x/y/z are intentionally unused: the
  // loop must do exactly the same arithmetic as the timed loops.
  rand_start = get_time();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
  }
  rand_end = get_time();

  /////////////////////////
  // Check value routine //
  /////////////////////////
  // BUGFIX: was nthr/nthr (a typo); nthr/nnodes matches the real-double
  // version.  Value is 1 either way since nnodes == nthr here.
  int thr_per_node = nthr/nnodes;
#pragma omp parallel for
  for (int thr=0; thr<nthr; thr++) {
    int node = thr/thr_per_node;
    multi_start[thr] = get_time();
    for (int i=0; i<num_vals; i++) {
      double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
      double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
      double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
      eval_multi_UBspline_3d_z (multi_spline[node], x, y, z, multi_vals[thr]);
    }
    multi_end[thr] = get_time();
  }
  // Normal (one-spline-at-a-time) timing, kept for reference:
  // #pragma omp parallel for
  // for (int thr=0; thr<nthr; thr++) {
  //   norm_start[thr] = get_time();
  //   for (int i=0; i<num_vals; i++) {
  //     double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
  //     double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
  //     double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
  //     for (int j=0; j<num_splines; j++)
  //       eval_UBspline_3d_z (norm_splines[j], x, y, z, &(norm_vals[thr][j]));
  //   }
  //   norm_end[thr] = get_time();
  // }

  // Average per-thread times; subtract the RNG-only loop's duration.
  double norm_avg=0.0, multi_avg=0.0;
  for (int thr=0; thr<nthr; thr++) {
    double norm_time  = (double)(norm_end[thr]  - norm_start[thr]  + rand_start - rand_end);
    double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end);
    norm_avg  += norm_time;
    multi_avg += multi_time;
  }
  norm_avg  /= nthr;
  multi_avg /= nthr;
  double norm_speed  = (double) num_vals*num_splines / norm_avg;
  double multi_speed = (double) num_vals*num_splines / multi_avg;
  // fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n",
  //          norm_speed);
  fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n",
           multi_speed);
  fprintf (stderr, "Aggregate bandwidth = %1.3f GB/s per socket\n", multi_speed * 64.0*16.0 * 8 * 1.0e-9);
  fprintf (stderr, "%1.3f GFLOPS per socket\n", multi_speed * 64.0*4.0 * 8 * 1.0e-9);

  ///////////////////////
  // Check VGH routine //
  ///////////////////////
#pragma omp parallel for
  for (int thr=0; thr<nthr; thr++) {
    int node = thr/thr_per_node;
    multi_start[thr] = get_time();
    for (int i=0; i<num_vals; i++) {
      double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
      double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
      double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
      eval_multi_UBspline_3d_z_vgh
        (multi_spline[node], x, y, z, multi_vals[thr],
         multi_grads[thr], multi_hess[thr]);
    }
    multi_end[thr] = get_time();
  }
  // Normal VGH timing, kept for reference:
  // #pragma omp parallel for
  // for (int thr=0; thr<nthr; thr++) {
  //   norm_start[thr] = get_time();
  //   for (int i=0; i<num_vals; i++) {
  //     double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
  //     double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
  //     double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
  //     for (int j=0; j<num_splines; j++)
  //       eval_UBspline_3d_z_vgh (norm_splines[j], x, y, z, &(norm_vals[thr][j]),
  //                               &(norm_grads[thr][3*j]), &(norm_hess[thr][9*j]));
  //   }
  //   norm_end[thr] = get_time();
  // }

  norm_avg = multi_avg = 0.0;
  for (int thr=0; thr<nthr; thr++) {
    double norm_time  = (double)(norm_end[thr]  - norm_start[thr]  + rand_start - rand_end);
    double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end);
    norm_avg  += norm_time;
    multi_avg += multi_time;
  }
  norm_avg  /= nthr;
  multi_avg /= nthr;
  norm_speed  = (double) num_vals*num_splines / norm_avg;
  multi_speed = (double) num_vals*num_splines / multi_avg;
  // fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n",
  //          norm_speed);
  fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n",
           multi_speed);
  fprintf (stderr, "%1.3f GFLOPS per socket\n", multi_speed * 64.0*4.0*10.0 * 8 * 1.0e-9);

  // NOTE(review): splines are deliberately leaked — the process exits right
  // after this benchmark.  To be tidy, free them:
  // destroy_Bspline (multi_spline[node]);  // per node
  // for (int i=0; i<num_splines; i++)
  //   destroy_Bspline(norm_splines[i]);
}
/* Entry point: run the complex-double OpenMP spline benchmark.
 * (The real-double variant is available but currently disabled.)
 * BUGFIX: was `main()` with implicit int, which is invalid since C99. */
int
main()
{
  // fprintf (stderr, "Real:\n");
  // time_3d_real_double_omp();
  fprintf (stderr, "\nComplex:\n");
  time_3d_complex_double_omp();
  return 0;
}
|
test-zrocks.c | #include <omp.h>
#include <stdint.h>
#include <stdlib.h>
#include <libzrocks.h>
#include <xztl.h>
#include "CUnit/Basic.h"
/* Number of Objects */
#define TEST_N_BUFFERS 2
/* Number of random objects to read */
#define TEST_RANDOM_ID 2
/* Object Size */
#define TEST_BUFFER_SZ (1024 * 1024 * 16) /* 16 MB */
static uint8_t *wbuf[TEST_N_BUFFERS];
static uint8_t *rbuf[TEST_N_BUFFERS];
static const char **devname;
/* Assert that `ptr` is non-NULL; on failure also print the calling
 * function's name `fn` and the pointer value for diagnostics. */
static void cunit_zrocks_assert_ptr (char *fn, void *ptr)
{
	CU_ASSERT ((uint64_t) ptr != 0);
	if (ptr == NULL)
		printf ("\n %s: ptr %p\n", fn, ptr);
}
/* Assert that `status` is zero (success); on failure also print the
 * calling function's name `fn` and the status in hex. */
static void cunit_zrocks_assert_int (char *fn, uint64_t status)
{
	CU_ASSERT (status == 0);
	if (status != 0)
		printf ("\n %s: %lx\n", fn, status);
}
/* CUnit suite setup hook; nothing to initialize, always succeeds. */
static int cunit_zrocks_init (void)
{
	return 0;
}
/* CUnit suite teardown hook; nothing to release, always succeeds. */
static int cunit_zrocks_exit (void)
{
	return 0;
}
/* CUnit test: initialize ZRocks against the device path supplied on the
 * command line (stored in the file-scope `devname`). */
static void test_zrocks_init (void)
{
	int ret = zrocks_init (*devname);

	cunit_zrocks_assert_int ("zrocks_init", ret);
}
/* CUnit test: shut down ZRocks (counterpart of test_zrocks_init). */
static void test_zrocks_exit (void)
{
zrocks_exit ();
}
/* Fill write buffer `id` with a deterministic pattern: consecutive
 * 16-byte runs whose fill byte starts at 0x2 and increments per run
 * (wrapping modulo 256), so corruption is easy to spot on readback. */
static void test_zrocks_fill_buffer (uint32_t id)
{
	uint8_t value = 0x1;

	for (uint32_t off = 0; off < TEST_BUFFER_SZ; off += 16) {
		value += 0x1;
		memset (&wbuf[id][off], value, 16);
	}
}
/* Compare the first `size` bytes of the write and read buffers for
 * object `id`.  Returns 0 on an exact match (memcmp semantics).
 * `off` is only used by the commented-out per-byte debug dump. */
static int test_zrocks_check_buffer (uint32_t id, uint32_t off, uint32_t size)
{
	/* Uncomment for a per-byte dump of the compared regions:
	printf (" \nMem check:\n");
	for (int i = off; i < off + size; i++) {
		if (i % 16 == 0 && i)
			printf("\n %d-%d ", i - (i%16), (i - (i%16)) + 16);
		printf (" %x/%x", wbuf[id][i], rbuf[id][i]);
	}
	printf("\n");
	*/
	return memcmp (wbuf[id], rbuf[id], size);
}
/* CUnit test: in parallel, allocate one DMA write buffer per object,
 * fill it with the test pattern, and create the corresponding ZRocks
 * object (object IDs are 1-based). */
static void test_zrocks_new (void)
{
	uint64_t phys[TEST_N_BUFFERS];
	int      ret[TEST_N_BUFFERS];
	uint32_t ids   = TEST_N_BUFFERS;
	uint32_t size  = TEST_BUFFER_SZ;
	uint8_t  level = 0;
	uint64_t id;

#pragma omp parallel for
	for (id = 0; id < ids; id++) {
		/* DMA-capable buffer for the media layer */
		wbuf[id] = xztl_media_dma_alloc (size, &phys[id]);
		cunit_zrocks_assert_ptr ("xztl_media_dma_alloc", wbuf[id]);
		if (!wbuf[id])
			continue;

		test_zrocks_fill_buffer (id);

		ret[id] = zrocks_new (id + 1, wbuf[id], size, level);
		cunit_zrocks_assert_int ("zrocks_new", ret[id]);
	}
}
/* CUnit test: read each object back in 64 KB chunks into a freshly
 * allocated DMA buffer, verify it matches the written pattern, then
 * free the read buffer. */
static void test_zrocks_read (void)
{
	uint64_t phys[TEST_N_BUFFERS];
	int      ret[TEST_N_BUFFERS];
	size_t   read_sz = 1024 * 64; /* 64 KB */
	size_t   size    = TEST_BUFFER_SZ;
	uint32_t ids     = TEST_N_BUFFERS;
	uint64_t id;

	for (id = 0; id < ids; id++) {
		/* DMA-capable destination buffer */
		rbuf[id] = xztl_media_dma_alloc (size, &phys[id]);
		cunit_zrocks_assert_ptr ("xztl_media_dma_alloc", rbuf[id]);
		if (!rbuf[id])
			continue;

		memset (rbuf[id], 0x0, size);

		for (uint32_t offset = 0; offset < size; offset += read_sz) {
			ret[id] = zrocks_read_obj (id + 1, offset, rbuf[id] + offset, read_sz);
			cunit_zrocks_assert_int ("zrocks_read_obj", ret[id]);
			if (ret[id])
				printf ("Read error: ID %lu, offset %d, status: %x\n",
					id + 1, offset, ret[id]);
		}

		ret[id] = test_zrocks_check_buffer (id, 0, TEST_BUFFER_SZ);
		cunit_zrocks_assert_int ("zrocks_read_obj:check", ret[id]);
		if (ret[id])
			printf ("Corruption: ID %lu, corrupted: %d bytes\n", id + 1, ret[id]);

		xztl_media_dma_free (rbuf[id]);
	}
}
/* CUnit test: issue four small reads at arbitrary (unaligned) offsets
 * against object TEST_RANDOM_ID and byte-compare each against the same
 * region of the original write buffer.  Finally releases all write
 * buffers, since no later test needs them. */
static void test_zrocks_random_read (void)
{
	uint64_t random_off[4] = {63, 24567, 175678, 267192};
	size_t   random_sz[4]  = {532, 53, 2695, 1561};
	//uint64_t random_off[1] = {24567};
	//size_t random_sz[1] = {53};
	uint64_t id = TEST_RANDOM_ID;
	uint64_t phys;
	uint8_t *buf;

	buf = xztl_media_dma_alloc (1024 * 512, &phys);
	cunit_zrocks_assert_ptr ("xztl_media_dma_alloc", buf);
	if (!buf)
		return;

	for (int readi = 0; readi < 4; readi++) {
		memset (buf, 0x0, random_sz[readi]);

		int ret = zrocks_read_obj (id, random_off[readi], buf, random_sz[readi]);
		cunit_zrocks_assert_int ("zrocks_read_obj", ret);

		/* Expected bytes live at the same offset in the write buffer
		 * (object IDs are 1-based, buffer indices 0-based). */
		uint8_t *woff = &wbuf[id - 1][random_off[readi]];

		cunit_zrocks_assert_int ("zrocks_read_obj:check",
					 memcmp (woff, buf, random_sz[readi]));
	}

	xztl_media_dma_free (buf);

	for (int i = 0; i < TEST_N_BUFFERS; i++)
		xztl_media_dma_free (wbuf[i]);
}
/* Run the ZRocks CUnit suite against the device path given in argv[1].
 * Returns the number of failed tests (0 on success), or a CUnit error
 * code if the registry/suite could not be set up, or -1 on bad usage. */
int main (int argc, const char **argv)
{
	if (argc < 2) {
		printf ("Please provide the device path. e.g. liou:/dev/nvme0n2\n");
		return -1;
	}

	devname = &argv[1];
	printf ("Device: %s\n", *devname);

	if (CUE_SUCCESS != CU_initialize_registry())
		return CU_get_error();

	CU_pSuite pSuite = CU_add_suite("Suite_zrocks", cunit_zrocks_init,
					cunit_zrocks_exit);
	if (pSuite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	/* Registration order matters: init first, teardown last. */
	struct { const char *name; void (*fn)(void); } tests[] = {
		{ "Initialize ZRocks",  test_zrocks_init },
		{ "ZRocks New",         test_zrocks_new },
		{ "ZRocks Read",        test_zrocks_read },
		{ "ZRocks Random Read", test_zrocks_random_read },
		{ "Close ZRocks",       test_zrocks_exit },
	};
	for (size_t i = 0; i < sizeof tests / sizeof tests[0]; i++) {
		if (CU_add_test (pSuite, tests[i].name, tests[i].fn) == NULL) {
			CU_cleanup_registry();
			return CU_get_error();
		}
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();

	int failed = CU_get_number_of_tests_failed();
	CU_cleanup_registry();
	return failed;
}
|
ast-dump-openmp-begin-declare-variant_11.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify=c_mode -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify=cxx_mode -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// c_mode-no-diagnostics
#ifdef __cplusplus
#define CONST constexpr
#else
#define CONST __attribute__((const))
#endif
#pragma omp begin declare variant match(implementation = {vendor(llvm)})
CONST int also_after1(void) { // cxx_mode-note {{previous declaration is here}}
return 0;
}
static int also_after2(void) {
return 0;
}
__attribute__((nothrow)) int also_after3(void) {
return 0;
}
static CONST __attribute__((nothrow, always_inline)) __inline__ int also_after4(void) { // cxx_mode-note {{previous declaration is here}}
return 0;
}
#pragma omp end declare variant
int also_after1(void) { // cxx_mode-error {{non-constexpr declaration of 'also_after1' follows constexpr declaration}}
return 1;
}
int also_after2(void) {
return 2;
}
int also_after3(void) {
return 3;
}
int also_after4(void) { // cxx_mode-error {{non-constexpr declaration of 'also_after4' follows constexpr declaration}}
return 4;
}
int main(void) {
// Should return 0.
return also_after1() + also_after2() + also_after3() + also_after4();
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:27> col:11 implicit used also_after1 'int ({{.*}})'
// C-NEXT: | |-ConstAttr [[ADDR_1:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_2:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_3:0x[a-z0-9]*]] <col:15> 'int ({{.*}})' Function [[ADDR_4:0x[a-z0-9]*]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_4]] <col:15, line:15:1> line:9:15 also_after1[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <line:13:29, line:15:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <line:14:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-ConstAttr [[ADDR_8:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: |-FunctionDecl [[ADDR_9:0x[a-z0-9]*]] <line:16:1, col:28> col:12 implicit used also_after2 'int ({{.*}})' static
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_10:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_11:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_12:0x[a-z0-9]*]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_12]] <col:1, line:18:1> line:16:1 also_after2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// C-NEXT: | `-CompoundStmt [[ADDR_13:0x[a-z0-9]*]] <col:30, line:18:1>
// C-NEXT: | `-ReturnStmt [[ADDR_14:0x[a-z0-9]*]] <line:17:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_15:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: |-FunctionDecl [[ADDR_16:0x[a-z0-9]*]] <line:19:1, col:46> col:30 implicit used also_after3 'int ({{.*}})'
// C-NEXT: | |-NoThrowAttr [[ADDR_17:0x[a-z0-9]*]] <col:16>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_18:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_19:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_20:0x[a-z0-9]*]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_20]] <col:1, line:21:1> line:19:1 also_after3[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:48, line:21:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:20:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-NoThrowAttr [[ADDR_24:0x[a-z0-9]*]] <line:19:16>
// C-NEXT: |-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:22:1, col:85> col:69 implicit used also_after4 'int ({{.*}})' static inline
// C-NEXT: | |-ConstAttr [[ADDR_26:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | |-NoThrowAttr [[ADDR_27:0x[a-z0-9]*]] <line:22:29>
// C-NEXT: | |-AlwaysInlineAttr [[ADDR_28:0x[a-z0-9]*]] <col:38> always_inline
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_29:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_30:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_31:0x[a-z0-9]*]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_31]] <col:1, line:24:1> line:22:1 also_after4[implementation={vendor(llvm)}] 'int ({{.*}})' static inline
// C-NEXT: | |-CompoundStmt [[ADDR_32:0x[a-z0-9]*]] <col:87, line:24:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_33:0x[a-z0-9]*]] <line:23:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_34:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | |-ConstAttr [[ADDR_35:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | |-NoThrowAttr [[ADDR_36:0x[a-z0-9]*]] <line:22:29>
// C-NEXT: | `-AlwaysInlineAttr [[ADDR_37:0x[a-z0-9]*]] <col:38> always_inline
// C-NEXT: |-FunctionDecl [[ADDR_38:0x[a-z0-9]*]] prev [[ADDR_0]] <line:27:1, line:29:1> line:27:5 used also_after1 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_39:0x[a-z0-9]*]] <col:23, line:29:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_40:0x[a-z0-9]*]] <line:28:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: | |-ConstAttr [[ADDR_42:0x[a-z0-9]*]] <line:9:30> Inherited
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_43:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_3]] <col:15> 'int ({{.*}})' Function [[ADDR_4]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_44:0x[a-z0-9]*]] prev [[ADDR_9]] <line:30:1, line:32:1> line:30:5 used also_after2 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_45:0x[a-z0-9]*]] <col:23, line:32:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:31:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_47:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_48:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_11]] <line:16:1> 'int ({{.*}})' Function [[ADDR_12]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_49:0x[a-z0-9]*]] prev [[ADDR_16]] <line:33:1, line:35:1> line:33:5 used also_after3 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_50:0x[a-z0-9]*]] <col:23, line:35:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_51:0x[a-z0-9]*]] <line:34:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_52:0x[a-z0-9]*]] <col:10> 'int' 3
// C-NEXT: | |-NoThrowAttr [[ADDR_53:0x[a-z0-9]*]] <line:19:16> Inherited
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_54:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_55:0x[a-z0-9]*]] prev [[ADDR_25]] <line:36:1, line:38:1> line:36:5 used also_after4 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_56:0x[a-z0-9]*]] <col:23, line:38:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_57:0x[a-z0-9]*]] <line:37:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_58:0x[a-z0-9]*]] <col:10> 'int' 4
// C-NEXT: | |-ConstAttr [[ADDR_59:0x[a-z0-9]*]] <line:9:30> Inherited
// C-NEXT: | |-NoThrowAttr [[ADDR_60:0x[a-z0-9]*]] <line:22:29> Inherited
// C-NEXT: | |-AlwaysInlineAttr [[ADDR_61:0x[a-z0-9]*]] <col:38> Inherited always_inline
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_62:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_30]] <col:1> 'int ({{.*}})' Function [[ADDR_31]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: `-FunctionDecl [[ADDR_63:0x[a-z0-9]*]] <line:41:1, line:44:1> line:41:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_64:0x[a-z0-9]*]] <col:16, line:44:1>
// C-NEXT: `-ReturnStmt [[ADDR_65:0x[a-z0-9]*]] <line:43:3, col:70>
// C-NEXT: `-BinaryOperator [[ADDR_66:0x[a-z0-9]*]] <col:10, col:70> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_67:0x[a-z0-9]*]] <col:10, col:54> 'int' '+'
// C-NEXT: | |-BinaryOperator [[ADDR_68:0x[a-z0-9]*]] <col:10, col:38> 'int' '+'
// C-NEXT: | | |-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' Function [[ADDR_38]] 'also_after1' 'int ({{.*}})'
// C-NEXT: | | | `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:9:15, line:43:22> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:9:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_3]] <col:15> 'int ({{.*}})' Function [[ADDR_4]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | | `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <line:43:26, col:38> 'int'
// C-NEXT: | | |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:26, col:38> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' Function [[ADDR_44]] 'also_after2' 'int ({{.*}})'
// C-NEXT: | | `-CallExpr [[ADDR_79:0x[a-z0-9]*]] <line:16:1, line:43:38> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <line:16:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_11]] <col:1> 'int ({{.*}})' Function [[ADDR_12]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | `-PseudoObjectExpr [[ADDR_81:0x[a-z0-9]*]] <line:43:42, col:54> 'int'
// C-NEXT: | |-CallExpr [[ADDR_82:0x[a-z0-9]*]] <col:42, col:54> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_83:0x[a-z0-9]*]] <col:42> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_84:0x[a-z0-9]*]] <col:42> 'int ({{.*}})' Function [[ADDR_49]] 'also_after3' 'int ({{.*}})'
// C-NEXT: | `-CallExpr [[ADDR_85:0x[a-z0-9]*]] <line:19:1, line:43:54> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_86:0x[a-z0-9]*]] <line:19:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: `-PseudoObjectExpr [[ADDR_87:0x[a-z0-9]*]] <line:43:58, col:70> 'int'
// C-NEXT: |-CallExpr [[ADDR_88:0x[a-z0-9]*]] <col:58, col:70> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_89:0x[a-z0-9]*]] <col:58> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_90:0x[a-z0-9]*]] <col:58> 'int ({{.*}})' Function [[ADDR_55]] 'also_after4' 'int ({{.*}})'
// C-NEXT: `-CallExpr [[ADDR_91:0x[a-z0-9]*]] <line:22:1, line:43:70> 'int'
// C-NEXT: `-ImplicitCastExpr [[ADDR_92:0x[a-z0-9]*]] <line:22:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: `-DeclRefExpr [[ADDR_30]] <col:1> 'int ({{.*}})' Function [[ADDR_31]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:27> col:11 implicit used constexpr also_after1 'int ({{.*}})'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <line:7:15> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_3]] <col:15, line:15:1> line:7:15 constexpr also_after1[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_4:0x[a-z0-9]*]] <line:13:29, line:15:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_5:0x[a-z0-9]*]] <line:14:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_6:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:16:1, col:28> col:12 implicit used also_after2 'int ({{.*}})' static
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:18:1> line:16:1 also_after2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:30, line:18:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:17:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:19:1, col:46> col:30 implicit used also_after3 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_15:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16:0x[a-z0-9]*]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17:0x[a-z0-9]*]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_17]] <col:1, line:21:1> line:19:1 also_after3[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: | `-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:48, line:21:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:20:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:22:1, col:85> col:69 implicit used constexpr also_after4 'int ({{.*}}) __attribute__((nothrow))' static inline
// CXX-NEXT: | |-AlwaysInlineAttr [[ADDR_22:0x[a-z0-9]*]] <col:38> always_inline
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_23:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_24:0x[a-z0-9]*]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25:0x[a-z0-9]*]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_25]] <col:1, line:24:1> line:22:1 constexpr also_after4[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))' static inline
// CXX-NEXT: | |-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:87, line:24:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:23:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-AlwaysInlineAttr [[ADDR_29:0x[a-z0-9]*]] <line:22:38> always_inline
// CXX-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:27:1, line:29:1> line:27:5 invalid also_after1 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_31:0x[a-z0-9]*]] <col:23, line:29:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_32:0x[a-z0-9]*]] <line:28:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_33:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_34:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_2]] <line:7:15> 'int ({{.*}})' Function [[ADDR_3]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_35:0x[a-z0-9]*]] prev [[ADDR_7]] <line:30:1, line:32:1> line:30:5 used also_after2 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_36:0x[a-z0-9]*]] <col:23, line:32:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_37:0x[a-z0-9]*]] <line:31:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_38:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_39:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:16:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_40:0x[a-z0-9]*]] prev [[ADDR_14]] <line:33:1, line:35:1> line:33:5 used also_after3 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <col:23, line:35:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <line:34:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:10> 'int' 3
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_44:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16]] <line:19:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_45:0x[a-z0-9]*]] <line:36:1, line:38:1> line:36:5 invalid also_after4 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:23, line:38:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:37:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_48:0x[a-z0-9]*]] <col:10> 'int' 4
// CXX-NEXT: | |-AlwaysInlineAttr [[ADDR_49:0x[a-z0-9]*]] <line:22:38> Inherited always_inline
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_50:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_24]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-FunctionDecl [[ADDR_51:0x[a-z0-9]*]] <line:41:1, line:44:1> line:41:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_52:0x[a-z0-9]*]] <col:16, line:44:1>
// CXX-NEXT: `-ReturnStmt [[ADDR_53:0x[a-z0-9]*]] <line:43:3, col:70>
// CXX-NEXT: `-BinaryOperator [[ADDR_54:0x[a-z0-9]*]] <col:10, col:70> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_55:0x[a-z0-9]*]] <col:10, col:54> 'int' '+'
// CXX-NEXT: | |-BinaryOperator [[ADDR_56:0x[a-z0-9]*]] <col:10, col:38> 'int' '+'
// CXX-NEXT: | | |-PseudoObjectExpr [[ADDR_57:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | |-CallExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_59:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_60:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_after1' 'int ({{.*}})'
// CXX-NEXT: | | | `-CallExpr [[ADDR_61:0x[a-z0-9]*]] <line:7:15, line:43:22> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_62:0x[a-z0-9]*]] <line:7:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_2]] <col:15> 'int ({{.*}})' Function [[ADDR_3]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | | `-PseudoObjectExpr [[ADDR_63:0x[a-z0-9]*]] <line:43:26, col:38> 'int'
// CXX-NEXT: | | |-CallExpr [[ADDR_64:0x[a-z0-9]*]] <col:26, col:38> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_66:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' {{.*}}Function [[ADDR_35]] 'also_after2' 'int ({{.*}})'
// CXX-NEXT: | | `-CallExpr [[ADDR_67:0x[a-z0-9]*]] <line:16:1, line:43:38> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_68:0x[a-z0-9]*]] <line:16:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | `-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <line:43:42, col:54> 'int'
// CXX-NEXT: | |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:42, col:54> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:42> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:42> 'int ({{.*}})' {{.*}}Function [[ADDR_40]] 'also_after3' 'int ({{.*}})'
// CXX-NEXT: | `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:19:1, line:43:54> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:19:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <line:43:58, col:70> 'int'
// CXX-NEXT: |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:58, col:70> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:58> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:58> 'int ({{.*}}) __attribute__((nothrow))' {{.*}}Function [[ADDR_21]] 'also_after4' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-CallExpr [[ADDR_79:0x[a-z0-9]*]] <line:22:1, line:43:70> 'int'
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <line:22:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: `-DeclRefExpr [[ADDR_24]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
|
compute_ktopipi_type2.h | #ifndef _COMPUTE_KTOPIPI_TYPE2_H
#define _COMPUTE_KTOPIPI_TYPE2_H
#include<alg/a2a/mf_productstore.h>
CPS_START_NAMESPACE
//TYPE 2
//Each contraction of this type is made up of different trace combinations of two objects (below [but not in the code!] for simplicity we ignore the fact that the two vectors in
//the meson fields are allowed to vary in position relative to each other):
//1) \sum_{ \vec x_K } \Gamma_1 \prop^L(x_op,x_K) \gamma^5 \prop^H(x_K,x_op)
//2) \sum_{ \vec y, \vec z } \Gamma_2 \prop^L(x_op,y) S_2 \prop^L(y,z) S_2 \prop^L(z,x_op)
//We use g5-hermiticity on the strange propagator
//1) -> \sum_{ \vec x_K } \Gamma_1 \prop^L(x_op,x_K) [\prop^H(x_op,x_K)]^\dagger \gamma^5
// = \sum_{ \vec x_K } \Gamma_1 vL(x_op) wL^dag(x_K) [ vH(x_op) wH^dag(x_K) ]^dag \gamma^5
// = \sum_{ \vec x_K } \Gamma_1 vL(x_op) [[ wL^dag(x_K) wH(x_K) ]] [vH(x_op)]^dag \gamma^5
// where [[ ]] indicate meson fields
//2) In terms of v and w
// \sum_{ \vec y, \vec z } \Gamma_2 vL(x_op) [[ wL^dag(y) S_2 vL(y) ]] [[ wL^dag(z) S_2 vL(z) ]] wL^dag(x_op)
//Run inside threaded environment
//Accumulate the six type-2 contraction topologies (indices 7..12) for fixed
//kaon time t_K, kaon-operator separation t_dis and thread, given
//part1 = Gamma_1 side (kaon) and part2[2] = Gamma_2 side (two pion orderings).
//Results are written into the thread-local slot of 'result'.
template<typename mf_Policies>
void ComputeKtoPiPiGparity<mf_Policies>::type2_contract(ResultsContainerType &result, const int t_K, const int t_dis, const int thread_id, const SCFmat &part1, const SCFmatVector &part2){
#ifndef MEMTEST_MODE
  static const int n_contract = 6; //six type2 diagrams
  static const int con_off = 7; //index of first contraction in set

  for(int mu=0;mu<4;mu++){ //sum over mu here
    for(int gcombidx=0;gcombidx<8;gcombidx++){
      //multGammaLeft applies G1 (resp. G2) in place; the explicit
      //Gamma1<ComplexType>/Gamma2<ComplexType> lookups previously bound here
      //were unused and have been removed (assumed side-effect free lookups
      //of static gamma-matrix tables — confirm against their definitions).
      SCFmat G1_pt1 = part1; //= G1*part1;
      multGammaLeft(G1_pt1,1,gcombidx,mu);

      CPScolorMatrix<ComplexType> tr_sf_G1_pt1 = G1_pt1.SpinFlavorTrace();

      for(int pt2_pion=0; pt2_pion<2; pt2_pion++){ //which pion comes first in part 2?
        SCFmat G2_pt2 = part2[pt2_pion]; //= G2*part2[pt2_pion];
        multGammaLeft(G2_pt2,2,gcombidx,mu);

        CPScolorMatrix<ComplexType> tr_sf_G2_pt2 = G2_pt2.SpinFlavorTrace();

        SCFmat ctrans_G2_pt2(G2_pt2); //speedup by transposing part 1
        ctrans_G2_pt2.TransposeColor();

#define C(IDX) result(t_K,t_dis,IDX-con_off,gcombidx,thread_id)
        C(7) += G1_pt1.Trace() * G2_pt2.Trace();
        C(8) += Trace( tr_sf_G1_pt1 , Transpose(tr_sf_G2_pt2) );
        C(9) += Trace( tr_sf_G1_pt1 , tr_sf_G2_pt2 );
        C(10) += Trace( G1_pt1 , G2_pt2 );
        C(11) += Trace( G1_pt1, ctrans_G2_pt2 );
        C(12) += Trace( G1_pt1.ColorTrace() , G2_pt2.ColorTrace() );
#undef C
      }
    }
  }
#endif
}
//Form the pion meson-field products
//  con_pi1_pi2[tpi1_idx] = [[mf_pi1(tpi1)]] * [[mf_pi2(tpi2)]]  (and the pi2*pi1 ordering),
//with tpi2 = tpi1 + tsep_pion (mod Lt), averaged over the momentum
//configurations in p_pi_1_all (p_pi_2 is fixed to -p_pi_1 for each).
//Output vectors are indexed by the sampled pi1 timeslice, tpi1/tstep.
template<typename mf_Policies>
void ComputeKtoPiPiGparity<mf_Policies>::type2_compute_mfproducts(std::vector<A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > &con_pi1_pi2,
std::vector<A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > &con_pi2_pi1,
const int tsep_pion, const int tstep, const std::vector<ThreeMomentum> &p_pi_1_all,
MesonFieldMomentumContainer<mf_Policies> &mf_pions,
const int Lt, const int tpi_sampled){
con_pi1_pi2.resize(tpi_sampled); //y is associated with pi1, z with pi2
con_pi2_pi1.resize(tpi_sampled); //y is associated with pi2, z with pi1
A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> tmp; //scratch for accumulating momentum-averaged products
if(!UniqueID()){ printf("Computing con_*_*\n"); fflush(stdout); }
//Some of these mults are quite likely duplicates, so use the product store to maximize reuse
MesonFieldProductStore<mf_Policies> products;
int nmom = p_pi_1_all.size();
for(int pidx=0;pidx<nmom;pidx++){
const ThreeMomentum &p_pi_1 = p_pi_1_all[pidx];
ThreeMomentum p_pi_2 = -p_pi_1; //back-to-back pions: total momentum zero
std::vector<A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > &mf_pi1 = mf_pions.get(p_pi_1);
std::vector<A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > &mf_pi2 = mf_pions.get(p_pi_2);
#ifdef NODE_DISTRIBUTE_MESONFIELDS
//Gather the node-distributed meson fields locally before multiplying
nodeGetMany(2,&mf_pi1,&mf_pi2);
#endif
//nodeGetPionMf(mf_pi1,mf_pi2);
//for(int tpi1=0;tpi1<Lt;tpi1 += tstep){ //my sensible ordering
for(int t_pi1_lin = 1; t_pi1_lin <= Lt; t_pi1_lin += tstep){ //Daiqian's weird ordering
int tpi1 = modLt(t_pi1_lin,Lt);
int tpi1_idx = tpi1 / tstep;
int tpi2 = modLt(tpi1 + tsep_pion, Lt);
if(pidx==0){
//First momentum configuration initializes the accumulators
con_pi1_pi2[tpi1_idx] = products.getProduct(mf_pi1[tpi1], mf_pi2[tpi2]); //node distributed
con_pi2_pi1[tpi1_idx] = products.getProduct(mf_pi2[tpi2], mf_pi1[tpi1]);
//mult(con_pi1_pi2[tpi1_idx], mf_pi1[tpi1], mf_pi2[tpi2]);
//mult(con_pi2_pi1[tpi1_idx], mf_pi2[tpi2], mf_pi1[tpi1]);
}else{
//Subsequent configurations are accumulated (averaged below by 1/nmom)
//mult(tmp, mf_pi1[tpi1], mf_pi2[tpi2]);
tmp = products.getProduct(mf_pi1[tpi1], mf_pi2[tpi2]);
con_pi1_pi2[tpi1_idx].plus_equals(tmp, true);
//mult(tmp, mf_pi2[tpi2], mf_pi1[tpi1]);
tmp = products.getProduct(mf_pi2[tpi2], mf_pi1[tpi1]);
con_pi2_pi1[tpi1_idx].plus_equals(tmp, true);
}
//NB time coordinate of con_*_* is the time coordinate of pi1 (that closest to the kaon)
}
#ifdef NODE_DISTRIBUTE_MESONFIELDS
//Redistribute to free local memory once this momentum pair is consumed
nodeDistributeMany(2,&mf_pi1,&mf_pi2);
#endif
//nodeDistributePionMf(mf_pi1,mf_pi2);
}
//Normalize the momentum average
if(nmom > 1)
for(int t=0;t<tpi_sampled;t++){
con_pi1_pi2[t].times_equals(1./nmom); con_pi2_pi1[t].times_equals(1./nmom);
}
if(!UniqueID()){ printf("Finished computing con_pi_pi\n"); fflush(stdout); }
}
//Prepare split vector-Mesonfield-vector (vMv) products for part 1 (kaon side)
//and part 2 (both pion orderings) at fixed local operator timeslice top_loc.
//The setup stage reorders each meson field for cache-efficient contraction;
//entries whose timeslice is unused on this node (per node_top_used*) are skipped.
template<typename mf_Policies>
void ComputeKtoPiPiGparity<mf_Policies>::type2_mult_vMv_setup(std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorWfftw,A2AvectorV> > &mult_vMv_split_part1,
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorVfftw,A2AvectorW> > &mult_vMv_split_part2_pi1_pi2,
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorVfftw,A2AvectorW> > &mult_vMv_split_part2_pi2_pi1,
const std::vector< A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > &con_pi1_pi2,
const std::vector< A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > &con_pi2_pi1,
const A2AvectorV<mf_Policies> & vL, const A2AvectorV<mf_Policies> & vH, const A2AvectorW<mf_Policies> & wL,
const std::vector<A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorWfftw> > &mf_kaon,
const std::vector<int> &t_K_all, const int top_loc, const int tstep, const int Lt,const int tpi_sampled,
const std::vector< std::vector<bool> > &node_top_used, const std::vector< std::vector<bool> > &node_top_used_kaon){
//Split the vector-mesonfield outer product into two stages where in the first we reorder the mesonfield to optimize cache hits
mult_vMv_split_part1.resize(t_K_all.size());
mult_vMv_split_part2_pi1_pi2.resize(tpi_sampled);
mult_vMv_split_part2_pi2_pi1.resize(tpi_sampled);
int top_glb = top_loc + GJP.TnodeCoor()*GJP.TnodeSites(); //global time coordinate of this local operator timeslice
//Part 1: one setup per required kaon timeslice
#pragma omp parallel for
for(int tkidx=0; tkidx < t_K_all.size(); tkidx++){
if(!node_top_used_kaon[tkidx][top_loc]) continue; //operator timeslice never paired with this kaon time on this node
int t_K = t_K_all[tkidx];
mult_vMv_split_part1[tkidx].setup(vL,mf_kaon[t_K],vH,top_glb);
}
//Part 2: one setup per sampled pi1 timeslice, for both pion orderings
#pragma omp parallel for
for(int t_pi1_lin = 1; t_pi1_lin <= Lt; t_pi1_lin += tstep){ //Daiqian's weird ordering
int t_pi1 = modLt(t_pi1_lin,Lt);
int t_pi1_idx = t_pi1 / tstep;
if(!node_top_used[t_pi1_idx][top_loc]) continue; //can be better parallelized!
mult_vMv_split_part2_pi1_pi2[t_pi1_idx].setup(vL,con_pi1_pi2[t_pi1_idx],wL, top_glb);
mult_vMv_split_part2_pi2_pi1[t_pi1_idx].setup(vL,con_pi2_pi1[t_pi1_idx],wL, top_glb);
}
}
//Run the contraction stage of the split vMv products prepared by
//type2_mult_vMv_setup, producing for each required kaon timeslice (part 1)
//and each sampled pi1 timeslice (part 2, both pion orderings) a canonically
//ordered vector of spin-color-flavor matrices over the node's 3d sites at
//fixed local operator timeslice top_loc. Scratch memory of each split object
//is freed as soon as its contraction completes.
template<typename mf_Policies>
void ComputeKtoPiPiGparity<mf_Policies>::type2_precompute_part1_part2(std::vector<SCFmatVector > &mult_vMv_contracted_part1,
std::vector<SCFmatVector > &mult_vMv_contracted_part2_pi1_pi2,
std::vector<SCFmatVector > &mult_vMv_contracted_part2_pi2_pi1,
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorWfftw,A2AvectorV> > &mult_vMv_split_part1,
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorVfftw,A2AvectorW> > &mult_vMv_split_part2_pi1_pi2,
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorVfftw,A2AvectorW> > &mult_vMv_split_part2_pi2_pi1,
const std::vector<int> &t_K_all, const int top_loc, const int tstep, const int Lt,const int tpi_sampled,
const std::vector< std::vector<bool> > &node_top_used, const std::vector< std::vector<bool> > &node_top_used_kaon){
  mult_vMv_contracted_part1.resize(t_K_all.size());
  mult_vMv_contracted_part2_pi1_pi2.resize(tpi_sampled);
  mult_vMv_contracted_part2_pi2_pi1.resize(tpi_sampled);

  //Part 1: kaon-side contractions (the unused local copy of t_K_all[tkidx]
  //present in the original has been removed)
  for(int tkidx=0; tkidx < t_K_all.size(); tkidx++){
    if(!node_top_used_kaon[tkidx][top_loc]) continue; //timeslice unused for this kaon time on this node
    mult_vMv_split_part1[tkidx].contract(mult_vMv_contracted_part1[tkidx], false, true);
    mult_vMv_split_part1[tkidx].free_mem(); //release reordered-mesonfield scratch immediately
  }
  //Part 2: pion-side contractions, both orderings
  for(int t_pi1_lin = 1; t_pi1_lin <= Lt; t_pi1_lin += tstep){ //Daiqian's weird ordering
    int t_pi1 = modLt(t_pi1_lin,Lt);
    int t_pi1_idx = t_pi1 / tstep;
    if(!node_top_used[t_pi1_idx][top_loc]) continue;
    mult_vMv_split_part2_pi1_pi2[t_pi1_idx].contract(mult_vMv_contracted_part2_pi1_pi2[t_pi1_idx], false,true);
    mult_vMv_split_part2_pi1_pi2[t_pi1_idx].free_mem();
    mult_vMv_split_part2_pi2_pi1[t_pi1_idx].contract(mult_vMv_contracted_part2_pi2_pi1[t_pi1_idx], false,true);
    mult_vMv_split_part2_pi2_pi1[t_pi1_idx].free_mem();
  }
}
//This version averages over multiple pion momentum configurations. Use to project onto A1 representation at run-time. Saves a lot of time!
//This version also overlaps computation for multiple K->pi separations. Result should be an array of ResultsContainerType the same size as the vector 'tsep_k_pi'
//Driver for the type-2 K->pipi contractions. Averages over the pion momentum
//configurations in p_pi_1_all and overlaps the computation for all K->pi
//separations in tsep_k_pi; result[] must have tsep_k_pi.size() entries.
template<typename mf_Policies>
void ComputeKtoPiPiGparity<mf_Policies>::type2(ResultsContainerType result[],
const std::vector<int> &tsep_k_pi, const int &tsep_pion, const int &tstep, const std::vector<ThreeMomentum> &p_pi_1_all,
const std::vector<A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorWfftw> > &mf_kaon, MesonFieldMomentumContainer<mf_Policies> &mf_pions,
const A2AvectorV<mf_Policies> & vL, const A2AvectorV<mf_Policies> & vH,
const A2AvectorW<mf_Policies> & wL, const A2AvectorW<mf_Policies> & wH){
const int Lt = GJP.Tnodes()*GJP.TnodeSites(); //global time extent
assert(Lt % tstep == 0);
const int tpi_sampled = Lt/tstep; //number of sampled pi1 timeslices
static const int n_contract = 6; //six type2 diagrams
static const int con_off = 7; //index of first contraction in set
const int nthread = omp_get_max_threads();
for(int tkp=0;tkp<tsep_k_pi.size();tkp++)
result[tkp].resize(n_contract,nthread); //Resize zeroes output. Result will be thread-reduced before this method ends
//Number of 3d sites on this node's local spatial sublattice
const int size_3d = vL.getMode(0).nodeSites(0)*vL.getMode(0).nodeSites(1)*vL.getMode(0).nodeSites(2);
//Compile some information about which timeslices are involved in the calculation such that we can minimize work by skipping unused timeslices
std::vector< std::vector<bool> > node_top_used(tpi_sampled); //Which local operator timeslices are used for a given pi1 index
std::vector<int> t_K_all; //Which kaon timeslices we need overall
for(int t_pi1_lin = 1; t_pi1_lin <= Lt; t_pi1_lin += tstep){ //Daiqian's weird ordering
int t_pi1 = modLt(t_pi1_lin,Lt); int t_pi1_idx = t_pi1 / tstep;
getUsedTimeslices(node_top_used[t_pi1_idx],t_K_all,tsep_k_pi,t_pi1);
}
std::vector< std::vector<bool> > node_top_used_kaon(t_K_all.size()); //Which local operator timeslices are used for a given kaon index
std::vector<int> tkidx_map(Lt,-1); //inverse map: kaon time -> index into t_K_all (-1 if unused)
for(int tkidx=0;tkidx<t_K_all.size();tkidx++){
getUsedTimeslicesForKaon(node_top_used_kaon[tkidx],tsep_k_pi,t_K_all[tkidx]);
tkidx_map[t_K_all[tkidx]] = tkidx; //allow us to map into the storage given a value of t_K
}
//Form the product of the two meson fields
//con_*_* = \sum_{\vec y,\vec z} [[ wL^dag(y) S_2 vL(y) ]] [[ wL^dag(z) S_2 vL(z) ]]
std::vector< A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > con_pi1_pi2;//(tpi_sampled); //y is associated with pi1, z with pi2
std::vector< A2AmesonField<mf_Policies,A2AvectorWfftw,A2AvectorVfftw> > con_pi2_pi1; //(tpi_sampled); //y is associated with pi2, z with pi1
type2_compute_mfproducts(con_pi1_pi2,con_pi2_pi1,tsep_pion,tstep,p_pi_1_all,mf_pions, Lt, tpi_sampled);
//Loop over this node's local operator timeslices
for(int top_loc = 0; top_loc < GJP.TnodeSites(); top_loc++){
const int top_glb = top_loc + GJP.TnodeCoor()*GJP.TnodeSites(); //global operator time
#ifndef DISABLE_TYPE2_SPLIT_VMV
//Split the vector-mesonfield outer product into two stages where in the first we reorder the mesonfield to optimize cache hits
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorWfftw,A2AvectorV> > mult_vMv_split_part1; //[t_K_all.size()];
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorVfftw,A2AvectorW> > mult_vMv_split_part2_pi1_pi2; //[tpi_sampled];
std::vector<mult_vMv_split<mf_Policies,A2AvectorV,A2AvectorWfftw,A2AvectorVfftw,A2AvectorW> > mult_vMv_split_part2_pi2_pi1; //[tpi_sampled];
type2_mult_vMv_setup(mult_vMv_split_part1,mult_vMv_split_part2_pi1_pi2,mult_vMv_split_part2_pi2_pi1,con_pi1_pi2,con_pi2_pi1,vL,vH,wL,mf_kaon,t_K_all,top_loc,tstep, Lt,tpi_sampled,node_top_used,node_top_used_kaon);
# ifndef DISABLE_TYPE2_PRECOMPUTE
//Contract on all 3d sites on this node with fixed operator time coord top_glb into a canonically ordered output vector
std::vector<SCFmatVector > mult_vMv_contracted_part1; //[t_K_all.size()][x3d];
std::vector<SCFmatVector > mult_vMv_contracted_part2_pi1_pi2; //[tpi_sampled][x3d];
std::vector<SCFmatVector > mult_vMv_contracted_part2_pi2_pi1; //[tpi_sampled][x3d];
type2_precompute_part1_part2(mult_vMv_contracted_part1,mult_vMv_contracted_part2_pi1_pi2,mult_vMv_contracted_part2_pi2_pi1,
mult_vMv_split_part1,mult_vMv_split_part2_pi1_pi2,mult_vMv_split_part2_pi2_pi1,
t_K_all,top_loc,tstep,Lt,tpi_sampled,node_top_used,node_top_used_kaon);
# endif
#endif
//Now loop over Q_i insertion location. Each node naturally has its own sublattice to work on. Thread over sites in usual way
#pragma omp parallel for
for(int xop3d_loc = 0; xop3d_loc < size_3d; xop3d_loc++){
int thread_id = omp_get_thread_num();
//Part 1 does not care about the location of the pion, only that of the kaon. It may be used multiple times if we have multiple K->pi seps, so compute it separately.
SCFmatVector part1_storage(t_K_all.size());
for(int tkidx=0; tkidx < t_K_all.size(); tkidx++){
if(!node_top_used_kaon[tkidx][top_loc]) continue;
int t_K = t_K_all[tkidx];
//Compute part 1
// = \sum_{ \vec x_K } \Gamma_1 vL(x_op) [[ wL^dag(x_K) wH(x_K) ]] [vH(x_op)]^dag \gamma^5
SCFmat &part1 = part1_storage[tkidx];
#if defined(DISABLE_TYPE2_SPLIT_VMV)
mult(part1, vL, mf_kaon[t_K], vH, xop3d_loc, top_loc, false, true);
#elif defined(DISABLE_TYPE2_PRECOMPUTE)
mult_vMv_split_part1[tkidx].contract(part1,xop3d_loc,false,true);
#else
part1 = mult_vMv_contracted_part1[tkidx][xop3d_loc];
#endif
part1.gr(-5); //right multiply by g5
}
//for(int t_pi1 = 0; t_pi1 < Lt; t_pi1 += tstep){ //my sensible ordering
for(int t_pi1_lin = 1; t_pi1_lin <= Lt; t_pi1_lin += tstep){ //Daiqian's weird ordering
int t_pi1 = modLt(t_pi1_lin,Lt);
int t_pi1_idx = t_pi1 / tstep;
int t_pi2 = modLt(t_pi1 + tsep_pion, Lt);
if(!node_top_used[t_pi1_idx][top_loc]) continue; //skip unused timeslices
//Construct part 2 (this doesn't involve the kaon):
// \sum_{ \vec y, \vec z } \Gamma_2 vL(x_op) [[ wL^dag(y) S_2 vL(y) ]] [[ wL^dag(z) S_2 vL(z) ]] wL^dag(x_op)
//SCFmat part2[2];
SCFmatVector part2(2);
#if defined(DISABLE_TYPE2_SPLIT_VMV)
mult(part2[0], vL, con_pi1_pi2[t_pi1_idx], wL, xop3d_loc, top_loc, false, true); //part2 goes from insertion to pi1 to pi2 and back to insertion
mult(part2[1], vL, con_pi2_pi1[t_pi1_idx], wL, xop3d_loc, top_loc, false, true); //part2 goes from insertion to pi2 to pi1 and back to insertion
#elif defined(DISABLE_TYPE2_PRECOMPUTE)
mult_vMv_split_part2_pi1_pi2[t_pi1_idx].contract(part2[0],xop3d_loc,false,true);
mult_vMv_split_part2_pi2_pi1[t_pi1_idx].contract(part2[1],xop3d_loc,false,true);
#else
part2[0] = mult_vMv_contracted_part2_pi1_pi2[t_pi1_idx][xop3d_loc];
part2[1] = mult_vMv_contracted_part2_pi2_pi1[t_pi1_idx][xop3d_loc];
#endif
//Accumulate contractions for every K->pi separation that uses this (t_pi1, top) pair
for(int tkpi_idx = 0; tkpi_idx < tsep_k_pi.size(); tkpi_idx++){
int t_K = modLt(t_pi1 - tsep_k_pi[tkpi_idx], Lt);
int t_dis = modLt(top_glb - t_K, Lt); //distance between kaon and operator is the output time coordinate
if(t_dis >= tsep_k_pi[tkpi_idx] || t_dis == 0) continue; //don't bother computing operator insertion locations outside of the region between the kaon and first pion or on top of either operator
const SCFmat &part1 = part1_storage[tkidx_map[t_K]];
type2_contract(result[tkpi_idx],t_K,t_dis,thread_id,part1,part2);
}
}//tpi1 loop
}//xop3d_loc loop
}//top_loc loop
//Reduce over threads and nodes
for(int tkp=0;tkp<tsep_k_pi.size();tkp++){
result[tkp].threadSum();
result[tkp].nodeSum();
#ifndef DAIQIAN_COMPATIBILITY_MODE
result[tkp] *= Float(0.5); //coefficient of 0.5 associated with average over pt2 pion ordering
#endif
}
}
CPS_END_NAMESPACE
#endif
|
nested_parallel_for_irregular_omp.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See LICENSE.txt in top-level directory.
*/
/* Nested Pragma omp parallel for directive evaluation
* Output: avg time
*/
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define NUM_ELEMS 5017600 /* 2GB */
#define NUM_REPS 1
/* Benchmark an irregular nested "omp parallel for" workload.
 * argv: [1] total elements n (default NUM_ELEMS), [2] inner team size
 * (default: outer team size), [3] repetitions (default 3).
 * Prints: nthreads inner_threads n avg_time. */
int main(int argc, char *argv[])
{
    int i, j, r, nthreads;
    double *time, avg_time = 0.0;

    /* Query the default team size. Only the master thread writes nthreads;
     * the implicit barrier at the end of the parallel region makes the
     * value visible before it is read below. */
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    int n = (argc > 1) ? atoi(argv[1]) : NUM_ELEMS;    /* requested elements */
    int in_th = (argc > 2) ? atoi(argv[2]) : nthreads; /* inner team size */
    int rep = (argc > 3) ? atoi(argv[3]) : 3;          /* repetitions */
    int it = ceil(sqrt((double)n)); /* side of the it*it iteration square */
    srand(1983);
    n = it * it; /* round n up to a perfect square covered exactly by the nest */

    time = (double *)malloc(sizeof(double) * rep);
    if (time == NULL) { /* bug fix: malloc result was previously unchecked */
        fprintf(stderr, "failed to allocate %d timing slots\n", rep);
        return EXIT_FAILURE;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel for
        for (j = 0; j < it; j++) {
            omp_set_num_threads(in_th); /* team size for the nested region */
#pragma omp parallel for
            for (i = 0; i < it; i++) {
                /* NOTE(review): rand() is called concurrently here without
                 * synchronization; its hidden state makes this a data race
                 * on most C libraries. The assert still holds (kk counts up
                 * to the locally-read value), but the per-iteration work is
                 * not reproducible — confirm whether that is intended for
                 * this "irregular" benchmark before changing it. */
                int random = rand() % 10000;
                volatile int kk = 0; /* volatile defeats loop elimination */
                int k;
                for (k = 0; k < random; k++)
                    kk++;
                assert(kk == random);
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }
    avg_time /= rep;
    printf("%d %d %d %f\n", nthreads, in_th, n, avg_time);
    free(time);
    return EXIT_SUCCESS;
}
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% John Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/morphology.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveBlurImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively blur the default channel set.
  */
  return(AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) <= MagickEpsilon ? 1.0 : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);  /* degenerate sigma: nothing to blur */
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The result selects, per pixel, how small a kernel to use (edges -> small).
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) LevelImage(edge_image,"20%,95%");
  gaussian_image=GaussianBlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) LevelImage(edge_image,"10%,95%");
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices i are populated; kernel[i] is (width-i)x(width-i).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) AcquireQuantumMemory((size_t) (width-i),(width-i)*
      sizeof(**kernel));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    if (fabs(normalize) <= MagickEpsilon)
      normalize=1.0;
    normalize=1.0/normalize;
    /*
      Bug fix: scale ALL (width-i)*(width-i) coefficients.  The previous
      bound of j*j == ((width-i)/2)^2 normalized only the first quarter of
      the kernel, biasing the weighted sums computed below.
    */
    for (k=0; k < (ssize_t) ((width-i)*(width-i)); k++)
      kernel[i][k]=normalize*kernel[i][k];
  }
  if (i < (ssize_t) width)
    {
      /* allocation failed part-way: unwind the kernels allocated so far */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
      kernel=(double **) RelinquishMagickMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  edge_view=AcquireCacheView(edge_image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p,
      *restrict r;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha,
        gamma;

      register const double
        *restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /* select kernel index from edge intensity: strong edge -> large i
         -> small (width-i) kernel; clamp and force even (only even indices
         were allocated above) */
      i=(ssize_t) ceil((double) width*QuantumScale*PixelIntensity(r)-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      pixel=bias;
      k=kernel[i];
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetRedPixelComponent(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetGreenPixelComponent(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetBluePixelComponent(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetOpacityPixelComponent(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetIndexPixelComponent(indexes+x+(width-i)*
              v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /* renormalize by the accumulated alpha-weighted kernel mass */
      gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
      if ((channel & RedChannel) != 0)
        SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(gamma*
          pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AdaptiveBlurImageChannel)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
  kernel=(double **) RelinquishMagickMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveSharpenImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively sharpen the default channel set.
  */
  return(AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) <= MagickEpsilon ? 1.0 : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  /*
    Initialize sharpened image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  if (fabs(sigma) <= MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sharp_image->exception);
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The leveled edge map later selects, per pixel, how large a sharpening
    kernel to apply.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) LevelImage(edge_image,"20%,95%");
  gaussian_image=GaussianBlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) LevelImage(edge_image,"10%,95%");
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only
    even slots kernel[0], kernel[2], ... are populated; each holds a
    (width-i) x (width-i) negated Gaussian.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) AcquireQuantumMemory((size_t) (width-i),(width-i)*
      sizeof(**kernel));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    if (fabs(normalize) <= MagickEpsilon)
      normalize=1.0;
    normalize=1.0/normalize;
    /*
      Scale every tap of the kernel: it holds (width-i)*(width-i) entries.
      The previous bound of (j*j) normalized only a quarter of the taps.
    */
    for (k=0; k < (ssize_t) ((width-i)*(width-i)); k++)
      kernel[i][k]=normalize*kernel[i][k];
  }
  if (i < (ssize_t) width)
    {
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
      kernel=(double **) RelinquishMagickMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  edge_view=AcquireCacheView(edge_image);
  sharp_view=AcquireCacheView(sharp_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p,
      *restrict r;

    register IndexPacket
      *restrict sharp_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha,
        gamma;

      register const double
        *restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        Select the kernel: a strong edge (bright edge-map pixel) maps to a
        small i, i.e. the largest kernel and the most intense sharpening.
        The edge-map intensity is scaled to [0,1] before use; the previous
        expression mixed in QuantumRange and always selected the smallest
        kernel.
      */
      i=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        PixelIntensity(r))-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;  /* only even kernel slots are populated */
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      k=kernel[i];
      pixel=bias;
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetRedPixelComponent(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetGreenPixelComponent(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetBluePixelComponent(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetOpacityPixelComponent(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetIndexPixelComponent(indexes+x+(width-i)*
              v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
      if ((channel & RedChannel) != 0)
        SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetIndexPixelComponent(sharp_indexes+x,ClampToQuantum(gamma*
          pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AdaptiveSharpenImageChannel)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
  kernel=(double **) RelinquishMagickMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% BlurImage() differs from GaussianBlurImage() in that it uses a separable
% kernel which is faster but mathematically equivalent to the non-separable
% kernel.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *BlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience entry point: blur the default channel set by delegating to
    BlurImageChannel().
  */
  return(BlurImageChannel(image,DefaultChannels,radius,sigma,exception));
}
/*
  GetBlurKernel() returns a normalized 1-D Gaussian convolution kernel of
  `width` taps for standard deviation `sigma` (via the MagickSigma macro,
  which substitutes 1.0 for a near-zero sigma).  Returns NULL on memory
  allocation failure; the caller owns the returned buffer.
*/
static double *GetBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    normalize;

  register ssize_t
    i;

  ssize_t
    j,
    k;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double *) NULL)
    return((double *) NULL);  /* explicit NULL, matching file convention */
  normalize=0.0;
  j=(ssize_t) width/2;
  i=0;
  for (k=(-j); k <= j; k++)
  {
    kernel[i]=(double) (exp(-((double) k*k)/(2.0*MagickSigma*MagickSigma))/
      (MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
    i++;
  }
  /*
    Normalize so the taps sum to one; callers guarantee sigma is non-zero so
    normalize is strictly positive here.
  */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}
MagickExport Image *BlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    x,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The Gaussian is separable, so a single 1-D kernel is applied twice:
    once along the rows and once along the columns.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetBlurKernel(width,sigma);
  if (kernel == (double *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  BlurImage with %.20g kernel:",(double) width);
      message=AcquireString("");
      k=kernel;
      for (i=0; i < (ssize_t) width; i++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) i);
        (void) ConcatenateString(&message,format);
        (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
        (void) ConcatenateString(&message,format);
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Blur rows.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y,
      image->columns+width,1,exception);
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict kernel_pixels;

      register ssize_t
        i;

      pixel=bias;
      k=kernel;
      kernel_pixels=p;
      /*
        Opaque path: no per-tap alpha weighting is needed.
      */
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          for (i=0; i < (ssize_t) width; i++)
          {
            pixel.red+=(*k)*GetRedPixelComponent(kernel_pixels);
            pixel.green+=(*k)*GetGreenPixelComponent(kernel_pixels);
            pixel.blue+=(*k)*GetBluePixelComponent(kernel_pixels);
            k++;
            kernel_pixels++;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*GetOpacityPixelComponent(kernel_pixels);
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.index+=(*k)*GetIndexPixelComponent(kernel_indexes);
                k++;
                kernel_indexes++;
              }
              SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(
                pixel.index));
            }
        }
      else
        {
          /*
            Matte path: weight each color tap by its alpha and renormalize
            by the accumulated alpha-weighted kernel mass (gamma).
          */
          MagickRealType
            alpha,
            gamma;

          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(kernel_pixels));
            pixel.red+=(*k)*alpha*GetRedPixelComponent(kernel_pixels);
            pixel.green+=(*k)*alpha*GetGreenPixelComponent(kernel_pixels);
            pixel.blue+=(*k)*alpha*GetBluePixelComponent(kernel_pixels);
            gamma+=(*k)*alpha;
            k++;
            kernel_pixels++;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*GetOpacityPixelComponent(kernel_pixels);
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                alpha=(MagickRealType) (QuantumScale*
                  GetAlphaPixelComponent(kernel_pixels));
                pixel.index+=(*k)*alpha*(*kernel_indexes);
                k++;
                kernel_pixels++;
                kernel_indexes++;
              }
              SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(gamma*
                pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_BlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,blur_image->rows+
          blur_image->columns);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  /*
    Blur columns.  The second pass reads from the row-blurred image.
  */
  image_view=AcquireCacheView(blur_image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (x=0; x < (ssize_t) blur_image->columns; x++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      y;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,x,-((ssize_t) width/2L),1,
      image->rows+width,exception);
    q=GetCacheViewAuthenticPixels(blur_view,x,0,1,blur_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (y=0; y < (ssize_t) blur_image->rows; y++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict kernel_pixels;

      register ssize_t
        i;

      pixel=bias;
      k=kernel;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          for (i=0; i < (ssize_t) width; i++)
          {
            pixel.red+=(*k)*GetRedPixelComponent(kernel_pixels);
            pixel.green+=(*k)*GetGreenPixelComponent(kernel_pixels);
            pixel.blue+=(*k)*GetBluePixelComponent(kernel_pixels);
            k++;
            kernel_pixels++;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*GetOpacityPixelComponent(kernel_pixels);
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.index+=(*k)*GetIndexPixelComponent(kernel_indexes);
                k++;
                kernel_indexes++;
              }
              SetIndexPixelComponent(blur_indexes+y,ClampToQuantum(
                pixel.index));
            }
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(kernel_pixels));
            pixel.red+=(*k)*alpha*GetRedPixelComponent(kernel_pixels);
            pixel.green+=(*k)*alpha*GetGreenPixelComponent(kernel_pixels);
            pixel.blue+=(*k)*alpha*GetBluePixelComponent(kernel_pixels);
            gamma+=(*k)*alpha;
            k++;
            kernel_pixels++;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*GetOpacityPixelComponent(kernel_pixels);
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                alpha=(MagickRealType) (QuantumScale*
                  GetAlphaPixelComponent(kernel_pixels));
                pixel.index+=(*k)*alpha*(*kernel_indexes);
                k++;
                kernel_pixels++;
                kernel_indexes++;
              }
              SetIndexPixelComponent(blur_indexes+y,ClampToQuantum(gamma*
                pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_BlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,blur_image->rows+
          blur_image->columns);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishMagickMemory(kernel);
  /*
    Assign the image type *before* the failure check: DestroyImage() returns
    NULL, so the previous ordering dereferenced a NULL blur_image whenever
    status was MagickFalse.
  */
  blur_image->type=image->type;
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const size_t order,
% const double *kernel,ExceptionInfo *exception)
% Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
% const size_t order,const double *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o order: the number of columns and rows in the filter kernel.
%
% o kernel: An array of double representing the convolution kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,const size_t order,
  const double *kernel,ExceptionInfo *exception)
{
  /*
    Convenience entry point: convolve the default channel set by delegating
    to ConvolveImageChannel().
  */
  return(ConvolveImageChannel(image,DefaultChannels,order,kernel,
    exception));
}
/*
  ConvolveImageChannel() convolves the selected channels of `image` with the
  caller-supplied `order` x `order` kernel and returns a new image; the
  caller owns the result.  The kernel is normalized (scaled so its taps sum
  to one, unless the sum is ~0) before use.  Returns NULL on failure with
  the reason recorded in `exception`.
*/
MagickExport Image *ConvolveImageChannel(const Image *image,
  const ChannelType channel,const size_t order,const double *kernel,
  ExceptionInfo *exception)
{
#define ConvolveImageTag "Convolve/Image"

  CacheView
    *convolve_view,
    *image_view;

  double
    *normal_kernel;

  Image
    *convolve_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    gamma;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize convolve image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=order;
  /* An odd width is required so the kernel has a well-defined center tap. */
  if ((width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  convolve_image=CloneImage(image,0,0,MagickTrue,exception);
  if (convolve_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(convolve_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&convolve_image->exception);
      convolve_image=DestroyImage(convolve_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /* Dump the raw kernel, one row per log line, when tracing. */
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ConvolveImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Normalize kernel.
  */
  normal_kernel=(double *) AcquireQuantumMemory(width*width,
    sizeof(*normal_kernel));
  if (normal_kernel == (double *) NULL)
    {
      convolve_image=DestroyImage(convolve_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  gamma=0.0;
  for (i=0; i < (ssize_t) (width*width); i++)
    gamma+=kernel[i];
  /* A ~zero sum (e.g. edge-detect kernels) is left unscaled. */
  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
  for (i=0; i < (ssize_t) (width*width); i++)
    normal_kernel[i]=gamma*kernel[i];
  /*
    Convolve image.  Rows are processed independently, one OpenMP task per
    row; `status` records any failure and short-circuits remaining rows.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  convolve_view=AcquireCacheView(convolve_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict convolve_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read a (columns+width) x width window centered on row y; virtual
      pixels supply the values that fall outside the image.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),image->columns+width,width,exception);
    q=GetCacheViewAuthenticPixels(convolve_view,0,y,convolve_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    convolve_indexes=GetCacheViewAuthenticIndexQueue(convolve_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict kernel_pixels;

      register ssize_t
        u;

      ssize_t
        v;

      pixel=bias;
      k=normal_kernel;
      kernel_pixels=p;
      /* Opaque path: plain weighted sum, no alpha weighting required. */
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              pixel.red+=(*k)*kernel_pixels[u].red;
              pixel.green+=(*k)*kernel_pixels[u].green;
              pixel.blue+=(*k)*kernel_pixels[u].blue;
              k++;
            }
            /* Advance one row within the padded window. */
            kernel_pixels+=image->columns+width;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=normal_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+width;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=normal_kernel;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  pixel.index+=(*k)*GetIndexPixelComponent(kernel_indexes+u);
                  k++;
                }
                kernel_indexes+=image->columns+width;
              }
              SetIndexPixelComponent(convolve_indexes+x,ClampToQuantum(
                pixel.index));
            }
        }
      else
        {
          /*
            Matte path: weight each color tap by its alpha and renormalize
            by the accumulated alpha-weighted kernel mass (gamma).
          */
          MagickRealType
            alpha,
            gamma;

          gamma=0.0;
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                kernel_pixels[u].opacity));
              pixel.red+=(*k)*alpha*kernel_pixels[u].red;
              pixel.green+=(*k)*alpha*kernel_pixels[u].green;
              pixel.blue+=(*k)*alpha*kernel_pixels[u].blue;
              gamma+=(*k)*alpha;
              k++;
            }
            kernel_pixels+=image->columns+width;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=normal_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  pixel.opacity+=(*k)*GetOpacityPixelComponent(kernel_pixels+u);
                  k++;
                }
                kernel_pixels+=image->columns+width;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=normal_kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    kernel_pixels[u].opacity));
                  pixel.index+=(*k)*alpha*GetIndexPixelComponent(
                    kernel_indexes+u);
                  k++;
                }
                kernel_pixels+=image->columns+width;
                kernel_indexes+=image->columns+width;
              }
              SetIndexPixelComponent(convolve_indexes+x,ClampToQuantum(gamma*
                pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(convolve_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ConvolveImageChannel)
#endif
        proceed=SetImageProgress(image,ConvolveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  convolve_image->type=image->type;
  convolve_view=DestroyCacheView(convolve_view);
  image_view=DestroyCacheView(image_view);
  normal_kernel=(double *) RelinquishMagickMemory(normal_kernel);
  if (status == MagickFalse)
    convolve_image=DestroyImage(convolve_image);
  return(convolve_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image.
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull() performs one push/pull step of the despeckle filter along the
  neighbor direction (x_offset,y_offset).  `f` and `g` are pixel buffers of
  (columns+2) x (rows+2) Quantum values: the image padded by a one-pixel
  border on every side (hence the `+(columns+2)` / `++` adjustments that
  skip the border row and columns).  With polarity > 0 pixel values are
  nudged *up* by ScaleCharToQuantum(1) when the directional neighbor
  exceeds them by at least ScaleCharToQuantum(2); with polarity <= 0 they
  are nudged *down* symmetrically.  The first pass reads `f`/writes `g`
  against the (+offset) neighbor; the second pass reads `g`/writes `f`
  against both the (+offset) and (-offset) neighbors.
  NOTE(review): this appears to be the classic Crimmins geometric-hull
  speckle-removal step — confirm against the algorithm reference before
  relying on that name.
*/
static void Hull(const ssize_t x_offset,const ssize_t y_offset,
  const size_t columns,const size_t rows,Quantum *f,Quantum *g,
  const int polarity)
{
  MagickRealType
    v;

  register Quantum
    *p,
    *q,
    *r,
    *s;

  register ssize_t
    x;

  ssize_t
    y;

  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row; r points at the directional neighbor of p. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
  for (y=0; y < (ssize_t) rows; y++)
  {
    /* Skip the left border column. */
    p++;
    q++;
    r++;
    if (polarity > 0)
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*p);
        if ((MagickRealType) *r >= (v+(MagickRealType) ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        *q=(Quantum) v;
        p++;
        q++;
        r++;
      }
    else
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*p);
        if ((MagickRealType) *r <= (v-(MagickRealType) ScaleCharToQuantum(2)))
          v-=(ssize_t) ScaleCharToQuantum(1);
        *q=(Quantum) v;
        p++;
        q++;
        r++;
      }
    /* Skip the right border column. */
    p++;
    q++;
    r++;
  }
  /* Second pass: compare against both the +offset (s) and -offset (r)
     neighbors of the freshly written buffer and fold back into f. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
  for (y=0; y < (ssize_t) rows; y++)
  {
    p++;
    q++;
    r++;
    s++;
    if (polarity > 0)
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*q);
        if (((MagickRealType) *s >=
             (v+(MagickRealType) ScaleCharToQuantum(2))) &&
            ((MagickRealType) *r > v))
          v+=ScaleCharToQuantum(1);
        *p=(Quantum) v;
        p++;
        q++;
        r++;
        s++;
      }
    else
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*q);
        if (((MagickRealType) *s <=
             (v-(MagickRealType) ScaleCharToQuantum(2))) &&
            ((MagickRealType) *r < v))
          v-=(MagickRealType) ScaleCharToQuantum(1);
        *p=(Quantum) v;
        p++;
        q++;
        r++;
        s++;
      }
    p++;
    q++;
    r++;
    s++;
  }
}
/*
  DespeckleImage() reduces speckle noise by running the Hull morphological
  operation over each channel (R, G, B, opacity, and K for CMYK images).

  Fixes:
    - The write-back loop acquired the authentic index queue from image_view,
      but the authentic pixels q come from despeckle_view; the queue must come
      from the same view or SetIndexPixelComponent() targets the wrong image.
    - The buffers allocation used sizeof(*pixels) instead of sizeof(*buffers)
      (same type, but now consistent with the pointer it sizes).
*/
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  Quantum
    *restrict buffers,
    *restrict pixels;

  size_t
    length,
    number_channels;

  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  despeckle_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffers: each is the image plus a one-pixel border, as
    required by Hull().
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixels=(Quantum *) AcquireQuantumMemory(length,2*sizeof(*pixels));
  buffers=(Quantum *) AcquireQuantumMemory(length,2*sizeof(*buffers));
  if ((pixels == (Quantum *) NULL) || (buffers == (Quantum *) NULL))
    {
      if (buffers != (Quantum *) NULL)
        buffers=(Quantum *) RelinquishMagickMemory(buffers);
      if (pixels != (Quantum *) NULL)
        pixels=(Quantum *) RelinquishMagickMemory(pixels);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireCacheView(image);
  despeckle_view=AcquireCacheView(despeckle_image);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    register Quantum
      *buffer,
      *pixel;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    /*
      Copy channel i into the bordered pixel buffer.
    */
    pixel=pixels;
    (void) ResetMagickMemory(pixel,0,length*sizeof(*pixel));
    buffer=buffers;
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *restrict indexes;

      register const PixelPacket
        *restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixel[j]=GetRedPixelComponent(p); break;
          case 1: pixel[j]=GetGreenPixelComponent(p); break;
          case 2: pixel[j]=GetBluePixelComponent(p); break;
          case 3: pixel[j]=GetOpacityPixelComponent(p); break;
          case 4: pixel[j]=GetBlackPixelComponent(indexes+x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    /*
      Apply the hull operation in all four directions, both polarities.
    */
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(X[k],Y[k],image->columns,image->rows,pixel,buffer,1);
      Hull(-X[k],-Y[k],image->columns,image->rows,pixel,buffer,1);
      Hull(-X[k],-Y[k],image->columns,image->rows,pixel,buffer,-1);
      Hull(X[k],Y[k],image->columns,image->rows,pixel,buffer,-1);
    }
    /*
      Write the despeckled channel back to the destination image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *restrict indexes;

      register PixelPacket
        *restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      /*
        The index queue must come from despeckle_view (the view whose
        authentic pixels we just acquired), not image_view.
      */
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: SetRedPixelComponent(q,pixel[j]); break;
          case 1: SetGreenPixelComponent(q,pixel[j]); break;
          case 2: SetBluePixelComponent(q,pixel[j]); break;
          case 3: SetOpacityPixelComponent(q,pixel[j]); break;
          case 4: SetIndexPixelComponent(indexes+x,pixel[j]); break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffers=(Quantum *) RelinquishMagickMemory(buffers);
  pixels=(Quantum *) RelinquishMagickMemory(pixels);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EdgeImage() builds a width x width edge-detection kernel (all taps -1.0
  except the center tap, which is width*width-1) and convolves the image
  with it.  A radius of 0 lets GetOptimalKernelWidth1D() pick the width.
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  double
    *kernel;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    taps;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Fill every tap with -1.0, then overwrite the center tap (width is odd,
    so taps/2 is the exact center of the kernel).
  */
  taps=(ssize_t) (width*width);
  for (i=0; i < taps; i++)
    kernel[i]=(-1.0);
  kernel[taps/2]=(double) (width*width-1.0);
  edge_image=ConvolveImage(image,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EmbossImage() convolves the image with a signed Gaussian kernel whose
  non-zero taps lie on a single anti-diagonal, producing a grayscale
  three-dimensional relief effect, then equalizes the result.
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *emboss_image;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    diagonal,
    mid,
    u,
    v;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Only taps on the anti-diagonal (u == diagonal, which starts at mid and
    decreases by one per row) are non-zero; they carry a Gaussian weight
    whose sign flips in the upper-left quadrant.
  */
  mid=(ssize_t) width/2;
  diagonal=mid;
  i=0;
  for (v=(-mid); v <= mid; v++)
  {
    for (u=(-mid); u <= mid; u++)
    {
      if (u == diagonal)
        kernel[i]=(double) (((u < 0) || (v < 0) ? -8.0 : 8.0)*
          exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
          (2.0*MagickPI*MagickSigma*MagickSigma));
      else
        kernel[i]=0.0;
      i++;
    }
    diagonal--;
  }
  emboss_image=ConvolveImage(image,width,kernel,exception);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FilterImage() applies a custom convolution kernel to the image.
%
% The format of the FilterImage method is:
%
% Image *FilterImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
% Image *FilterImageChannel(const Image *image,const ChannelType channel,
% const KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FilterImage() applies a custom convolution kernel to the default channels;
  it is a convenience wrapper around FilterImageChannel().
*/
MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  return(FilterImageChannel(image,DefaultChannels,kernel,exception));
}
/*
  FilterImageChannel() convolves the selected channels of the image with the
  given kernel.  The kernel width must be odd.  When the image has a matte
  channel and OpacityChannel is selected, color channels are weighted by
  per-pixel alpha and renormalized by the accumulated alpha (gamma).

  Fix: the convolution loops iterated the outer index v over kernel->width
  and the inner index u over kernel->height.  The virtual-pixel region is
  kernel->height rows with a stride of image->columns+kernel->width, so for
  non-square kernels the old bounds walked outside the acquired region and
  mis-addressed kernel taps.  The outer loop now runs over kernel->height
  (rows) and the inner loop over kernel->width (columns), matching the
  row-major kernel layout used by the debug logging above.
*/
MagickExport Image *FilterImageChannel(const Image *image,
  const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception)
{
#define FilterImageTag "Filter/Image"

  CacheView
    *filter_view,
    *image_view;

  Image
    *filter_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  ssize_t
    y;

  /*
    Initialize filter image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((kernel->width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  filter_image=CloneImage(image,0,0,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&filter_image->exception);
      filter_image=DestroyImage(filter_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double)
        kernel->height);
      message=AcquireString("");
      k=kernel->values;
      for (v=0; v < (ssize_t) kernel->height; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) kernel->width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  status=AccelerateConvolveImage(image,kernel,filter_image,exception);
  if (status == MagickTrue)
    return(filter_image);
  /*
    Filter image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  filter_view=AcquireCacheView(filter_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict filter_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel->width/2L),
      y-(ssize_t) (kernel->height/2L),image->columns+kernel->width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict kernel_pixels;

      register ssize_t
        u;

      ssize_t
        v;

      pixel=bias;
      k=kernel->values;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: straight weighted sum of the kernel neighborhood.
          */
          for (v=0; v < (ssize_t) kernel->height; v++)
          {
            for (u=0; u < (ssize_t) kernel->width; u++)
            {
              pixel.red+=(*k)*kernel_pixels[u].red;
              pixel.green+=(*k)*kernel_pixels[u].green;
              pixel.blue+=(*k)*kernel_pixels[u].blue;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel->values;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel->values;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  pixel.index+=(*k)*GetIndexPixelComponent(kernel_indexes+u);
                  k++;
                }
                kernel_indexes+=image->columns+kernel->width;
              }
              SetIndexPixelComponent(filter_indexes+x,ClampToQuantum(
                pixel.index));
            }
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          /*
            Matte path: weight color taps by per-pixel alpha and
            renormalize by the accumulated alpha (gamma).
          */
          gamma=0.0;
          for (v=0; v < (ssize_t) kernel->height; v++)
          {
            for (u=0; u < (ssize_t) kernel->width; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                GetOpacityPixelComponent(kernel_pixels+u)));
              pixel.red+=(*k)*alpha*GetRedPixelComponent(kernel_pixels+u);
              pixel.green+=(*k)*alpha*GetGreenPixelComponent(kernel_pixels+u);
              pixel.blue+=(*k)*alpha*GetBluePixelComponent(kernel_pixels+u);
              gamma+=(*k)*alpha;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel->values;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  pixel.opacity+=(*k)*GetOpacityPixelComponent(kernel_pixels+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel->values;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    kernel_pixels[u].opacity));
                  pixel.index+=(*k)*alpha*GetIndexPixelComponent(
                    kernel_indexes+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
                kernel_indexes+=image->columns+kernel->width;
              }
              SetIndexPixelComponent(filter_indexes+x,ClampToQuantum(gamma*
                pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(filter_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FilterImageChannel)
#endif
        proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  filter_image->type=image->type;
  filter_view=DestroyCacheView(filter_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    filter_image=DestroyImage(filter_image);
  return(filter_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *GaussianBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GaussianBlurImage() Gaussian-blurs the default channels; it is a
  convenience wrapper around GaussianBlurImageChannel().
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  return(GaussianBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
/*
  GaussianBlurImageChannel() builds a width x width 2-D Gaussian kernel and
  convolves the selected channels with it.  A radius of 0 lets
  GetOptimalKernelWidth2D() pick the width.
*/
MagickExport Image *GaussianBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *blur_image;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    mid,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Fill the kernel row-major with normalized 2-D Gaussian weights centered
    at (0,0); width is odd so mid is the exact center offset.
  */
  mid=(ssize_t) width/2;
  i=0;
  for (v=(-mid); v <= mid; v++)
    for (u=(-mid); u <= mid; u++)
    {
      kernel[i]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      i++;
    }
  blur_image=ConvolveImageChannel(image,channel,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
% Image *MotionBlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetMotionBlurKernel() returns a normalized 1-D Gaussian convolution kernel
  of the given width, or NULL if the allocation fails.  The caller owns the
  returned memory.
*/
static double *GetMotionBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    sum;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double *) NULL)
    return(kernel);
  /*
    Half-Gaussian weights: tap i gets exp(-i^2/(2*sigma^2))/(sqrt(2*pi)*sigma),
    then the taps are scaled so they sum to one.
  */
  sum=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=sum;
  return(kernel);
}
/*
  MotionBlurImage() motion-blurs the default channels; it is a convenience
  wrapper around MotionBlurImageChannel().
*/
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  return(MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle,
    exception));
}
/*
  MotionBlurImageChannel() simulates motion blur along the given angle by
  averaging, for each pixel, a run of samples along the motion vector with
  1-D Gaussian weights.  When the image has a matte channel and
  OpacityChannel is selected, color samples are alpha-weighted and
  renormalized by the accumulated alpha (gamma).

  Fixes (consistency with the sibling entry points in this file):
    - added the missing assert(exception->signature == MagickSignature);
    - the opaque path now reads the index via GetIndexPixelComponent(indexes)
      like the matte path, instead of dereferencing the pointer directly
      (value-identical).
*/
MagickExport Image *MotionBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Precompute the integer sampling offsets along the motion vector.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      PixelPacket
        pixel;

      register const IndexPacket
        *restrict indexes;

      register double
        *restrict k;

      register ssize_t
        i;

      k=kernel;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: straight weighted sum of the samples along the
            motion vector.
          */
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            qixel.red+=(*k)*pixel.red;
            qixel.green+=(*k)*pixel.green;
            qixel.blue+=(*k)*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*GetIndexPixelComponent(indexes);
              }
            k++;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(qixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(qixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetOpacityPixelComponent(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(qixel.index));
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          /*
            Matte path: weight color samples by alpha and renormalize by
            the accumulated alpha (gamma).
          */
          alpha=0.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(&pixel));
            qixel.red+=(*k)*alpha*pixel.red;
            qixel.green+=(*k)*alpha*pixel.green;
            qixel.blue+=(*k)*alpha*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*alpha*GetIndexPixelComponent(indexes);
              }
            gamma+=(*k)*alpha;
            k++;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetOpacityPixelComponent(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(gamma*
              qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_MotionBlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishMagickMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImages method is:
%
% Image *PreviewImages(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"
  char
    factor[MaxTextExtent],
    label[MaxTextExtent];
  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;
  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;
  ImageInfo
    *preview_info;
  MagickBooleanType
    proceed;
  MontageInfo
    *montage_info;
  QuantizeInfo
    quantize_info;
  RectangleInfo
    geometry;
  register ssize_t
    i,
    x;
  size_t
    colors;
  ssize_t
    y;
  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Seed the per-tile parameters; each loop iteration below advances one or
    more of these so the nine tiles sample a range of parameter values.
  */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel);
    if (i == (NumberTiles/2))
      {
        /*
          The center tile is the unmodified thumbnail for reference.
        */
        (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g",
          degrees,2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImageChannel(preview_image,DefaultChannels,gamma);
        (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse);
        (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        /*
          Apply despeckle i+1 times; the intermediate results replace the
          thumbnail so each pass feeds the next.
        */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
          (size_t) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MaxTextExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MaxTextExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MaxTextExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MaxTextExtent);
            break;
          }
          case 4:
          {
            (void) CopyMagickString(factor,"laplacian",MaxTextExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"Poisson",MaxTextExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent);
            break;
          }
        }
        /*
          NOTE(review): the labels name AddNoiseImage() noise types but the
          tile is produced with StatisticImage(NonpeakStatistic,...) --
          confirm this is the intended operation.
        */
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          Threshold the clone that is actually displayed (previously the
          discarded thumbnail was thresholded instead).
        */
        (void) BilevelImage(preview_image,
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"threshold %g",
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*
          percentage/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g",
          degrees,degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=i/2;
        geometry.y=i/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,RGBColorspace,MagickFalse,threshold,
          threshold);
        (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g",
          0.5*degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MaxTextExtent];
        int
          file;
        MagickBooleanType
          status;
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double)
          preview_info->quality);
        /*
          Round-trip the tile through a temporary JPEG file so the label can
          report the compressed size at this quality setting.
        */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MaxTextExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image);
        if (status != MagickFalse)
          {
            Image
              *quality_image;
            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MaxTextExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MaxTextExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            /*
              Measure the JPEG tile, matching the sibling branches above
              (previously this measured the untouched thumbnail).
            */
            (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ",
              factor,(double) ((MagickOffsetType) GetBlobSize(preview_image)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RadialBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RadialBlurImage method is:
%
% Image *RadialBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RadialBlurImageChannel(const Image *image,const ChannelType channel,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: radial-blur the default channel set.
  */
  return(RadialBlurImageChannel(image,DefaultChannels,angle,exception));
}
MagickExport Image *RadialBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;
  Image
    *blur_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    bias;
  MagickRealType
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;
  PointInfo
    blur_center;
  register ssize_t
    i;
  size_t
    n;
  ssize_t
    y;
  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The blur rotates each pixel about the image center; precompute the
    sine/cosine of n sample angles spread symmetrically around zero.
  */
  blur_center.x=(double) image->columns/2.0;
  blur_center.y=(double) image->rows/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /* Sample count grows with the requested angle and the image diagonal. */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
  cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (MagickRealType *) NULL) ||
      (sin_theta == (MagickRealType *) NULL))
    {
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Center the sampled arc on the pixel's own angular position. */
  offset=theta*(MagickRealType) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;
    register IndexPacket
      *restrict blur_indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        qixel;
      MagickRealType
        normalize,
        radius;
      PixelPacket
        pixel;
      PointInfo
        center;
      register ssize_t
        i;
      size_t
        step;
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels near the center rotate through fewer distinct positions, so
        sample the angle table more sparsely (larger step) the closer the
        pixel is to the center; step is clamped to [1,n-1].
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      normalize=0.0;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: average the samples along the arc with equal weight.
          */
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            qixel.red+=pixel.red;
            qixel.green+=pixel.green;
            qixel.blue+=pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*indexes);
              }
            normalize+=1.0;
          }
          /* Guard against a zero sample count before normalizing. */
          normalize=1.0/(fabs((double) normalize) <= MagickEpsilon ? 1.0 :
            normalize);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(normalize*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(normalize*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(normalize*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetOpacityPixelComponent(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(normalize*
              qixel.index));
        }
      else
        {
          MagickRealType
            alpha,
            gamma;
          /*
            Matte path: weight color samples by their alpha so transparent
            samples do not darken the result; color channels normalize by the
            alpha sum (gamma), opacity by the plain sample count.
          */
          alpha=1.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(&pixel));
            qixel.red+=alpha*pixel.red;
            qixel.green+=alpha*pixel.green;
            qixel.blue+=alpha*pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=alpha*(*indexes);
              }
            gamma+=alpha;
            normalize+=1.0;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          normalize=1.0/(fabs((double) normalize) <= MagickEpsilon ? 1.0 :
            normalize);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetOpacityPixelComponent(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(gamma*
              qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RadialBlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
  sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
% Image *SelectiveBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType SelectiveContrast(const PixelPacket *p,
  const PixelPacket *q,const double threshold)
{
  const double
    contrast = PixelIntensity(p)-PixelIntensity(q);

  /*
    A neighbor participates in the selective blur only while its intensity
    difference from the center pixel stays below the contrast threshold.
  */
  return(fabs(contrast) < threshold ? MagickTrue : MagickFalse);
}
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: selectively blur the default channel set.
  */
  return(SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    threshold,exception));
}
MagickExport Image *SelectiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"
  CacheView
    *blur_view,
    *image_view;
  double
    *kernel;
  Image
    *blur_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    bias;
  register ssize_t
    i;
  size_t
    width;
  ssize_t
    j,
    u,
    v,
    y;
  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Build a width x width Gaussian kernel; width is odd so the kernel has a
    well-defined center tap.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) width/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;
      register const double
        *k;
      ssize_t
        u,
        v;
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    MagickRealType
      gamma;
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict blur_indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),image->columns+width,width,exception);
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;
      register const double
        *restrict k;
      register ssize_t
        u;
      ssize_t
        j,
        v;
      pixel=bias;
      k=kernel;
      gamma=0.0;
      j=0;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path.  Accumulate only the neighbors that pass the
            contrast test; gamma collects the weights actually used so the
            result can be renormalized.  The kernel pointer advances for
            EVERY tap (not just accepted ones) so each weight stays in
            registration with its spatial position.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                {
                  pixel.red+=(*k)*GetRedPixelComponent(p+u+j);
                  pixel.green+=(*k)*GetGreenPixelComponent(p+u+j);
                  pixel.blue+=(*k)*GetBluePixelComponent(p+u+j);
                  gamma+=(*k);
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
              if ((channel & RedChannel) != 0)
                SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              /*
                Each pass rewinds the kernel pointer; previously it continued
                past the end of the kernel buffer.
              */
              gamma=0.0;
              j=0;
              k=kernel;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      pixel.opacity+=(*k)*(p+u+j)->opacity;
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  SetOpacityPixelComponent(q,ClampToQuantum(gamma*
                    pixel.opacity));
                }
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              j=0;
              k=kernel;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      pixel.index+=(*k)*GetIndexPixelComponent(indexes+x+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(gamma*
                    pixel.index));
                }
            }
        }
      else
        {
          MagickRealType
            alpha;
          /*
            Matte path: color channels are weighted by each neighbor's alpha
            so transparent neighbors do not bleed color into the result.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                {
                  alpha=(MagickRealType) (QuantumScale*
                    GetAlphaPixelComponent(p+u+j));
                  pixel.red+=(*k)*alpha*GetRedPixelComponent(p+u+j);
                  pixel.green+=(*k)*alpha*GetGreenPixelComponent(p+u+j);
                  pixel.blue+=(*k)*alpha*GetBluePixelComponent(p+u+j);
                  pixel.opacity+=(*k)*GetOpacityPixelComponent(p+u+j);
                  gamma+=(*k)*alpha;
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
              if ((channel & RedChannel) != 0)
                SetRedPixelComponent(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetGreenPixelComponent(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetBluePixelComponent(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              /*
                Restart the opacity accumulator as well as the kernel
                pointer: the color pass above already summed opacity taps,
                so continuing to accumulate would double-count them.
              */
              gamma=0.0;
              j=0;
              k=kernel;
              pixel.opacity=bias.opacity;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      pixel.opacity+=(*k)*GetOpacityPixelComponent(p+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  /* Normalize by gamma, matching the opaque path above. */
                  SetOpacityPixelComponent(q,ClampToQuantum(gamma*
                    pixel.opacity));
                }
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              j=0;
              k=kernel;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      alpha=(MagickRealType) (QuantumScale*
                        GetAlphaPixelComponent(p+u+j));
                      pixel.index+=(*k)*alpha*GetIndexPixelComponent(indexes+x+
                        u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  SetIndexPixelComponent(blur_indexes+x,ClampToQuantum(gamma*
                    pixel.index));
                }
            }
        }
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SelectiveBlurImageChannel)
#endif
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishMagickMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (shade_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shade_image->exception);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from the spherical (azimuth,elevation)
    direction; its magnitude is QuantumRange so shade values land in the
    quantum domain.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image: one parallel task per row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  shade_view=AcquireCacheView(shade_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const PixelPacket
      *restrict p,
      *restrict s0,
      *restrict s1,
      *restrict s2;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read a 3-row window with one pixel of padding on each side so the 3x3
      normal estimate is defined at the image borders.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,image->columns+2,3,exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */
    s0=p+1;                  /* row above */
    s1=s0+image->columns+2;  /* current row */
    s2=s1+image->columns+2;  /* row below */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine the surface normal from the 3x3 intensity gradients and
        compute shading as the projection of the normal onto the light
        direction, normalized by the normal's length.
      */
      normal.x=(double) (PixelIntensity(s0-1)+PixelIntensity(s1-1)+
        PixelIntensity(s2-1)-PixelIntensity(s0+1)-PixelIntensity(s1+1)-
        PixelIntensity(s2+1));
      normal.y=(double) (PixelIntensity(s2-1)+PixelIntensity(s2)+
        PixelIntensity(s2+1)-PixelIntensity(s0-1)-PixelIntensity(s0)-
        PixelIntensity(s0+1));
      if ((normal.x == 0.0) && (normal.y == 0.0))
        shade=light.z;  /* flat surface: normal is (0,0,normal.z) */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=
                normal.x*normal.x+normal.y*normal.y+normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      if (gray != MagickFalse)
        {
          /*
            Bug fix: clamp the shade before assigning it to the unsigned
            Quantum channels -- shade can be negative (light.z is negative
            for a negative elevation) or exceed QuantumRange by rounding;
            the non-gray branch below already clamps.
          */
          SetRedPixelComponent(q,ClampToQuantum(shade));
          SetGreenPixelComponent(q,ClampToQuantum(shade));
          SetBluePixelComponent(q,ClampToQuantum(shade));
        }
      else
        {
          SetRedPixelComponent(q,ClampToQuantum(QuantumScale*shade*
            GetRedPixelComponent(s1)));
          SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*shade*
            GetGreenPixelComponent(s1)));
          SetBluePixelComponent(q,ClampToQuantum(QuantumScale*shade*
            GetBluePixelComponent(s1)));
        }
      q->opacity=s1->opacity;  /* opacity passes through unshaded */
      s0++;
      s1++;
      s2++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *SharpenImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: sharpen every channel in the default channel set.
  */
  return(SharpenImageChannel(image,DefaultChannels,radius,sigma,exception));
}
MagickExport Image *SharpenImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
double
*kernel,
normalize;
Image
*sharp_image;
register ssize_t
i;
size_t
width;
ssize_t
j,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
Build a width x width sharpening kernel: every tap is a negated 2-D
Gaussian sample; normalize accumulates their (negative) sum.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double *) AcquireQuantumMemory((size_t) width*width,sizeof(*kernel));
if (kernel == (double *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
normalize=0.0;
j=(ssize_t) width/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i];
i++;
}
}
/*
After the loop i == width*width (odd), so i/2 indexes the center tap.
Replacing it with -2*normalize -- a positive value, since every Gaussian
tap was negated -- turns the blur kernel into a sharpening kernel.
*/
kernel[i/2]=(double) ((-2.0)*normalize);
sharp_image=ConvolveImageChannel(image,channel,width,kernel,exception);
kernel=(double *) RelinquishMagickMemory(kernel);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a block defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: Choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"
CacheView
*image_view,
*spread_view;
Image
*spread_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
RandomInfo
**restrict random_info;
size_t
width;
ssize_t
y;
/*
Initialize spread image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (spread_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
{
InheritException(exception,&spread_image->exception);
spread_image=DestroyImage(spread_image);
return((Image *) NULL);
}
/*
Spread image: each destination pixel is sampled from a randomly displaced
position within a neighborhood derived from radius.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(spread_image,&bias);
/* Neighborhood extent: a 1-D kernel width derived from the radius. */
width=GetOptimalKernelWidth1D(radius,0.5);
/* One PRNG per OpenMP thread so rows can be processed without contention. */
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireCacheView(image);
spread_view=AcquireCacheView(spread_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) spread_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(spread_view);
pixel=bias;
for (x=0; x < (ssize_t) spread_image->columns; x++)
{
/*
Sample the source at (x,y) plus a random offset in [-width/2,+width/2]
on each axis; interpolation handles the fractional coordinates.
*/
(void) InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) x+width*(GetPseudoRandomValue(
random_info[id])-0.5),(double) y+width*(GetPseudoRandomValue(
random_info[id])-0.5),&pixel,exception);
SetPixelPacket(spread_image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SpreadImage)
#endif
proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
spread_view=DestroyCacheView(spread_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Skip-list based histogram of 16-bit pixel values, one list per channel
(red, green, blue, opacity, index), used by StatisticImageChannel() to
extract order statistics from a pixel neighborhood.
*/
#define ListChannels 5
typedef struct _ListNode
{
/*
next[level] holds the key of the successor node at each of up to 9
skip-list levels; count is the number of samples with this key;
signature marks the node live for the current list generation (compared
against PixelList.signature).
*/
size_t
next[9],
count,
signature;
} ListNode;
typedef struct _SkipList
{
/* Highest level currently in use. */
ssize_t
level;
/* 65537 nodes: one per 16-bit value plus the root/sentinel at 65536. */
ListNode
*nodes;
} SkipList;
typedef struct _PixelList
{
/*
length: number of samples in the neighborhood (width*height);
seed: PRNG state for skip-list level selection;
signature: generation marker bumped by ResetPixelList().
*/
size_t
length,
seed,
signature;
SkipList
lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  register ssize_t
    channel;

  /*
    Release each channel's skip-list node array, then the list itself.
    A NULL argument is tolerated and returned as-is.
  */
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
    if (pixel_list->lists[channel].nodes != (ListNode *) NULL)
      pixel_list->lists[channel].nodes=(ListNode *)
        RelinquishMagickMemory(pixel_list->lists[channel].nodes);
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    thread;

  /*
    Destroy every per-thread PixelList, then the array that held them.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (thread=0; thread < (ssize_t) GetOpenMPMaximumThreads(); thread++)
    if (pixel_list[thread] != (PixelList *) NULL)
      pixel_list[thread]=DestroyPixelList(pixel_list[thread]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  register ssize_t
    channel;

  /*
    Allocate a zeroed PixelList sized for a width x height neighborhood,
    with one 65537-node skip list (64K values + sentinel) per channel.
    Returns NULL on any allocation failure, freeing partial state.
  */
  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return(pixel_list);
  (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (channel=0; channel < ListChannels; channel++)
  {
    pixel_list->lists[channel].nodes=(ListNode *) AcquireQuantumMemory(
      65537UL,sizeof(*pixel_list->lists[channel].nodes));
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) ResetMagickMemory(pixel_list->lists[channel].nodes,0,
      65537UL*sizeof(*pixel_list->lists[channel].nodes));
  }
  pixel_list->signature=MagickSignature;
  return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  register ssize_t
    thread;

  size_t
    number_threads;

  /*
    Allocate one private PixelList per OpenMP thread so parallel rows never
    share histogram state.  On failure, release what was built and return
    NULL.
  */
  number_threads=GetOpenMPMaximumThreads();
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    pixel_list[thread]=AcquirePixelList(width,height);
    if (pixel_list[thread] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
/*
Insert the key `color' into the given channel's skip list.  The node array
is addressed directly by key (nodes[color]), so "insertion" means marking
the node live for this generation and splicing it into the per-level linked
lists; node 65536 is the root/sentinel.
*/
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
const size_t color)
{
register SkipList
*list;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node: mark it live for the current generation with one
sample.
*/
list=pixel_list->lists+channel;
list->nodes[color].signature=pixel_list->signature;
list->nodes[color].count=1;
/*
Determine where it belongs in the list: walk down from the top level,
recording at each level the last node whose key precedes color.
*/
search=65536UL;
for (level=list->level; level >= 0; level--)
{
while (list->nodes[search].next[level] < color)
search=list->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node: a linear-congruential step
per trial, promoting while bits 8 and 9 are both set (so each extra level
is taken with probability 1/4).
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
/* Cap at the maximum level (8) and at the current level plus two. */
if (level > 8)
level=8;
if (level > (list->level+2))
level=list->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > list->level)
{
list->level++;
update[list->level]=65536UL;
}
/*
Link the node into the skip-list at every level from `level' down to 0.
*/
do
{
list->nodes[color].next[level]=list->nodes[update[level]].next[level];
list->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static MagickPixelPacket GetMaximumPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    maximum,
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    Walk each channel's skip list at level 0, tracking the largest key seen
    until every sample has been accounted for.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    node=65536L;  /* start at the root/sentinel */
    tally=0;
    maximum=list->nodes[node].next[0];
    do
    {
      node=list->nodes[node].next[0];
      if (node > maximum)
        maximum=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) maximum;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetMeanPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  MagickRealType
    total;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    Compute the arithmetic mean for each channel: accumulate key*count over
    the sorted list, then divide by the sample total.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    node=65536L;  /* start at the root/sentinel */
    tally=0;
    total=0.0;
    do
    {
      node=list->nodes[node].next[0];
      total+=(MagickRealType) list->nodes[node].count*node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    total/=pixel_list->length;
    channels[channel]=(unsigned short) total;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetMedianPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    The median is the first key whose cumulative count exceeds half the
    number of samples; the list is already sorted, so one level-0 walk
    suffices.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    node=65536L;  /* start at the root/sentinel */
    tally=0;
    do
    {
      node=list->nodes[node].next[0];
      tally+=list->nodes[node].count;
    } while (tally <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) node;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetMinimumPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    minimum,
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    Walk each channel's skip list at level 0, tracking the smallest key seen
    until every sample has been accounted for.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    tally=0;
    node=65536UL;  /* start at the root/sentinel */
    minimum=list->nodes[node].next[0];
    do
    {
      node=list->nodes[node].next[0];
      if (node < minimum)
        minimum=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) minimum;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetModePixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    best,
    best_count,
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    The mode is the key with the highest sample count; scan each channel's
    list once, remembering the best candidate seen so far.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    node=65536L;  /* start at the root/sentinel */
    best=node;
    best_count=list->nodes[best].count;
    tally=0;
    do
    {
      node=list->nodes[node].next[0];
      if (list->nodes[node].count > best_count)
        {
          best=node;
          best_count=list->nodes[best].count;
        }
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) best;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetNonpeakPixelList(PixelList *pixel_list)
{
MagickPixelPacket
pixel;
register SkipList
*list;
register ssize_t
channel;
size_t
color,
next,
previous;
ssize_t
count;
unsigned short
channels[5];
/*
Find the median value for each channel, then shift it to a neighboring
value when the median sits at the very start or end of the sorted list
(its previous/next link is the sentinel node 65536).
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
next=list->nodes[color].next[0];
count=0;
/* Walk to the median, keeping the neighbors on either side of it. */
do
{
previous=color;
color=next;
next=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
channels[channel]=(unsigned short) color;
}
GetMagickPixelPacket((const Image *) NULL,&pixel);
pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
return(pixel);
}
static MagickPixelPacket GetStandardDeviationPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  MagickRealType
    sum,
    sum_squared,
    variance;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard deviation for each channel: one pass over the sorted
    skip list accumulates sum(x) and sum(x^2), then
    stddev = sqrt(E[x^2]-E[x]^2).
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* start at the root/sentinel */
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /*
        Performance fix: accumulate count*color^2 in one step instead of the
        former inner loop that added color^2 once per sample -- O(1) per
        distinct color rather than O(count).
      */
      sum_squared+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    variance=sum_squared-(sum*sum);
    if (variance < 0.0)
      variance=0.0;  /* guard: fp rounding can yield a tiny negative */
    channels[channel]=(unsigned short) sqrt(variance);
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
/*
Add one pixel sample to the per-channel skip lists: if the 16-bit value is
already present for the current generation (node signature matches the
list's), bump its count; otherwise link a fresh node.
*/
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
const IndexPacket *indexes,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(GetRedPixelComponent(pixel));
signature=pixel_list->lists[0].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[0].nodes[index].count++;
else
AddNodePixelList(pixel_list,0,index);
index=ScaleQuantumToShort(GetGreenPixelComponent(pixel));
signature=pixel_list->lists[1].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[1].nodes[index].count++;
else
AddNodePixelList(pixel_list,1,index);
index=ScaleQuantumToShort(GetBluePixelComponent(pixel));
signature=pixel_list->lists[2].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[2].nodes[index].count++;
else
AddNodePixelList(pixel_list,2,index);
index=ScaleQuantumToShort(GetOpacityPixelComponent(pixel));
signature=pixel_list->lists[3].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[3].nodes[index].count++;
else
AddNodePixelList(pixel_list,3,index);
if (image->colorspace == CMYKColorspace)
index=ScaleQuantumToShort(GetIndexPixelComponent(indexes));
/*
NOTE(review): for non-CMYK images the index (black) list is updated with
the opacity index left over from above -- presumably harmless because the
index channel result is only consumed for CMYK images; confirm against
StatisticImageChannel() before changing.
*/
signature=pixel_list->lists[4].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[4].nodes[index].count++;
else
AddNodePixelList(pixel_list,4,index);
}
static inline MagickRealType MagickAbsoluteValue(const MagickRealType x)
{
  /*
    Return |x|.
  */
  return(x < 0 ? -x : x);
}
static void ResetPixelList(PixelList *pixel_list)
{
  register ListNode
    *root;

  register SkipList
    *list;

  register ssize_t
    channel,
    level;

  /*
    Reset each channel's skip list to empty: level 0, with every root link
    pointing back at the sentinel node (65536).  Bumping the signature
    invalidates all nodes of the previous generation without clearing them.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    root=list->nodes+65536UL;
    list->level=0;
    for (level=0; level < 9; level++)
      root->next[level]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the statistic to the default channel set.
  */
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
MagickExport Image *StatisticImageChannel(const Image *image,
const ChannelType channel,const StatisticType type,const size_t width,
const size_t height,ExceptionInfo *exception)
{
/* A zero width/height means "pick a suitable neighborhood for me". */
#define StatisticWidth \
(width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width)
#define StatisticHeight \
(height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) : height)
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**restrict pixel_list;
ssize_t
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
{
InheritException(exception,&statistic_image->exception);
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
/* One skip-list histogram per OpenMP thread. */
pixel_list=AcquirePixelListThreadSet(StatisticWidth,StatisticHeight);
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
statistic_view=AcquireCacheView(statistic_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict statistic_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
/*
Read a window of StatisticHeight rows padded by the neighborhood extent
so the statistic is defined at the image borders.
*/
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) StatisticWidth/2L),y-
(ssize_t) (StatisticHeight/2L),image->columns+StatisticWidth,
StatisticHeight,exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
MagickPixelPacket
pixel;
register const IndexPacket
*restrict s;
register const PixelPacket
*restrict r;
register ssize_t
u,
v;
/*
Rebuild the histogram for this pixel's neighborhood from scratch.
*/
r=p;
s=indexes+x;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) StatisticHeight; v++)
{
for (u=0; u < (ssize_t) StatisticWidth; u++)
InsertPixelList(image,r+u,s+u,pixel_list[id]);
/* Advance one window row: image width plus the padding columns. */
r+=image->columns+StatisticWidth;
s+=image->columns+StatisticWidth;
}
/*
Seed `pixel' from a pixel near the neighborhood center so channels the
statistic does not overwrite keep a sensible value.
NOTE(review): the offset StatisticWidth*StatisticHeight/2 does not
account for the window's row stride (image->columns+StatisticWidth),
so it is presumably not the exact geometric center -- confirm intent
before changing.
*/
GetMagickPixelPacket(image,&pixel);
SetMagickPixelPacket(image,p+StatisticWidth*StatisticHeight/2,indexes+
StatisticWidth*StatisticHeight/2+x,&pixel);
switch (type)
{
case GradientStatistic:
{
/* Gradient: per-channel |max - min| over the neighborhood. */
MagickPixelPacket
maximum,
minimum;
minimum=GetMinimumPixelList(pixel_list[id]);
maximum=GetMaximumPixelList(pixel_list[id]);
pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
break;
}
case MaximumStatistic:
{
pixel=GetMaximumPixelList(pixel_list[id]);
break;
}
case MeanStatistic:
{
pixel=GetMeanPixelList(pixel_list[id]);
break;
}
case MedianStatistic:
default:
{
pixel=GetMedianPixelList(pixel_list[id]);
break;
}
case MinimumStatistic:
{
pixel=GetMinimumPixelList(pixel_list[id]);
break;
}
case ModeStatistic:
{
pixel=GetModePixelList(pixel_list[id]);
break;
}
case NonpeakStatistic:
{
pixel=GetNonpeakPixelList(pixel_list[id]);
break;
}
case StandardDeviationStatistic:
{
pixel=GetStandardDeviationPixelList(pixel_list[id]);
break;
}
}
/* Store only the channels the caller selected. */
if ((channel & RedChannel) != 0)
SetRedPixelComponent(q,ClampToQuantum(pixel.red));
if ((channel & GreenChannel) != 0)
SetGreenPixelComponent(q,ClampToQuantum(pixel.green));
if ((channel & BlueChannel) != 0)
SetBluePixelComponent(q,ClampToQuantum(pixel.blue));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetIndexPixelComponent(statistic_indexes+x,ClampToQuantum(pixel.index));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_StatisticImage)
#endif
proceed=SetImageProgress(image,StatisticImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
return(statistic_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double amount,const double threshold,
% ExceptionInfo *exception)
% Image *UnsharpMaskImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double amount,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o amount: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double amount,const double threshold,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: unsharp-mask the default channel set.
  */
  return(UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,amount,
    threshold,exception));
}
/*
  UnsharpMaskImageChannel() sharpens the selected channels of `image` with the
  unsharp-mask technique: blur the image (BlurImageChannel), then, per pixel,
  add back `amount` times the (original - blurred) difference wherever that
  difference exceeds `threshold` (expressed as a fraction of QuantumRange).
  Returns a newly allocated sharpened image, or NULL on failure; errors are
  reported through `exception`.
*/
MagickExport Image *UnsharpMaskImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
const double amount,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"
CacheView
*image_view,
*unsharp_view;
Image
*unsharp_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
MagickRealType
quantum_threshold;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
/*
  The blurred copy doubles as the output image; it is overwritten in place
  below with the sharpened result.
*/
unsharp_image=BlurImageChannel(image,channel,radius,sigma,exception);
if (unsharp_image == (Image *) NULL)
return((Image *) NULL);
/*
  Scale the caller's fractional threshold to quantum units once, up front.
*/
quantum_threshold=(MagickRealType) QuantumRange*threshold;
/*
Unsharp-mask image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
image_view=AcquireCacheView(image);
unsharp_view=AcquireCacheView(unsharp_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict unsharp_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
/* Another thread already failed: skip remaining rows cheaply. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
pixel=bias;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
  For each enabled channel: difference = original - blurred; keep the
  original when |2*difference| is below the threshold, otherwise add
  amount*difference back onto the original, clamped to quantum range.
*/
if ((channel & RedChannel) != 0)
{
pixel.red=GetRedPixelComponent(p)-(MagickRealType)
GetRedPixelComponent(q);
if (fabs(2.0*pixel.red) < quantum_threshold)
pixel.red=(MagickRealType) GetRedPixelComponent(p);
else
pixel.red=(MagickRealType) GetRedPixelComponent(p)+
(pixel.red*amount);
SetRedPixelComponent(q,ClampToQuantum(pixel.red));
}
if ((channel & GreenChannel) != 0)
{
/* NOTE(review): green/blue/opacity read q->member directly while red
   uses GetRedPixelComponent(q) — stylistically inconsistent upstream,
   but behaviorally equivalent. */
pixel.green=GetGreenPixelComponent(p)-(MagickRealType) q->green;
if (fabs(2.0*pixel.green) < quantum_threshold)
pixel.green=(MagickRealType) GetGreenPixelComponent(p);
else
pixel.green=(MagickRealType) GetGreenPixelComponent(p)+(pixel.green*amount);
SetGreenPixelComponent(q,ClampToQuantum(pixel.green));
}
if ((channel & BlueChannel) != 0)
{
pixel.blue=GetBluePixelComponent(p)-(MagickRealType) q->blue;
if (fabs(2.0*pixel.blue) < quantum_threshold)
pixel.blue=(MagickRealType) GetBluePixelComponent(p);
else
pixel.blue=(MagickRealType) GetBluePixelComponent(p)+(pixel.blue*amount);
SetBluePixelComponent(q,ClampToQuantum(pixel.blue));
}
if ((channel & OpacityChannel) != 0)
{
pixel.opacity=GetOpacityPixelComponent(p)-(MagickRealType) q->opacity;
if (fabs(2.0*pixel.opacity) < quantum_threshold)
pixel.opacity=(MagickRealType) GetOpacityPixelComponent(p);
else
pixel.opacity=GetOpacityPixelComponent(p)+(pixel.opacity*amount);
SetOpacityPixelComponent(q,ClampToQuantum(pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel.index=GetIndexPixelComponent(indexes+x)-(MagickRealType)
GetIndexPixelComponent(unsharp_indexes+x);
if (fabs(2.0*pixel.index) < quantum_threshold)
pixel.index=(MagickRealType) GetIndexPixelComponent(indexes+x);
else
pixel.index=(MagickRealType) GetIndexPixelComponent(indexes+x)+
(pixel.index*amount);
SetIndexPixelComponent(unsharp_indexes+x,ClampToQuantum(pixel.index));
}
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_UnsharpMaskImageChannel)
#endif
proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
unsharp_image->type=image->type;
unsharp_view=DestroyCacheView(unsharp_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
unsharp_image=DestroyImage(unsharp_image);
return(unsharp_image);
}
|
// Returns a malloc'd array of n Chebyshev points on [-1, 1].
// KIND '1': sin(pi*(n-2k-1)/(2n)) == cos((2k+1)pi/(2n)), the Chebyshev
// points of the first kind (roots of T_n); KIND '2' uses denominator 2n-2,
// giving the second-kind (extrema) points.  Only the first half is computed
// via sinpi (for accuracy near the ends); the second half is filled by odd
// symmetry x[n-1-k] = -x[k].  Caller owns and must free the result.
// NOTE(review): any other KIND returns uninitialized storage — presumably
// callers only ever pass '1' or '2'.
hierarchical_source.c | FLT * X(chebyshev_points)(char KIND, int n) {
int nd2 = n>>1;
FLT * x = malloc(n*sizeof(FLT));
if (KIND == '1') {
for (int k = 0; k <=nd2; k++)
x[k] = Y(__sinpi)((n-2*k-ONE(FLT))/(2*n));
for (int k = 0; k < nd2; k++)
x[n-1-k] = -x[k];
}
else if (KIND == '2') {
for (int k = 0; k <=nd2; k++)
x[k] = Y(__sinpi)((n-2*k-ONE(FLT))/(2*n-2));
for (int k = 0; k < nd2; k++)
x[n-1-k] = -x[k];
}
return x;
}
// Returns a malloc'd array of the n barycentric interpolation weights for the
// Chebyshev points of KIND '1' or '2' (up to a common scale factor, which is
// irrelevant for the barycentric formula).  First-kind weights are
// sin((2k+1)pi/(2n)) with alternating signs; second-kind weights are 1 with
// the two endpoints halved, again with alternating signs.  Caller frees.
FLT * X(chebyshev_barycentric_weights)(char KIND, int n) {
int nd2 = n>>1;
FLT * l = malloc(n*sizeof(FLT));
if (KIND == '1') {
for (int k = 0; k <=nd2; k++)
l[k] = Y(__sinpi)((2*k+ONE(FLT))/(2*n));
// Mirror the first half (even symmetry), then flip the sign of every
// other weight.
for (int k = 0; k < nd2; k++)
l[n-1-k] = l[k];
for (int k = 1; k < n; k += 2)
l[k] *= -1;
}
else if (KIND == '2') {
// Endpoint weight is halved; the mirroring below also halves l[n-1].
l[0] = ONE(FLT)/TWO(FLT);
for (int k = 1; k <=nd2; k++)
l[k] = 1;
for (int k = 0; k < nd2; k++)
l[n-1-k] = l[k];
for (int k = 1; k < n; k += 2)
l[k] *= -1;
}
return l;
}
// Evaluate a polynomial interpolant of degree n-1 through the distinct points
// y_j, 0 ≤ j < n at the distinct points x_i, 0 ≤ i < m. This is effected by a
// matrix-vector product with A_{i,j} and the polynomial interpolant's ordinates.
// Fills the m x n column-major matrix A with the barycentric interpolation
// operator from source nodes y (with barycentric weights l) to target points
// x: row i of A, applied to ordinates f(y_j), evaluates the interpolant at
// x[i].  First A_{i,j} = l_j/(x_i - y_j) is formed, then each row is
// normalized by its sum (second barycentric formula).  If x[i] coincides with
// some y_k, the division produced a non-finite entry; that row is replaced by
// the k-th unit row so the "interpolant" returns f(y_k) exactly.
void X(barycentricmatrix)(FLT * A, FLT * x, int m, FLT * y, FLT * l, int n) {
int k;
FLT yj, lj, temp;
for (int j = 0; j < n; j++) {
yj = y[j];
lj = l[j];
for (int i = 0; i < m; i++)
A[i+m*j] = lj/(x[i]-yj);
}
for (int i = 0; i < m; i++) {
// k records the column of the first non-finite entry (node collision);
// temp accumulates the row sum for normalization otherwise.
k = -1;
temp = 0;
for (int j = 0; j < n; j++) {
if (Y(isfinite)(A[i+m*j])) temp += A[i+m*j];
else {k = j; break;}
}
if (k != -1) {
for (int j = 0; j < n; j++)
A[i+m*j] = 0;
A[i+m*k] = 1;
}
else {
temp = 1/temp;
for (int j = 0; j < n; j++)
A[i+m*j] *= temp;
}
}
}
// Frees a dense matrix: its entry storage, then the struct itself.
void X(destroy_densematrix)(X(densematrix) * A) {
free(A->A);
free(A);
}
// Frees a low-rank matrix: the U/S/V factors, the per-thread scratch buffers
// t1/t2, then the struct itself.
void X(destroy_lowrankmatrix)(X(lowrankmatrix) * L) {
free(L->U);
free(L->S);
free(L->V);
free(L->t1);
free(L->t2);
free(L);
}
// Recursively frees a hierarchical matrix.  hash(m,n) encodes each block's
// type: 1 = nested hierarchical, 2 = dense, 3 = low-rank.  The locals M and N
// are referenced by the hash/hierarchicalmatrices/densematrices/
// lowrankmatrices accessor macros (assumed defined in the including header)
// for column-major block indexing — do not remove them as "unused".
void X(destroy_hierarchicalmatrix)(X(hierarchicalmatrix) * H) {
int M = H->M, N = H->N;
for (int n = 0; n < N; n++) {
for (int m = 0; m < M; m++) {
switch (H->hash(m, n)) {
case 1: X(destroy_hierarchicalmatrix)(H->hierarchicalmatrices(m, n)); break;
case 2: X(destroy_densematrix)(H->densematrices(m, n)); break;
case 3: X(destroy_lowrankmatrix)(H->lowrankmatrices(m, n)); break;
}
}
}
// Free the block-pointer tables and the type table, then the struct.
free(H->hierarchicalmatrices);
free(H->densematrices);
free(H->lowrankmatrices);
free(H->hash);
free(H);
}
// Allocates an m x n dense matrix with all entries zero-initialized.
X(densematrix) * X(calloc_densematrix)(int m, int n) {
    X(densematrix) * A = malloc(sizeof(X(densematrix)));
    A->m = m;
    A->n = n;
    A->A = calloc(m*n, sizeof(FLT));
    return A;
}
// Allocates an m x n dense matrix with uninitialized entries.
X(densematrix) * X(malloc_densematrix)(int m, int n) {
    X(densematrix) * A = malloc(sizeof(X(densematrix)));
    A->m = m;
    A->n = n;
    A->A = malloc(m*n*sizeof(FLT));
    return A;
}
// Samples the kernel f on the grid x[i] x y[j] into a new column-major dense
// matrix: entry (r, c) holds f(x[i.start+r], y[j.start+c]).
X(densematrix) * X(sample_densematrix)(FLT (*f)(FLT x, FLT y), FLT * x, FLT * y, unitrange i, unitrange j) {
    int nrows = i.stop-i.start, ncols = j.stop-j.start;
    X(densematrix) * AD = X(malloc_densematrix)(nrows, ncols);
    FLT * A = AD->A;
    for (int c = 0; c < ncols; c++)
        for (int r = 0; r < nrows; r++)
            A[r+nrows*c] = f(x[i.start+r], y[j.start+c]);
    return AD;
}
// Like sample_densematrix, but the ordinates are carried as two-term sums
// ylo[c] + yhi[c] and handed to f separately so the kernel can difference
// them accurately (compensated evaluation).
X(densematrix) * X(sample_accurately_densematrix)(FLT (*f)(FLT x, FLT ylo, FLT yhi), FLT * x, FLT * ylo, FLT * yhi, unitrange i, unitrange j) {
    int nrows = i.stop-i.start, ncols = j.stop-j.start;
    X(densematrix) * AD = X(malloc_densematrix)(nrows, ncols);
    FLT * A = AD->A;
    for (int c = 0; c < ncols; c++)
        for (int r = 0; r < nrows; r++)
            A[r+nrows*c] = f(x[i.start+r], ylo[j.start+c], yhi[j.start+c]);
    return AD;
}
// Allocates a zero-initialized m x n rank-r low-rank matrix.
// N == '2' stores a factorization U*V^T with an r-vector S;
// N == '3' stores U*S*V^T with an r x r core S.
// t1/t2 are per-thread scratch vectors of length r for each thread.
X(lowrankmatrix) * X(calloc_lowrankmatrix)(char N, int m, int n, int r) {
    int sz = (N == '2') ? r : (N == '3') ? r*r : 0;
    X(lowrankmatrix) * L = malloc(sizeof(X(lowrankmatrix)));
    L->U = calloc(m*r, sizeof(FLT));
    L->S = calloc(sz, sizeof(FLT));
    L->V = calloc(n*r, sizeof(FLT));
    L->t1 = calloc(r*FT_GET_MAX_THREADS(), sizeof(FLT));
    L->t2 = calloc(r*FT_GET_MAX_THREADS(), sizeof(FLT));
    L->m = m;
    L->n = n;
    L->r = r;
    L->p = FT_GET_MAX_THREADS();
    L->N = N;
    return L;
}
// Like calloc_lowrankmatrix but leaves the U/S/V factors uninitialized; the
// per-thread scratch t1/t2 is still zero-initialized.
X(lowrankmatrix) * X(malloc_lowrankmatrix)(char N, int m, int n, int r) {
    int sz = (N == '2') ? r : (N == '3') ? r*r : 0;
    X(lowrankmatrix) * L = malloc(sizeof(X(lowrankmatrix)));
    L->U = malloc(m*r*sizeof(FLT));
    L->S = malloc(sz*sizeof(FLT));
    L->V = malloc(n*r*sizeof(FLT));
    L->t1 = calloc(r*FT_GET_MAX_THREADS(), sizeof(FLT));
    L->t2 = calloc(r*FT_GET_MAX_THREADS(), sizeof(FLT));
    L->m = m;
    L->n = n;
    L->r = r;
    L->p = FT_GET_MAX_THREADS();
    L->N = N;
    return L;
}
// Builds a rank-BLOCKRANK approximation of the kernel block f(x[i], y[j]) by
// Chebyshev interpolation in both variables: the core S samples f at an
// r x r tensor grid of first-kind Chebyshev points mapped to the block's
// bounding intervals [a,b] x [c,d], and U/V are the barycentric interpolation
// matrices from those Chebyshev nodes to the actual points x[i] and y[j].
// Assumes x and y are increasing on the given ranges (so a<=b, c<=d).
X(lowrankmatrix) * X(sample_lowrankmatrix)(FLT (*f)(FLT x, FLT y), FLT * x, FLT * y, unitrange i, unitrange j) {
int M = i.stop-i.start, N = j.stop-j.start, r = BLOCKRANK;
X(lowrankmatrix) * L = X(malloc_lowrankmatrix)('3', M, N, r);
// Two copies of the same reference nodes: xc1 is mapped to [a,b], xc2 to [c,d].
FLT * xc1 = X(chebyshev_points)('1', r);
FLT * xc2 = X(chebyshev_points)('1', r);
FLT * lc = X(chebyshev_barycentric_weights)('1', r);
FLT a = x[i.start], b = x[i.stop-1];
FLT c = y[j.start], d = y[j.stop-1];
// Affine maps [-1,1] -> [a,b] and [-1,1] -> [c,d]: midpoint + half-width*t.
FLT ab2 = (a+b)/2, ba2 = (b-a)/2;
FLT cd2 = (c+d)/2, dc2 = (d-c)/2;
for (int p = 0; p < r; p++)
xc1[p] = ab2+ba2*xc1[p];
for (int q = 0; q < r; q++)
xc2[q] = cd2+dc2*xc2[q];
for (int q = 0; q < r; q++)
for (int p = 0; p < r; p++)
L->S[p+r*q] = f(xc1[p], xc2[q]);
// The same (scale-invariant) weights lc serve both mapped node sets.
X(barycentricmatrix)(L->U, x+i.start, M, xc1, lc, r);
X(barycentricmatrix)(L->V, y+j.start, N, xc2, lc, r);
free(xc1);
free(xc2);
free(lc);
return L;
}
// Allocates an M x N block hierarchical matrix shell.  The three pointer
// tables are uninitialized; the hash table of block types is zeroed so every
// block starts out "untyped" until the sampler fills it in.
X(hierarchicalmatrix) * X(malloc_hierarchicalmatrix)(const int M, const int N) {
    X(hierarchicalmatrix) * H = malloc(sizeof(X(hierarchicalmatrix)));
    H->M = M;
    H->N = N;
    H->hierarchicalmatrices = malloc(M*N*sizeof(X(hierarchicalmatrix) *));
    H->densematrices = malloc(M*N*sizeof(X(densematrix) *));
    H->lowrankmatrices = malloc(M*N*sizeof(X(lowrankmatrix) *));
    H->hash = calloc(M*N, sizeof(int));
    return H;
}
// Assumes x and y are increasing sequences
// Distance between the intervals spanned by x over range i and y over range j.
// Returns 0 when the intervals overlap; otherwise the positive gap between
// them.  Both x and y are assumed increasing (see comment above).
static FLT X(dist)(FLT * x, FLT * y, unitrange i, unitrange j) {
if (y[j.start] > x[i.stop-1])
return y[j.start] - x[i.stop-1];
else if (y[j.start] >= x[i.start])
return ZERO(FLT);
else if (y[j.stop-1] >= x[i.start])
return ZERO(FLT);
else
return x[i.start] - y[j.stop-1];
}
// Diameter (length of the spanned interval) of the increasing sequence x
// restricted to the unitrange i.
static FLT X(diam)(FLT * x, unitrange i) {
    return x[i.stop-1] - x[i.start];
}
// Recursively builds a 2 x 2 block hierarchical approximation of the kernel
// block f(x[i], y[j]).  SPLITTING 'I' bisects the index ranges; 'G' bisects
// geometrically at the midpoint of the spanned intervals (via indsplit).
// For each of the four children: small blocks are stored densely; blocks that
// are well-separated (gap >= the smaller of the two diameters, the standard
// admissibility test) get a low-rank Chebyshev approximation; everything else
// recurses.  hash(m,n) records the choice: 1 = hierarchical, 2 = dense,
// 3 = low-rank.  HH/HD/HL index the 2 x 2 children column-major: 0=(0,0),
// 1=(1,0), 2=(0,1), 3=(1,1).
X(hierarchicalmatrix) * X(sample_hierarchicalmatrix)(FLT (*f)(FLT x, FLT y), FLT * x, FLT * y, unitrange i, unitrange j, char SPLITTING) {
int M = 2, N = 2;
X(hierarchicalmatrix) * H = X(malloc_hierarchicalmatrix)(M, N);
X(hierarchicalmatrix) ** HH = H->hierarchicalmatrices;
X(densematrix) ** HD = H->densematrices;
X(lowrankmatrix) ** HL = H->lowrankmatrices;
unitrange i1, i2, j1, j2;
if (SPLITTING == 'I') {
i1.start = i.start;
i1.stop = i2.start = i.start + ((i.stop-i.start)>>1);
i2.stop = i.stop;
j1.start = j.start;
j1.stop = j2.start = j.start + ((j.stop-j.start)>>1);
j2.stop = j.stop;
}
else if (SPLITTING == 'G') {
X(indsplit)(x, i, &i1, &i2, x[i.start], x[i.stop-1]);
X(indsplit)(y, j, &j1, &j2, y[j.start], y[j.stop-1]);
}
// Child (0,0).
if (i1.stop-i1.start < BLOCKSIZE || j1.stop-j1.start < BLOCKSIZE) {
HD[0] = X(sample_densematrix)(f, x, y, i1, j1);
H->hash(0, 0) = 2;
}
else if (X(dist)(x, y, i1, j1) >= MIN(X(diam)(x, i1), X(diam)(y, j1))) {
HL[0] = X(sample_lowrankmatrix)(f, x, y, i1, j1);
H->hash(0, 0) = 3;
}
else {
HH[0] = X(sample_hierarchicalmatrix)(f, x, y, i1, j1, SPLITTING);
H->hash(0, 0) = 1;
}
// Child (1,0).
if (i2.stop-i2.start < BLOCKSIZE || j1.stop-j1.start < BLOCKSIZE) {
HD[1] = X(sample_densematrix)(f, x, y, i2, j1);
H->hash(1, 0) = 2;
}
else if (X(dist)(x, y, i2, j1) >= MIN(X(diam)(x, i2), X(diam)(y, j1))) {
HL[1] = X(sample_lowrankmatrix)(f, x, y, i2, j1);
H->hash(1, 0) = 3;
}
else {
HH[1] = X(sample_hierarchicalmatrix)(f, x, y, i2, j1, SPLITTING);
H->hash(1, 0) = 1;
}
// Child (0,1).
if (i1.stop-i1.start < BLOCKSIZE || j2.stop-j2.start < BLOCKSIZE) {
HD[2] = X(sample_densematrix)(f, x, y, i1, j2);
H->hash(0, 1) = 2;
}
else if (X(dist)(x, y, i1, j2) >= MIN(X(diam)(x, i1), X(diam)(y, j2))) {
HL[2] = X(sample_lowrankmatrix)(f, x, y, i1, j2);
H->hash(0, 1) = 3;
}
else {
HH[2] = X(sample_hierarchicalmatrix)(f, x, y, i1, j2, SPLITTING);
H->hash(0, 1) = 1;
}
// Child (1,1).
if (i2.stop-i2.start < BLOCKSIZE || j2.stop-j2.start < BLOCKSIZE) {
HD[3] = X(sample_densematrix)(f, x, y, i2, j2);
H->hash(1, 1) = 2;
}
else if (X(dist)(x, y, i2, j2) >= MIN(X(diam)(x, i2), X(diam)(y, j2))) {
HL[3] = X(sample_lowrankmatrix)(f, x, y, i2, j2);
H->hash(1, 1) = 3;
}
else {
HH[3] = X(sample_hierarchicalmatrix)(f, x, y, i2, j2, SPLITTING);
H->hash(1, 1) = 1;
}
H->m = i.stop-i.start;
H->n = j.stop-j.start;
return H;
}
// Same recursion as sample_hierarchicalmatrix, but dense (near-field) blocks
// are sampled with the compensated kernel f2, which receives the ordinate as
// the two-term sum ylo + yhi so that x - y can be formed accurately when the
// points nearly coincide.  Low-rank (far-field) blocks still use the plain
// kernel f with y, where cancellation is not a concern.
X(hierarchicalmatrix) * X(sample_accurately_hierarchicalmatrix)(FLT (*f)(FLT x, FLT y), FLT (*f2)(FLT x, FLT ylo, FLT yhi), FLT * x, FLT * y, FLT * ylo, FLT * yhi, unitrange i, unitrange j, char SPLITTING) {
int M = 2, N = 2;
X(hierarchicalmatrix) * H = X(malloc_hierarchicalmatrix)(M, N);
X(hierarchicalmatrix) ** HH = H->hierarchicalmatrices;
X(densematrix) ** HD = H->densematrices;
X(lowrankmatrix) ** HL = H->lowrankmatrices;
unitrange i1, i2, j1, j2;
if (SPLITTING == 'I') {
i1.start = i.start;
i1.stop = i2.start = i.start + ((i.stop-i.start)>>1);
i2.stop = i.stop;
j1.start = j.start;
j1.stop = j2.start = j.start + ((j.stop-j.start)>>1);
j2.stop = j.stop;
}
else if (SPLITTING == 'G') {
X(indsplit)(x, i, &i1, &i2, x[i.start], x[i.stop-1]);
X(indsplit)(y, j, &j1, &j2, y[j.start], y[j.stop-1]);
}
// Child (0,0).
if (i1.stop-i1.start < BLOCKSIZE || j1.stop-j1.start < BLOCKSIZE) {
HD[0] = X(sample_accurately_densematrix)(f2, x, ylo, yhi, i1, j1);
H->hash(0, 0) = 2;
}
else if (X(dist)(x, y, i1, j1) >= MIN(X(diam)(x, i1), X(diam)(y, j1))) {
HL[0] = X(sample_lowrankmatrix)(f, x, y, i1, j1);
H->hash(0, 0) = 3;
}
else {
HH[0] = X(sample_accurately_hierarchicalmatrix)(f, f2, x, y, ylo, yhi, i1, j1, SPLITTING);
H->hash(0, 0) = 1;
}
// Child (1,0).
if (i2.stop-i2.start < BLOCKSIZE || j1.stop-j1.start < BLOCKSIZE) {
HD[1] = X(sample_accurately_densematrix)(f2, x, ylo, yhi, i2, j1);
H->hash(1, 0) = 2;
}
else if (X(dist)(x, y, i2, j1) >= MIN(X(diam)(x, i2), X(diam)(y, j1))) {
HL[1] = X(sample_lowrankmatrix)(f, x, y, i2, j1);
H->hash(1, 0) = 3;
}
else {
HH[1] = X(sample_accurately_hierarchicalmatrix)(f, f2, x, y, ylo, yhi, i2, j1, SPLITTING);
H->hash(1, 0) = 1;
}
// Child (0,1).
if (i1.stop-i1.start < BLOCKSIZE || j2.stop-j2.start < BLOCKSIZE) {
HD[2] = X(sample_accurately_densematrix)(f2, x, ylo, yhi, i1, j2);
H->hash(0, 1) = 2;
}
else if (X(dist)(x, y, i1, j2) >= MIN(X(diam)(x, i1), X(diam)(y, j2))) {
HL[2] = X(sample_lowrankmatrix)(f, x, y, i1, j2);
H->hash(0, 1) = 3;
}
else {
HH[2] = X(sample_accurately_hierarchicalmatrix)(f, f2, x, y, ylo, yhi, i1, j2, SPLITTING);
H->hash(0, 1) = 1;
}
// Child (1,1).
if (i2.stop-i2.start < BLOCKSIZE || j2.stop-j2.start < BLOCKSIZE) {
HD[3] = X(sample_accurately_densematrix)(f2, x, ylo, yhi, i2, j2);
H->hash(1, 1) = 2;
}
else if (X(dist)(x, y, i2, j2) >= MIN(X(diam)(x, i2), X(diam)(y, j2))) {
HL[3] = X(sample_lowrankmatrix)(f, x, y, i2, j2);
H->hash(1, 1) = 3;
}
else {
HH[3] = X(sample_accurately_hierarchicalmatrix)(f, f2, x, y, ylo, yhi, i2, j2, SPLITTING);
H->hash(1, 1) = 1;
}
H->m = i.stop-i.start;
H->n = j.stop-j.start;
return H;
}
// Dimension query: k == 1 -> row count, k == 2 -> column count, otherwise 1.
static inline int X(size_densematrix)(X(densematrix) * A, int k) {
    switch (k) {
        case 1: return A->m;
        case 2: return A->n;
        default: return 1;
    }
}
// Dimension query: k == 1 -> row count, k == 2 -> column count, otherwise 1.
static inline int X(size_lowrankmatrix)(X(lowrankmatrix) * L, int k) {
    switch (k) {
        case 1: return L->m;
        case 2: return L->n;
        default: return 1;
    }
}
// Dimension query: k == 1 -> row count, k == 2 -> column count, otherwise 1.
static inline int X(size_hierarchicalmatrix)(X(hierarchicalmatrix) * H, int k) {
    switch (k) {
        case 1: return H->m;
        case 2: return H->n;
        default: return 1;
    }
}
// Size (dimension k) of sub-block (m, n) of H, dispatching on the block's
// type recorded in hash(m, n).  The locals M and N are consumed by the
// hash/hierarchicalmatrices/densematrices/lowrankmatrices accessor macros
// (assumed defined in the including header) — they are not dead code.
static inline int X(blocksize_hierarchicalmatrix)(X(hierarchicalmatrix) * H, int m, int n, int k) {
int M = H->M, N = H->N;
switch (H->hash(m, n)) {
case 1: return X(size_hierarchicalmatrix)(H->hierarchicalmatrices(m, n), k);
case 2: return X(size_densematrix)(H->densematrices(m, n), k);
case 3: return X(size_lowrankmatrix)(H->lowrankmatrices(m, n), k);
default: return 1;
}
}
// Storage footprint in bytes of a dense block: m*n entries of FLT.
size_t X(summary_size_densematrix)(X(densematrix) * A) {return sizeof(FLT)*A->m*A->n;};
// Storage footprint in bytes of a low-rank block: U (m*r) + V (n*r) + S,
// where S holds r entries for form '2' and r*r entries for form '3'.
// Scratch buffers t1/t2 are deliberately not counted.
size_t X(summary_size_lowrankmatrix)(X(lowrankmatrix) * L) {return L->N == '2' ? sizeof(FLT)*(L->m+L->n+1)*L->r : L->N == '3' ? sizeof(FLT)*(L->m+L->n+L->r)*L->r : 0;};
// Total storage footprint in bytes of all leaf blocks of H, computed by
// recursive descent over the block type table hash(m, n).  The locals M and N
// are also consumed by the accessor macros.
size_t X(summary_size_hierarchicalmatrix)(X(hierarchicalmatrix) * H) {
size_t M = H->M, N = H->N, S = 0;
for (int n = 0; n < N; n++)
for (int m = 0; m < M; m++)
switch (H->hash(m, n)) {
case 1: S += X(summary_size_hierarchicalmatrix)(H->hierarchicalmatrices(m, n)); break;
case 2: S += X(summary_size_densematrix)(H->densematrices(m, n)); break;
case 3: S += X(summary_size_lowrankmatrix)(H->lowrankmatrices(m, n)); break;
}
return S;
}
// Depth of the hierarchical block tree below H: 0 when no child is itself
// hierarchical, otherwise 1 plus the deepest hierarchical child.
int X(nlevels_hierarchicalmatrix)(X(hierarchicalmatrix) * H) {
int M = H->M, N = H->N, L = 0;
for (int n = 0; n < N; n++)
for (int m = 0; m < M; m++)
if (H->hash(m, n) == 1)
L = MAX(L, 1+X(nlevels_hierarchicalmatrix)(H->hierarchicalmatrices(m, n)));
return L;
}
// Squared Frobenius norm of a dense block: the sum of squares of all entries.
static inline FLT X(norm2_densematrix)(X(densematrix) * A) {
    FLT acc = 0;
    FLT * data = A->A;
    int total = A->m*A->n;
    for (int k = 0; k < total; k++)
        acc += data[k]*data[k];
    return acc;
}
// Squared Frobenius norm of a low-rank block without forming the product:
// ||UVᵀ||_F² = tr(UᵀU VᵀV) for form '2', and ||USVᵀ||_F² = tr(UᵀUS VᵀVSᵀ)
// for form '3'.  Only r x r Gram-type quantities are accumulated, so the cost
// is O((m+n)r²) / O((m+n)r³) rather than O(mn).
static inline FLT X(norm2_lowrankmatrix)(X(lowrankmatrix) * L) {
FLT ret = 0;
int m = L->m, n = L->n, r = L->r;
FLT * U = L->U, * S = L->S, * V = L->V;
if (L->N == '2') {
// Accumulate (UᵀU)_{l,k} (VᵀV)_{k,l} over all k, l.
for (int l = 0; l < r; l++) {
for (int k = 0; k < r; k++) {
FLT tU = 0, tV = 0;
for (int i = 0; i < m; i++)
tU += U[i+l*m]*U[i+k*m];
for (int j = 0; j < n; j++)
tV += V[j+k*n]*V[j+l*n];
ret += tU*tV;
}
}
}
else if (L->N == '3') {
// tUS = (UᵀU S)_{l,k} and tVS = (VᵀV Sᵀ)_{k,l}; their product summed
// over k, l yields the trace above.
for (int l = 0; l < r; l++) {
for (int k = 0; k < r; k++) {
FLT tUS = 0, tVS = 0;
for (int j = 0; j < r; j++) {
FLT tU = 0;
for (int i = 0; i < m; i++)
tU += U[i+l*m]*U[i+j*m];
tUS += tU*S[j+k*r];
}
for (int i = 0; i < r; i++) {
FLT tV = 0;
for (int j = 0; j < n; j++)
tV += V[j+k*n]*V[j+i*n];
tVS += tV*S[l+i*r];
}
ret += tUS*tVS;
}
}
}
return ret;
}
// Squared Frobenius norm of H: the sum of the squared norms of all leaf
// blocks, gathered by recursive descent over the block type table.
static inline FLT X(norm2_hierarchicalmatrix)(X(hierarchicalmatrix) * H) {
int M = H->M, N = H->N;
FLT ret = 0;
for (int n = 0; n < N; n++)
for (int m = 0; m < M; m++)
switch (H->hash(m, n)) {
case 1: ret += X(norm2_hierarchicalmatrix)(H->hierarchicalmatrices(m, n)); break;
case 2: ret += X(norm2_densematrix)(H->densematrices(m, n)); break;
case 3: ret += X(norm2_lowrankmatrix)(H->lowrankmatrices(m, n)); break;
}
return ret;
}
// Frobenius norms: square root of the corresponding norm2_* accumulators.
FLT X(norm_densematrix)(X(densematrix) * A) {return Y(sqrt)(X(norm2_densematrix)(A));}
FLT X(norm_lowrankmatrix)(X(lowrankmatrix) * L) {return Y(sqrt)(X(norm2_lowrankmatrix)(L));}
FLT X(norm_hierarchicalmatrix)(X(hierarchicalmatrix) * H) {return Y(sqrt)(X(norm2_hierarchicalmatrix)(H));}
// In-place row scaling: A <- diag(alpha*x) * A, i.e. row i is multiplied by
// alpha*x[i].
void X(scale_rows_densematrix)(FLT alpha, FLT * x, X(densematrix) * AD) {
    int m = AD->m, n = AD->n;
    FLT * col = AD->A;
    for (int j = 0; j < n; j++, col += m)
        for (int i = 0; i < m; i++)
            col[i] *= alpha*x[i];
}
// In-place column scaling: A <- A * diag(alpha*x), i.e. column j is
// multiplied by alpha*x[j].
void X(scale_columns_densematrix)(FLT alpha, FLT * x, X(densematrix) * AD) {
    int m = AD->m, n = AD->n;
    FLT * col = AD->A;
    for (int j = 0; j < n; j++, col += m) {
        FLT scale = alpha*x[j];
        for (int i = 0; i < m; i++)
            col[i] *= scale;
    }
}
// Row scaling of a low-rank block: since the block is U(S)Vᵀ, scaling its
// rows is equivalent to scaling the rows of the left factor U in place.
void X(scale_rows_lowrankmatrix)(FLT alpha, FLT * x, X(lowrankmatrix) * L) {
    int m = L->m, r = L->r;
    FLT * col = L->U;
    for (int j = 0; j < r; j++, col += m)
        for (int i = 0; i < m; i++)
            col[i] *= alpha*x[i];
}
// Column scaling of a low-rank block: scaling the columns of U(S)Vᵀ is
// equivalent to scaling the rows of the right factor V in place.
void X(scale_columns_lowrankmatrix)(FLT alpha, FLT * x, X(lowrankmatrix) * L) {
    int n = L->n, r = L->r;
    FLT * col = L->V;
    for (int j = 0; j < r; j++, col += n)
        for (int i = 0; i < n; i++)
            col[i] *= alpha*x[i];
}
// Row scaling of a hierarchical matrix: each sub-block in block-row m is
// scaled by the slice of x starting at that block-row's offset p.  Row
// heights are read from block column N-1, assuming all blocks in a block-row
// share the same height.
void X(scale_rows_hierarchicalmatrix)(FLT alpha, FLT * x, X(hierarchicalmatrix) * H) {
int M = H->M, N = H->N;
for (int n = 0; n < N; n++) {
int p = 0;
for (int m = 0; m < M; m++) {
switch (H->hash(m, n)) {
case 1: X(scale_rows_hierarchicalmatrix)(alpha, x+p, H->hierarchicalmatrices(m, n)); break;
case 2: X(scale_rows_densematrix)(alpha, x+p, H->densematrices(m, n)); break;
case 3: X(scale_rows_lowrankmatrix)(alpha, x+p, H->lowrankmatrices(m, n)); break;
}
p += X(blocksize_hierarchicalmatrix)(H, m, N-1, 1);
}
}
}
// Column scaling of a hierarchical matrix: each sub-block in block-column n
// is scaled by the slice of x starting at offset q, the cumulative width of
// the preceding block-columns (widths read from block-row 0, assuming all
// blocks in a block-column share the same width).
void X(scale_columns_hierarchicalmatrix)(FLT alpha, FLT * x, X(hierarchicalmatrix) * H) {
int M = H->M, N = H->N;
int q = 0;
for (int n = 0; n < N; n++) {
for (int m = 0; m < M; m++) {
switch (H->hash(m, n)) {
case 1: X(scale_columns_hierarchicalmatrix)(alpha, x+q, H->hierarchicalmatrices(m, n)); break;
case 2: X(scale_columns_densematrix)(alpha, x+q, H->densematrices(m, n)); break;
case 3: X(scale_columns_lowrankmatrix)(alpha, x+q, H->lowrankmatrices(m, n)); break;
}
}
q += X(blocksize_hierarchicalmatrix)(H, 0, n, 2);
}
}
// y ← α*A*x + β*y (TRANS 'N') or y ← α*Aᵀ*x + β*y (TRANS 'T').
// Reference column-major matrix-vector product with leading dimension LDA.
// As in BLAS xGEMV, β == 0 zeroes y outright (so y may be uninitialized)
// instead of multiplying by zero.
void X(gemv)(char TRANS, int m, int n, FLT alpha, FLT * A, int LDA, FLT * x, FLT beta, FLT * y) {
FLT t;
if (TRANS == 'N') {
if (beta != 1) {
if (beta == 0)
for (int i = 0; i < m; i++)
y[i] = 0;
else
for (int i = 0; i < m; i++)
y[i] = beta*y[i];
}
// Column-sweep: accumulate alpha*x[j] times column j into y.
for (int j = 0; j < n; j++) {
t = alpha*x[j];
for (int i = 0; i < m; i++)
y[i] += A[i+j*LDA]*t;
}
}
else if (TRANS == 'T') {
if (beta != 1) {
if (beta == 0)
for (int i = 0; i < n; i++)
y[i] = 0;
else
for (int i = 0; i < n; i++)
y[i] = beta*y[i];
}
// Dot product of column i of A with x gives entry i of Aᵀ*x.
for (int i = 0; i < n; i++) {
t = 0;
for (int j = 0; j < m; j++)
t += A[j+i*LDA]*x[j];
y[i] += alpha*t;
}
}
}
// C ← α*A*B + β*C (TRANS 'N') or C ← α*Aᵀ*B + β*C (TRANS 'T'), column-major
// with leading dimensions LDA/LDB/LDC.  A is m x n; B has p columns.
// When a CBLAS backend is configured for this precision, forward to it;
// otherwise use the reference loop below (a gemv column-sweep per column
// of B, with the same β == 0 convention as X(gemv)).
#if defined(FT_USE_CBLAS_S)
void X(gemm)(char TRANS, int m, int n, int p, FLT alpha, FLT * A, int LDA, FLT * B, int LDB, FLT beta, FLT * C, int LDC) {
if (TRANS == 'N')
cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, m, p, n, alpha, A, LDA, B, LDB, beta, C, LDC);
else if (TRANS == 'T')
cblas_sgemm(CblasColMajor, CblasTrans, CblasNoTrans, n, p, m, alpha, A, LDA, B, LDB, beta, C, LDC);
}
#elif defined(FT_USE_CBLAS_D)
void X(gemm)(char TRANS, int m, int n, int p, FLT alpha, FLT * A, int LDA, FLT * B, int LDB, FLT beta, FLT * C, int LDC) {
if (TRANS == 'N')
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, m, p, n, alpha, A, LDA, B, LDB, beta, C, LDC);
else if (TRANS == 'T')
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, n, p, m, alpha, A, LDA, B, LDB, beta, C, LDC);
}
#else
void X(gemm)(char TRANS, int m, int n, int p, FLT alpha, FLT * A, int LDA, FLT * B, int LDB, FLT beta, FLT * C, int LDC) {
FLT t;
if (TRANS == 'N') {
for (int k = 0; k < p; k++) {
if (beta != 1) {
if (beta == 0)
for (int i = 0; i < m; i++)
C[i+k*LDC] = 0;
else
for (int i = 0; i < m; i++)
C[i+k*LDC] = beta*C[i+k*LDC];
}
for (int j = 0; j < n; j++) {
t = alpha*B[j+k*LDB];
for (int i = 0; i < m; i++)
C[i+k*LDC] += A[i+j*LDA]*t;
}
}
}
else if (TRANS == 'T') {
for (int k = 0; k < p; k++) {
if (beta != 1) {
if (beta == 0)
for (int i = 0; i < n; i++)
C[i+k*LDC] = 0;
else
for (int i = 0; i < n; i++)
C[i+k*LDC] = beta*C[i+k*LDC];
}
for (int i = 0; i < n; i++) {
t = 0;
for (int j = 0; j < m; j++)
t += A[j+i*LDA]*B[j+k*LDB];
C[i+k*LDC] += alpha*t;
}
}
}
}
#endif
// Dense-block matrix-vector product: unpacks the struct's dimensions
// (leading dimension == row count, storage is contiguous) and calls X(gemv).
void X(demv)(char TRANS, FLT alpha, X(densematrix) * A, FLT * x, FLT beta, FLT * y) {
X(gemv)(TRANS, A->m, A->n, alpha, A->A, A->m, x, beta, y);
}
// Dense-block matrix-matrix product against p columns of B: unpacks the
// struct's dimensions and calls X(gemm).
void X(demm)(char TRANS, int p, FLT alpha, X(densematrix) * A, FLT * B, int LDB, FLT beta, FLT * C, int LDC) {
X(gemm)(TRANS, A->m, A->n, p, alpha, A->A, A->m, B, LDB, beta, C, LDC);
}
// y ← α*(USVᵀ)*x + β*y (TRANS 'N') or y ← α*(VSᵀUᵀ)*x + β*y (TRANS 'T'),
// applied factor by factor so the cost stays O((m+n)r) (+ O(r²) with a core).
// Form '2' has no core S; form '3' applies the r x r core between the two
// outer factors.  t1/t2 are this thread's r-length slices of the scratch
// buffers, selected by FT_GET_THREAD_NUM() so concurrent calls don't collide.
void X(lrmv)(char TRANS, FLT alpha, X(lowrankmatrix) * L, FLT * x, FLT beta, FLT * y) {
int m = L->m, n = L->n, r = L->r;
FLT * t1 = L->t1+r*FT_GET_THREAD_NUM(), * t2 = L->t2+r*FT_GET_THREAD_NUM();
if (TRANS == 'N') {
if (L->N == '2') {
X(gemv)('T', n, r, 1, L->V, n, x, 0, t1);
X(gemv)('N', m, r, alpha, L->U, m, t1, beta, y);
}
else if (L->N == '3') {
X(gemv)('T', n, r, 1, L->V, n, x, 0, t1);
X(gemv)('N', r, r, 1, L->S, r, t1, 0, t2);
X(gemv)('N', m, r, alpha, L->U, m, t2, beta, y);
}
}
else if (TRANS == 'T') {
if (L->N == '2') {
X(gemv)('T', m, r, 1, L->U, m, x, 0, t1);
X(gemv)('N', n, r, alpha, L->V, n, t1, beta, y);
}
else if (L->N == '3') {
X(gemv)('T', m, r, 1, L->U, m, x, 0, t1);
X(gemv)('T', r, r, 1, L->S, r, t1, 0, t2);
X(gemv)('N', n, r, alpha, L->V, n, t2, beta, y);
}
}
}
// Grows the scratch buffers t1/t2 to hold r x p workspaces when a multi-column
// product (lrmm) needs more columns than previously provisioned; L->p tracks
// the current capacity.  Buffers are never shrunk.
// NOTE(review): realloc results are unchecked, consistent with the unchecked
// malloc/calloc style used throughout this file.
static inline void X(check_temps_lowrankmatrix)(X(lowrankmatrix) * L, int p) {
if (L->p < p) {
L->t1 = realloc(L->t1, L->r*p*sizeof(FLT));
L->t2 = realloc(L->t2, L->r*p*sizeof(FLT));
L->p = p;
}
}
// C ← α*(USVᵀ)*B + β*C (TRANS 'N') or C ← α*(VSᵀUᵀ)*B + β*C (TRANS 'T') for
// p columns of B, applied factor by factor via X(gemm) with r x p
// intermediates in t1/t2 (grown on demand by check_temps).  Unlike lrmv,
// the scratch is used from offset 0 — a single lrmm call consumes the whole
// buffer rather than a per-thread slice.
void X(lrmm)(char TRANS, int p, FLT alpha, X(lowrankmatrix) * L, FLT * B, int LDB, FLT beta, FLT * C, int LDC) {
X(check_temps_lowrankmatrix)(L, p);
int m = L->m, n = L->n, r = L->r;
FLT * t1 = L->t1, * t2 = L->t2;
if (TRANS == 'N') {
if (L->N == '2') {
X(gemm)('T', n, r, p, 1, L->V, n, B, LDB, 0, t1, r);
X(gemm)('N', m, r, p, alpha, L->U, m, t1, r, beta, C, LDC);
}
else if (L->N == '3') {
X(gemm)('T', n, r, p, 1, L->V, n, B, LDB, 0, t1, r);
X(gemm)('N', r, r, p, 1, L->S, r, t1, r, 0, t2, r);
X(gemm)('N', m, r, p, alpha, L->U, m, t2, r, beta, C, LDC);
}
}
else if (TRANS == 'T') {
if (L->N == '2') {
X(gemm)('T', m, r, p, 1, L->U, m, B, LDB, 0, t1, r);
X(gemm)('N', n, r, p, alpha, L->V, n, t1, r, beta, C, LDC);
}
else if (L->N == '3') {
X(gemm)('T', m, r, p, 1, L->U, m, B, LDB, 0, t1, r);
X(gemm)('T', r, r, p, 1, L->S, r, t1, r, 0, t2, r);
X(gemm)('N', n, r, p, alpha, L->V, n, t2, r, beta, C, LDC);
}
}
}
// y ← α*H*x + β*y (TRANS 'N') or y ← α*Hᵀ*x + β*y (TRANS 'T') for a
// hierarchical matrix: y is pre-scaled by β once, then every sub-block
// contributes with β == 1.  p and q track the running row/column offsets of
// the current sub-block; block heights are read from block-column N-1 and
// widths from block-row 0 (blocks are assumed conformally sized).  For the
// transposed product the roles of the row and column offsets swap.
void X(ghmv)(char TRANS, FLT alpha, X(hierarchicalmatrix) * H, FLT * x, FLT beta, FLT * y) {
int M = H->M, N = H->N;
int p, q = 0;
if (TRANS == 'N') {
if (beta != 1) {
if (beta == 0)
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 1); i++)
y[i] = 0;
else
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 1); i++)
y[i] = beta*y[i];
}
for (int n = 0; n < N; n++) {
p = 0;
for (int m = 0; m < M; m++) {
switch (H->hash(m, n)) {
case 1: X(ghmv)(TRANS, alpha, H->hierarchicalmatrices(m, n), x+q, 1, y+p); break;
case 2: X(demv)(TRANS, alpha, H->densematrices(m, n), x+q, 1, y+p); break;
case 3: X(lrmv)(TRANS, alpha, H->lowrankmatrices(m, n), x+q, 1, y+p); break;
}
p += X(blocksize_hierarchicalmatrix)(H, m, N-1, 1);
}
q += X(blocksize_hierarchicalmatrix)(H, 0, n, 2);
}
}
else if (TRANS == 'T') {
if (beta != 1) {
if (beta == 0)
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 2); i++)
y[i] = 0;
else
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 2); i++)
y[i] = beta*y[i];
}
for (int m = 0; m < M; m++) {
p = 0;
for (int n = 0; n < N; n++) {
switch (H->hash(m, n)) {
case 1: X(ghmv)(TRANS, alpha, H->hierarchicalmatrices(m, n), x+q, 1, y+p); break;
case 2: X(demv)(TRANS, alpha, H->densematrices(m, n), x+q, 1, y+p); break;
case 3: X(lrmv)(TRANS, alpha, H->lowrankmatrices(m, n), x+q, 1, y+p); break;
}
p += X(blocksize_hierarchicalmatrix)(H, 0, n, 2);
}
q += X(blocksize_hierarchicalmatrix)(H, m, N-1, 1);
}
}
}
// C ← α*H*B + β*C (TRANS 'N') or C ← α*Hᵀ*B + β*C (TRANS 'T') for p columns
// of B.  Block row/column offsets are precomputed into mrows/ncols (heights
// from block-column N-1, widths from block-row 0).  C is pre-scaled by β once
// and every sub-block then contributes with β == 1.  Parallelization: when B
// is wide (p at least the inner dimension), the p columns are split into P=2
// halves via pcols and block rows (or columns, for 'T') are processed in an
// OpenMP parallel loop within each half; otherwise the blocks themselves are
// parallelized over.  CAUTION: the inner `for (int p = 0; p < P; p++)`
// deliberately shadows the parameter p — inside those loops p indexes the
// column half, and the half's width is pcols[p+1]-pcols[p].
void X(ghmm)(char TRANS, int p, FLT alpha, X(hierarchicalmatrix) * H, FLT * B, int LDB, FLT beta, FLT * C, int LDC) {
int M = H->M, N = H->N, P = 2;
int pcols[] = {0, p/P, p};
int * mrows = calloc(M, sizeof(int));
for (int m = 1; m < M; m++)
mrows[m] = mrows[m-1] + X(blocksize_hierarchicalmatrix)(H, m-1, N-1, 1);
int * ncols = calloc(N, sizeof(int));
for (int n = 1; n < N; n++)
ncols[n] = ncols[n-1] + X(blocksize_hierarchicalmatrix)(H, 0, n-1, 2);
if (TRANS == 'N') {
if (beta != 1) {
if (beta == 0) {
#pragma omp parallel for
for (int j = 0; j < p; j++)
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 1); i++)
C[i+j*LDC] = 0;
}
else {
#pragma omp parallel for
for (int j = 0; j < p; j++)
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 1); i++)
C[i+j*LDC] = beta*C[i+j*LDC];
}
}
if (p >= X(size_hierarchicalmatrix)(H, 2)) {
for (int p = 0; p < P; p++) {
#pragma omp parallel for
for (int m = 0; m < M; m++) {
for (int n = 0; n < N; n++) {
switch (H->hash(m, n)) {
case 1: X(ghmm)(TRANS, pcols[p+1]-pcols[p], alpha, H->hierarchicalmatrices(m, n), B+ncols[n]+pcols[p]*LDB, LDB, 1, C+mrows[m]+pcols[p]*LDC, LDC); break;
case 2: X(demm)(TRANS, pcols[p+1]-pcols[p], alpha, H->densematrices(m, n), B+ncols[n]+pcols[p]*LDB, LDB, 1, C+mrows[m]+pcols[p]*LDC, LDC); break;
case 3: X(lrmm)(TRANS, pcols[p+1]-pcols[p], alpha, H->lowrankmatrices(m, n), B+ncols[n]+pcols[p]*LDB, LDB, 1, C+mrows[m]+pcols[p]*LDC, LDC); break;
}
}
}
}
}
else {
#pragma omp parallel for
for (int m = 0; m < M; m++) {
for (int n = 0; n < N; n++) {
switch (H->hash(m, n)) {
case 1: X(ghmm)(TRANS, p, alpha, H->hierarchicalmatrices(m, n), B+ncols[n], LDB, 1, C+mrows[m], LDC); break;
case 2: X(demm)(TRANS, p, alpha, H->densematrices(m, n), B+ncols[n], LDB, 1, C+mrows[m], LDC); break;
case 3: X(lrmm)(TRANS, p, alpha, H->lowrankmatrices(m, n), B+ncols[n], LDB, 1, C+mrows[m], LDC); break;
}
}
}
}
}
else if (TRANS == 'T') {
if (beta != 1) {
if (beta == 0) {
#pragma omp parallel for
for (int j = 0; j < p; j++)
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 2); i++)
C[i+j*LDC] = 0;
}
else {
#pragma omp parallel for
for (int j = 0; j < p; j++)
for (int i = 0; i < X(size_hierarchicalmatrix)(H, 2); i++)
C[i+j*LDC] = beta*C[i+j*LDC];
}
}
if (p >= X(size_hierarchicalmatrix)(H, 1)) {
for (int p = 0; p < P; p++) {
#pragma omp parallel for
for (int n = 0; n < N; n++) {
for (int m = 0; m < M; m++) {
switch (H->hash(m, n)) {
case 1: X(ghmm)(TRANS, pcols[p+1]-pcols[p], alpha, H->hierarchicalmatrices(m, n), B+mrows[m]+pcols[p]*LDB, LDB, 1, C+ncols[n]+pcols[p]*LDC, LDC); break;
case 2: X(demm)(TRANS, pcols[p+1]-pcols[p], alpha, H->densematrices(m, n), B+mrows[m]+pcols[p]*LDB, LDB, 1, C+ncols[n]+pcols[p]*LDC, LDC); break;
case 3: X(lrmm)(TRANS, pcols[p+1]-pcols[p], alpha, H->lowrankmatrices(m, n), B+mrows[m]+pcols[p]*LDB, LDB, 1, C+ncols[n]+pcols[p]*LDC, LDC); break;
}
}
}
}
}
else {
#pragma omp parallel for
for (int n = 0; n < N; n++) {
for (int m = 0; m < M; m++) {
switch (H->hash(m, n)) {
case 1: X(ghmm)(TRANS, p, alpha, H->hierarchicalmatrices(m, n), B+mrows[m], LDB, 1, C+ncols[n], LDC); break;
case 2: X(demm)(TRANS, p, alpha, H->densematrices(m, n), B+mrows[m], LDB, 1, C+ncols[n], LDC); break;
case 3: X(lrmm)(TRANS, p, alpha, H->lowrankmatrices(m, n), B+mrows[m], LDB, 1, C+ncols[n], LDC); break;
}
}
}
}
}
free(mrows);
free(ncols);
}
// Binary search for y in the increasing array x over the inclusive index
// range [start, stop].  Returns an index of y when it occurs in the range;
// otherwise the insertion point, i.e. the first index whose element is >= y
// (which may be one past the range when every element is smaller).
// Fix: the original read an uninitialized j — undefined behavior — whenever
// the range was empty (stop < start); that case now returns the insertion
// point `start` directly without touching x.
int X(binarysearch)(FLT * x, int start, int stop, FLT y) {
    if (stop < start)
        return start;
    int j = start;
    while (stop >= start) {
        j = (start+stop)/2;
        if (x[j] < y) start = j+1;
        else if (x[j] > y) stop = j-1;
        else break;
    }
    // j holds the last probed index; bump past it if its element is still
    // below y so the result is a valid insertion point.
    if (x[j] < y) j += 1;
    return j;
}
/*
indsplit takes a unitrange `start ≤ ir < stop`, and splits it into
two unitranges `i1` and `i2` such that
`a ≤ x[i] < (a+b)/2` for `i ∈ i1`, and
`(a+b)/2 ≤ x[i] ≤ b` for `i ∈ i2`.
*/
void X(indsplit)(FLT * x, unitrange ir, unitrange * i1, unitrange * i2, FLT a, FLT b) {
    // Split ir at the first index whose abscissa reaches the midpoint of [a, b].
    FLT midpoint = (a+b)/2;
    i1->start = ir.start;
    i1->stop = i2->start = X(binarysearch)(x, ir.start, ir.stop, midpoint);
    i2->stop = ir.stop;
}
// Standard two-point kernels: Cauchy 1/(x-y), Coulomb 1/(x-y)^2 and its
// derivative variant 1/(x-y)^3, and the logarithmic kernel log|x-y|.
FLT X(cauchykernel)(FLT x, FLT y) {return 1/(x-y);}
FLT X(coulombkernel)(FLT x, FLT y) {return 1/((x-y)*(x-y));}
FLT X(coulombprimekernel)(FLT x, FLT y) {return 1/(((x-y)*(x-y))*(x-y));}
FLT X(logkernel)(FLT x, FLT y) {return Y(log)(Y(fabs)(x-y));}
// Subtraction routed through a separate function; the *2 kernel variants
// below use it to form (x - yhi) - ylo, differencing the ordinate's high part
// first so that the split representation y = ylo + yhi reduces cancellation
// error (presumably also to discourage the compiler from re-associating the
// expression — TODO confirm whether a non-inlining attribute is applied at
// the definition of X/static elsewhere).
static FLT X(diff)(FLT x, FLT y) {return x - y;}
FLT X(cauchykernel2)(FLT x, FLT ylo, FLT yhi) {return 1/(X(diff)(x, yhi) - ylo);}
FLT X(coulombkernel2)(FLT x, FLT ylo, FLT yhi) {return 1/((X(diff)(x, yhi) - ylo)*(X(diff)(x, yhi) - ylo));}
FLT X(coulombprimekernel2)(FLT x, FLT ylo, FLT yhi) {return 1/(((X(diff)(x, yhi) - ylo)*(X(diff)(x, yhi) - ylo))*(X(diff)(x, yhi) - ylo));}
FLT X(logkernel2)(FLT x, FLT ylo, FLT yhi) {return Y(log)(Y(fabs)(X(diff)(x, yhi) - ylo));}
// Cauchy kernel 1/(x-y) that returns 0 instead of blowing up when x and y are
// within a relative tolerance of 16*sqrt(eps) of each other.
FLT X(thresholded_cauchykernel)(FLT x, FLT y) {
    FLT d = x - y;
    if (Y(fabs)(d) < 16*Y(sqrt)(Y(eps)())*MAX(Y(fabs)(x), Y(fabs)(y)))
        return 0;
    return 1/d;
}
|
openmp-test.c | /*
Copyright (c) 1997-2019 OpenMP Architecture Review Board.
All rights reserved.
Permission to redistribute and use without fee all or part of the source
codes and the associated document (the Software), with or without
modification, is granted, provided that the following conditions are met:
* Redistributions of the software must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Neither the name of the OpenMP Architecture Review Board nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE OPENMP ARCHITECTURE REVIEW BOARD "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OPENMP ARCHITECTURE REVIEW
BOARD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* @@name: fpriv_sections.1c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
*/
#include <omp.h>
#include <stdio.h>
#define NT 4
int main()
{
int section_count = 0;
/* Pin the team size so exactly NT threads execute the parallel region. */
omp_set_dynamic(0);
omp_set_num_threads(NT);
#pragma omp parallel
#pragma omp sections firstprivate(section_count)
{
/* Each thread entering the sections construct gets its own copy of
   section_count initialized from the original (firstprivate); a thread
   that executes both sections would print 1 then 2. */
#pragma omp section
{
section_count++;
/* may print the number one or two */
printf("section_count %d\n", section_count);
}
#pragma omp section
{
section_count++;
/* may print the number one or two */
printf("section_count %d\n", section_count);
}
}
return 0;
} |
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
mutable IdentifierInfo *Ident_abstract;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector"
/// and "bool" fast comparison. Only present if AltiVec or ZVector are
/// enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
IdentifierInfo *Ident_Bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> MSFenvAccess;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
/// Records how many levels were added through this object and subtracts
/// exactly that amount on destruction, so increments are scope-local.
class TemplateParameterDepthRAII {
  unsigned &Depth;       // the parser's depth counter being managed
  unsigned AddedLevels;  // levels added via this object so far
public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
    : Depth(Depth), AddedLevels(0) {}
  ~TemplateParameterDepthRAII() {
    // Undo only what was added through this object.
    Depth -= AddedLevels;
  }
  /// Add a single template parameter level.
  void operator++() {
    ++Depth;
    ++AddedLevels;
  }
  /// Add D levels at once.
  void addDepth(unsigned D) {
    Depth += D;
    AddedLevels += D;
  }
  /// Replace the levels added so far with exactly D levels.
  void setAddedDepth(unsigned D) {
    Depth = Depth - AddedLevels + D;
    AddedLevels = D;
  }
  unsigned getDepth() const { return Depth; }
  /// The depth as it was before this object added any levels.
  unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Free the accumulated TemplateIdAnnotations when it is safe to do so:
/// at end-of-file, or when the preprocessor cannot still hand us pending
/// annotation tokens that might reference them.
void MaybeDestroyTemplateIds() {
  if (TemplateIds.empty())
    return;
  if (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())
    DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
  Parser &Self;  // parser whose TemplateIds are cleaned up on scope exit
  DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
  // Destruction is the "likely-good position": delegate to the
  // safety-checked cleanup rather than destroying unconditionally.
  ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,
    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token
    NoSpaceBeforeLess = 0x1,
    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };
  /// One suspicious '<' occurrence, plus the bracket-nesting counts at the
  /// time it was seen so we can tell whether it is still in scope.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;
    /// True when the parser is at exactly the same bracket nesting as when
    /// this '<' was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }
    /// True when the parser is at the same nesting or strictly deeper.
    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };
  SmallVector<Loc, 8> Locs;
  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      // Same scope: keep only the higher-or-equal priority candidate.
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio,
                      P.ParenCount, P.BracketCount, P.BraceCount});
    }
  }
  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }
  /// Get the current enclosing expression that might have been intended to be
  /// a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
// Trivial accessors for the parser's collaborators and current state.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}
// May return null outside an Objective-C container; see ParsingInObjCContainer.
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Capture the location before PP.Lex overwrites Tok with the next token.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current token only if it has the expected kind.
/// Returns true (and advances past it) on a match, false otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.isNot(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Same ordering as ConsumeToken: record the location before lexing.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}
/// As TryConsumeToken(Expected), additionally reporting the location of the
/// consumed token in \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (TryConsumeToken(Expected)) {
    Loc = PrevTokLocation;
    return true;
  }
  return false;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    // Consume it outright only if the caller opted in; otherwise let the
    // unexpected-completion handler decide what to do.
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}
/// Return the source location just past the end of the previously
/// consumed token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
/// Delegates to Sema, which owns the cached identifiers.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  return Tok.is(tok::l_paren) || Tok.is(tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.is(tok::l_square) || Tok.is(tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.is(tok::l_brace) || Tok.is(tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  if (isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
      isTokenBrace())
    return true;
  return Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  // Order matters: push Consumed back, re-lex it into Tok, then push the
  // saved token so it follows Consumed in the stream.
  PP.EnterToken(Consumed, /*IsReinject*/true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/true);
}
/// Consume an annotation token. Returns the token's start location, while
/// PrevTokLocation is set to the annotation's *end* location (annotations
/// cover a source range, not a single spelling).
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    // Leaving a paren scope: drop pending '<' candidates recorded at this
    // or deeper nesting (see AngleBracketTracker::clear).
    AngleBrackets.clear(*this);
    --ParenCount;       // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    // Leaving a bracket scope invalidates pending '<' candidates.
    AngleBrackets.clear(*this);
    --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    // Leaving a brace scope invalidates pending '<' candidates.
    AngleBrackets.clear(*this);
    --BraceCount;       // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  // Tell the preprocessor the completion point was reached, but only when
  // completion is actually enabled for this compilation.
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N != 0 && !Tok.is(tok::eof))
    return PP.LookAhead(N - 1);
  // N == 0, or we're at EOF where peeking further is meaningless.
  return Tok;
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
/// Equivalent to GetLookAheadToken(1) when not at EOF.
const Token &NextToken() {
  return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
/// A null annotation value denotes an erroneous type.
static TypeResult getTypeAnnotation(const Token &Tok) {
  if (void *AnnotVal = Tok.getAnnotationValue())
    return ParsedType::getFromOpaquePtr(AnnotVal);
  return TypeError();
}
private:
/// Store a parsed type into an annotation token; an invalid result is
/// stored as a null annotation value (see getTypeAnnotation).
static void setTypeAnnotation(Token &Tok, TypeResult T) {
  assert((T.isInvalid() || T.get()) &&
         "produced a valid-but-null type annotation?");
  Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
// Accessors for non-type and identifier payloads stored in annotation
// tokens; each pair is a raw opaque-pointer round-trip.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
  Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token. Round-trips through the same opaque pointer as getExprAnnotation.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
/// Heuristic check: could the current token begin a C++ nested-name
/// specifier? Checks the same candidates as before, in the same order.
bool MightBeCXXScopeToken() {
  if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon))
    return true;
  if (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon))
    return true;
  return Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
/// Annotate a C++ scope token only when the current token could plausibly
/// start one; otherwise do nothing and return false.
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  if (!MightBeCXXScopeToken())
    return false;
  return TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  // Fast path: nothing to do unless an AltiVec/ZVector language mode is on.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  // Only the cached contextual identifiers are candidates; "pixel" is
  // recognized in AltiVec mode only.
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      Tok.getIdentifierInfo() != Ident_Bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for a context-sensitive AltiVec "vector"
/// identifier token, replacing it with the non-context-sensitive __vector.
/// Returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  // Annotation tokens carry no identifier info.
  if (Tok.isAnnotation())
    return false;
  // Lazily cache the IdentifierInfo on first use.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Snapshot of parser state at construction, restored by Revert().
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;  // false once Commit() or Revert() has run
public:
  explicit TentativeParsingAction(Parser &p)
    : P(p), PrevPreferredType(P.PreferredType) {
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    // Tell the preprocessor to start caching tokens so we can rewind.
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  /// Keep the tokens consumed since construction.
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  /// Rewind the preprocessor and restore every piece of saved parser state.
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Backtrack the token stream first, then restore parser-side state.
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  /// Marked explicit: this is an RAII guard, so an implicit conversion from
  /// a Parser reference would only invite accidental temporaries.
  explicit RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  /// Always rewinds the token stream to where the action began.
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
// The Objective-C container decl we temporarily stepped out of, or null if
// the parser was not inside one (in which case this object is a no-op).
Decl *DC;
// Restores Parser::ParsingInObjCContainer to its previous value on scope
// exit; it is set to true only while DC is non-null.
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
// Re-enter the container we left in the constructor, if any.
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
/// Passed to ConsumeExtraSemi below to describe where the stray ';' was seen.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
// Null when no scope was actually entered (see the constructor); the
// destructor/Exit() then do nothing.
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
// Record that no scope was entered, so Exit() stays a no-op.
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  /// How many scopes this object has entered and not yet exited.
  unsigned Depth = 0;

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}

  /// Enter one more scope with the given flags.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++Depth;
  }

  /// Exit every scope entered so far, in reverse order of entry.
  void Exit() {
    for (; Depth != 0; --Depth)
      Self.ExitScope();
  }

  ~MultiParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
/// Returns the number of scopes entered (to be exited by the caller's
/// MultiParseScope).
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
// Flags saved at construction; presumably restored by the out-of-line
// destructor — confirm in the implementation file.
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Convenience overload: emit the diagnostic at the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// Allow SkipUntilFlags values to be combined with '|'.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
// The one-, two- and three-token overloads all forward to the ArrayRef
// overload declared last.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
// Base of the late-parsing hierarchy: every ParseLexed* hook has a no-op
// default so subclasses override only what applies to them.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
// The nested class whose late-parsed members this node forwards to.
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
// The macro that expanded to this attribute, if any.
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
// The declarations the attribute applies to, accumulated via addDecl().
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
// Access specifier in effect where the pragma appeared.
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
// Takes ownership of the cached tokens by swapping them in.
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
public:
  /// \param PSoon whether we plan to parse these attributes shortly after
  /// creation (rather than at the end of the enclosing class).
  /// explicit: prevents accidental implicit conversion from a bare bool.
  explicit LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

  /// Are we planning to parse these shortly after creation?
  /// const-qualified: a pure accessor that must be callable on const lists.
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
// The method declaration whose body tokens are cached in Toks.
Decl *D;
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser *Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
/// Null when the exception specification was not delayed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), IsInterface(IsInterface),
TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed; asserts that
/// ClassStack is non-empty.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
// True once Pop() has been called; guards against a double pop in the
// destructor.
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
// Default: not a template at all.
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
// Template declaration or explicit specialization, depending on
// isSpecialization.
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
// Explicit instantiation, e.g. 'extern template class X<int>;'.
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
/// The kind of cached initializer being lazily parsed.
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
// Single-token convenience form: forwards to the two-token overload with
// both stop tokens equal.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
// RAII state for parsing an @implementation; registers itself as the
// parser's current implementation context on construction.
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
// Whether a C function was seen inside the @implementation.
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
// Method definitions whose bodies are lexed now and parsed at @end.
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
// Make this object visible to the parser for the duration of the impl.
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
// Set by finish(); queried via isFinished() — destructor is out of line.
bool Finished;
};
// Non-null only while an @implementation is being parsed (see
// ObjCImplParsingDataRAII above, which installs itself here).
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
// One IdentifierInfo per ObjCTypeQual value, indexed by the enum above.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
// Overload reporting via NotCastExpr whether the expression failed to parse
// because it was not a cast-expression at all.
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
// Convenience form: only consults the tracker when there is a potential
// angle bracket currently open; otherwise nothing to check.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseSYCLUniqueStableNameExpression();
ExprResult ParseSYCLUniqueStableIdExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
/// ExpressionStarts, when provided, is invoked before each expression.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
/// \param Tentative when non-null, the introducer is parsed tentatively and
/// the outcome is reported through it.
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
/// \param Delayed whether to cache the exception-specification tokens in
/// ExceptionSpecTokens instead of parsing them now.
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
DeclGroupPtrTy
ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
ParsedAttributesWithRange &Attrs);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
///
/// Dispatches on the current token: a '{' starts a braced initializer,
/// anything else is an assignment-expression.
ExprResult ParseInitializer() {
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
/// Return true if the current token could begin a designator.
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
/// Context passed to initializer parsing so code completion can suggest
/// designators for the aggregate being initialized.
struct DesignatorCompletionInfo {
SmallVectorImpl<Expr *> &InitExprs;
QualType PreferredBaseType;
};
ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
/// Parse the body of an ObjC message send once the receiver (super, a
/// type, or an expression) has been determined.
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
/// Consume any number of consecutive null (';') statements.
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists block
/// should follow.
IfExistsBehavior Behavior;
};
/// Parse the parenthesized condition of a Microsoft __if_exists /
/// __if_not_exists construct into \p Result.
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
/// Parse the optional operand lists of a GNU/MS inline-asm statement.
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Only a (trailing-)type-specifier can appear in these contexts.
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;

  // Full declaration specifiers are permitted everywhere else.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (eg, because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
  // Ordered from most to least restrictive.
  switch (DSC) {
  case DeclSpecContext::DSC_trailing:
    return AllowDefiningTypeSpec::No;
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
    return AllowDefiningTypeSpec::NoButErrorRecovery;
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
    return AllowDefiningTypeSpec::YesButInvalid;
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
    return AllowDefiningTypeSpec::Yes;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts where only a type (not a declaration) is expected.
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
    return false;

  // Ordinary declaration contexts.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // CTAD is not available where an unambiguous type is required.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;
  /// Whether a for-range declaration was actually parsed; signalled by the
  /// ':' of the range-based for having been seen.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
/// A ForRangeInit that additionally records the parsed loop variable.
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
/// Return true if the current token could plausibly begin a declarator in
/// the given context.
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
/// Parse one struct/union member declaration, invoking \p FieldsCallback
/// for each field declarator parsed.
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationSpecifier() == TPResult::True
             : isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationStatement()
             : isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Let Sema know we are at the start of an OpenMP loop before
  // disambiguating (side effect must precede the checks below).
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
  // In C++, an init-statement may also be an alias-declaration.
  return Tok.is(tok::kw_using) ||
         isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // C has no ambiguity here; a specifier-qualifier starts a type name.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool Ambiguous;
  return isCXXTypeId(TypeIdUnambiguous, Ambiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
///   '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
/// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of
/// _BitInt as an extension when appropriate.
void DiagnoseBitIntUse(const Token &Tok);
public:
/// Parse a type-name (type-id in C++), optionally recording its source
/// range in \p Range and the declaring context/access for owned types.
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context = DeclaratorContext::TypeName,
AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' begins a standard attribute; anything else is fine here.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// If the upcoming tokens look like a C++11 attribute (or alignas) that
/// appears in the wrong position, diagnose it and suggest moving it to
/// \p CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  const bool AtCXX11Attribute =
      Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (AtCXX11Attribute || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
/// Diagnose any parsed attributes as prohibited and drop them.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// Diagnose any parsed attributes as prohibited; clears only the list since
/// a view does not own the attribute storage.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't support yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID,
bool DiagnoseEmptyAttrs = false);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Emit warnings for C++11 and C2x attributes that are in a position that
/// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// Bitmask selecting which attribute syntaxes ParseAttributes accepts.
enum ParseAttrKindMask {
PAKM_GNU = 1 << 0,
PAKM_Declspec = 1 << 1,
PAKM_CXX11 = 1 << 2,
};
/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...)))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
///   [[]] __attribute__(()) int i; // OK
///   __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
}
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
/// \returns true if any attribute syntax was seen and parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
                          ParsedAttributesWithRange &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  const bool AtAttributeStart =
      Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier());
  if (!AtAttributeStart)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// Overload of MaybeParseAttributes returning attributes without source
/// range information. \returns true if any attributes were parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  const bool AtAttributeStart =
      Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier());
  if (!AtAttributeStart)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// If the current token starts a GNU __attribute__, parse the attributes
/// and attach them to the declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParsedAttributes ParsedAttrs(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(ParsedAttrs, &EndLoc, LateAttrs, &D);
  D.takeAttributes(ParsedAttrs, EndLoc);
}
/// Parses GNU-style attributes, if present, and returns them without source
/// range information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
///
/// \returns true if an __attribute__ was seen and parsed.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    // The ParsedAttributes overload of ParseGNUAttributes already funnels
    // through a ParsedAttributesWithRange internally, so parse straight
    // into Attrs. (A previous version also constructed a local
    // ParsedAttributesWithRange and took from it, but that set was never
    // populated — dead code, now removed.)
    ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
    return true;
  }
  return false;
}
/// If the current token starts a GNU __attribute__, parse the attributes
/// (with range information) into \p Attrs. \returns true if any were parsed.
bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return false;
  ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
  return true;
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
                        SourceLocation *EndLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr) {
  // Collect into a ranged set first, then strip the range on hand-off.
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseGNUAttributes(RangedAttrs, EndLoc, LateAttrs, D);
  Attrs.takeAllFrom(RangedAttrs);
}
void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
/// Parse the argument list of a single GNU-style attribute.
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
/// Parse the arguments of a clang-specific attribute; returns the number of
/// arguments parsed.
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// Re-injects OpenMP directive tokens that were cached while parsing
/// attributes, so they are consumed from the regular token stream.
void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
  // If parsing the attributes found an OpenMP directive, emit those tokens
  // to the parse stream now.
  if (!OpenMPTokens.empty()) {
    // Push the current token back first so it reappears after the cached
    // OpenMP tokens have been consumed.
    PP.EnterToken(Tok, /*IsReinject*/ true);
    PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
                        /*IsReinject*/ true);
    // Prime Tok with the first token of the replayed stream.
    ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
  }
}
/// If a C++11 attribute-specifier-seq is next (and standard attributes are
/// permitted here), parse it and attach the attributes to the declarator.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange Parsed(AttrFactory);
  SourceLocation End;
  ParseCXX11Attributes(Parsed, &End);
  D.takeAttributes(Parsed, End);
}
/// If a C++11 attribute-specifier-seq is next, parse it into \p attrs
/// (without range information) and return true.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return false;
  ParsedAttributesWithRange Parsed(AttrFactory);
  ParseCXX11Attributes(Parsed, endLoc);
  attrs.takeAllFrom(Parsed);
  return true;
}
/// If a C++11 attribute-specifier-seq is next, parse it into \p attrs and
/// return true.  \p OuterMightBeMessageSend disambiguates against an
/// Objective-C message send that also starts with '[['.
bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed() ||
      !isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    return false;
  ParseCXX11Attributes(attrs, endLoc);
  return true;
}
void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
CachedTokens &OpenMPTokens);
void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
CachedTokens &OpenMPTokens,
SourceLocation *EndLoc = nullptr);
/// Parses a single C++11 attribute-specifier, then replays any OpenMP
/// directive tokens that were cached while parsing it.
void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc = nullptr) {
  CachedTokens OpenMPTokens;
  ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc);
  ReplayOpenMPAttributeTokens(OpenMPTokens);
}
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
CachedTokens &OpenMPTokens);
IdentifierInfo *TryParseCXX11AttributeIdentifier(
SourceLocation &Loc,
Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
const IdentifierInfo *EnclosingScope = nullptr);
/// Parses Microsoft-style square-bracket attributes when the Microsoft
/// extension is enabled and the next token starts one.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt || !Tok.is(tok::l_square))
    return;
  ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parses __declspec(...) specifiers when the declspec keyword extension is
/// enabled; returns true if anything was parsed.
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword || !Tok.is(tok::kw___declspec))
    return false;
  ParseMicrosoftDeclSpecs(Attrs, End);
  return true;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Classifies the current token as a C++11 virt-specifier; delegates to the
/// Token-taking overload.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
bool isClassCompatibleKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;  // qualifier naming the scope to enter
  bool EnteredScope; // Sema accepted ActOnCXXEnterDeclaratorScope
  bool CreatedScope; // a parser scope was pushed and must be popped
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  /// Enters the declarator scope named by SS.  May be called at most once
  /// per object (asserts otherwise).
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    // ActOnCXXEnterDeclaratorScope returns false on success; only then do we
    // owe Sema a matching exit call in the destructor.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  /// Undoes, in reverse order, whatever EnterDeclaratorScope managed to do.
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Bundles the pieces of a single using-declarator: an optional 'typename'
/// keyword, the nested-name-specifier, the declared name, and an optional
/// trailing ellipsis.
struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;

  /// Resets every field so the object can be reused for the next declarator.
  void clear() {
    TypenameLoc = SourceLocation();
    EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse an 'append_args' clause for '#pragma omp declare variant'.
bool parseOpenMPAppendArgs(
SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp [begin] declare target'.
void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
OpenMPDirectiveKind EndDKind,
SourceLocation Loc);
/// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was not
/// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses the 'sizes' clause of a '#pragma omp tile' directive.
OMPClause *ParseOpenMPSizesClause();
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
//
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  // Depend modifier or a trailing clause expression (the name suggests either
  // role; confirm against the clause parsers that fill it in).
  Expr *DepModOrTailExpr = nullptr;
  SourceLocation ColonLoc; // location of a ':' inside the clause, if any
  SourceLocation RLoc;     // location of the closing ')'
  // Scope qualifier and name of a reduction identifier or mapper id.
  CXXScopeSpec ReductionOrMapperIdScopeSpec;
  DeclarationNameInfo ReductionOrMapperId;
  int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
                          ///< lastprivate clause.
  // Map-type modifiers of a 'map' clause and their source locations
  // (parallel arrays).
  SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
      MapTypeModifiers;
  SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
      MapTypeModifiersLoc;
  // Motion modifiers and their source locations (parallel arrays).
  SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
      MotionModifiers;
  SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
  bool IsMapTypeImplicit = false;  // true when no explicit map type was written
  SourceLocation ExtraModifierLoc; // location of ExtraModifier, if present
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
/// SYCL Type Traits
// __builtin_num_fields, __builtin_num_bases
ExprResult ParseSYCLBuiltinNum();
// __builtin_field_type, __builtin_base_type
ExprResult ParseSYCLBuiltinType();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
/// Accumulates the qualifiers seen on a GCC-style asm statement
/// (volatile, inline, goto) as a bit mask.
class GNUAsmQualifiers {
  unsigned Qualifiers = AQ_unspecified; // OR of AQ bits seen so far
public:
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  /// Returns the printable spelling of a single qualifier.
  static const char *getQualifierName(AQ Qualifier);
  /// Records \p Qualifier in the mask (defined out of line).
  bool setAsmQualifier(AQ Qualifier);
  bool isVolatile() const { return (Qualifiers & AQ_volatile) != 0; }
  bool isInline() const { return (Qualifiers & AQ_inline) != 0; }
  bool isGoto() const { return (Qualifiers & AQ_goto) != 0; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
exercise2.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define I(x,y) (x)*(Lx-(x))*(y)*(Ly-(y))
#define V(x,y) (x)*(Lx-(x))*(y)*(Ly-(y))
#define S(x,y,t) 0
#define f(x,y,t) 2*c*c*(1+(t)/2)*((y)*(Ly-(y))+(x)*(Lx-(x)))
#define ua(x,y,t) (x)*(Lx-(x))*(y)*(Ly-(y))*(1+(t)/2)
// if S=const. (dS/dt=0) set 0, else set 1
#define St 0
#define DxDx(u,i,j) (u[i-1][j]-2*u[i][j]+u[i+1][j])
#define DyDy(u,i,j) (u[i][j-1]-2*u[i][j]+u[i][j+1])
void output_n(FILE* file, size_t Nx, size_t Ny,
double t, double* x, double* y, double** u);
void output_a(FILE* file, size_t Nx, size_t Ny, double Lx, double Ly,
double t, double* x, double* y);
/* 2D wave equation solver (explicit finite differences, OpenMP).
 *
 * Solves u_tt = c^2*(u_xx+u_yy) + f(x,y,t) on [0,Lx]x[0,Ly] up to time T,
 * with boundary values S, initial displacement I and initial velocity V
 * (all defined as macros above).  Numeric and analytic snapshots are
 * written at the times listed in tp[].
 *
 * Usage: ./exercise2 [num_threads]
 * Returns 0 on success, 1 if the output files cannot be opened.
 */
int main(int argc, char **argv)
{
    //conf
    const double Lx=10, Ly=10, T=20, c=1;
    const size_t Nx=40, Ny=40;
    const double dt=0.1, tp[]= {0,5,10,20};
    const size_t np = sizeof(tp)/sizeof(tp[0]); //number of snapshot times
    FILE *file_n = fopen("wave2d_numeric.dat","w");
    FILE *file_a = fopen("wave2d_analytic.dat","w");
    if(!file_n || !file_a) {
        fprintf(stderr,"Error: cannot open output files.\n");
        return 1;
    }
    //vars
    const double dx=Lx/(Nx-1),
                 dy=Ly/(Ny-1),
                 cx2=(c*dt/dx)*(c*dt/dx), //squared Courant numbers
                 cy2=(c*dt/dy)*(c*dt/dy),
                 dt2=dt*dt;
    double t=0, t0, t1;
    double *x, *y, **un, **u, **uo;
    size_t i, j, p=0;
    if(argc==2) {
        int nt;
        sscanf(argv[1],"%d",&nt);
        omp_set_num_threads(nt);
    }
    x = (double*)malloc(Nx*sizeof(double));
    y = (double*)malloc(Ny*sizeof(double));
    un = (double**)malloc(Nx*sizeof(double*)); //u^(n+1)
    u = (double**)malloc(Nx*sizeof(double*)); //u^(n)
    uo = (double**)malloc(Nx*sizeof(double*)); //u^(n-1)
    for(i=0; i<Nx; ++i) {
        un[i] = (double*)malloc(Ny*sizeof(double));
        u[i] = (double*)malloc(Ny*sizeof(double));
        uo[i] = (double*)malloc(Ny*sizeof(double));
    }
    printf("Working...\n");
    t0 = omp_get_wtime();
    /* const-qualified scalars (dx, dt, tp, np, ...) are predetermined shared
     * under default(none) and need not be listed. */
    #pragma omp parallel default(none) \
    private(i,j) \
    shared(p,t,x,y,un,u,uo,file_n,file_a)
    {
        //grid coordinates
        #pragma omp for nowait
        for(i=0; i<Nx; ++i)
            x[i] = i*dx;
        #pragma omp for
        for(j=0; j<Ny; ++j)
            y[j] = j*dy;
        //boundary values at t=0
        #pragma omp for
        for(i=0; i<Nx; ++i) {
            u[i][0] = S(x[i],y[0],t);
            u[i][Ny-1] = S(x[i],y[Ny-1],t);
        }
        #pragma omp for
        for(j=0; j<Ny; ++j) {
            u[0][j] = S(x[0],y[j],t);
            u[Nx-1][j] = S(x[Nx-1],y[j],t);
        }
        //initial displacement in the interior
        #pragma omp for collapse(2)
        for(i=1; i<Nx-1; ++i)
            for(j=1; j<Ny-1; ++j)
                u[i][j] = I(x[i],y[j]);
        #pragma omp single
        {
            /* p<np guards against reading past tp[]: once all snapshots are
             * written, p equals np and tp[p] would be out of bounds. */
            if(p<np && fabs(t-tp[p])<dt/2) {
                output_n(file_n,Nx,Ny,t,x,y,u);
                output_a(file_a,Nx,Ny,Lx,Ly,t,x,y);
                ++p;
            }
        }
        //special first step: uses the initial velocity V instead of u^(n-1)
        #pragma omp for collapse(2)
        for(i=1; i<Nx-1; ++i)
            for(j=1; j<Ny-1; ++j)
                un[i][j] = u[i][j]
                           +dt*V(x[i],y[j])
                           +0.5*cx2*DxDx(u,i,j)
                           +0.5*cy2*DyDy(u,i,j)
                           +0.5*dt2*f(x[i],y[j],t);
        #pragma omp single
        t += dt;
        #pragma omp for
        for(i=0; i<Nx; ++i) {
            un[i][0] = S(x[i],y[0],t);
            un[i][Ny-1] = S(x[i],y[Ny-1],t);
        }
        #pragma omp for
        for(j=0; j<Ny; ++j) {
            un[0][j] = S(x[0],y[j],t);
            un[Nx-1][j] = S(x[Nx-1],y[j],t);
        }
        //leapfrog time stepping over the remaining steps
        while(t<T) {
            //rotate time levels: uo <- u <- un
            #pragma omp for collapse(2)
            for(i=0; i<Nx; ++i)
                for(j=0; j<Ny; ++j) {
                    uo[i][j] = u[i][j];
                    u[i][j] = un[i][j];
                }
            #pragma omp for collapse(2)
            for(i=1; i<Nx-1; ++i)
                for(j=1; j<Ny-1; ++j)
                    un[i][j] = 2*u[i][j]
                               -uo[i][j]
                               +cx2*DxDx(u,i,j)
                               +cy2*DyDy(u,i,j)
                               +dt2*f(x[i],y[j],t);
            #pragma omp single
            t += dt;
#if St
            //time-dependent boundary values (skipped when S is constant)
            #pragma omp for
            for(i=0; i<Nx; ++i) {
                un[i][0] = S(x[i],y[0],t);
                un[i][Ny-1] = S(x[i],y[Ny-1],t);
            }
            #pragma omp for
            for(j=0; j<Ny; ++j) {
                un[0][j] = S(x[0],y[j],t);
                un[Nx-1][j] = S(x[Nx-1],y[j],t);
            }
#endif
            #pragma omp single
            {
                if(p<np && fabs(t-tp[p])<dt/2) {
                    output_n(file_n,Nx,Ny,t,x,y,un);
                    output_a(file_a,Nx,Ny,Lx,Ly,t,x,y);
                    ++p;
                }
            }
        }
    }
    t1 = omp_get_wtime();
    printf("Time taken: %f.\n",t1-t0);
    printf("Center value: %f.\n",u[Nx/2][Ny/2]);
    fclose(file_n);
    fclose(file_a);
    free(x);
    free(y);
    for(i=0; i<Nx; ++i) {
        free(un[i]);
        free(u[i]);
        free(uo[i]);
    }
    free(un);
    free(u);
    free(uo);
    return 0;
}
void output_n(FILE* file, size_t Nx, size_t Ny,
double t, double* x, double* y, double** u)
{
for(size_t i=0; i<Nx; ++i)
for(size_t j=0; j<Ny; ++j)
fprintf(file,"%f\t%f\t%f\t%f\n",t,x[i],y[j],u[i][j]);
}
/* Write the analytic solution ua at time t as tab-separated "t x y u" rows,
 * in the same layout as output_n so the two files compare line by line. */
void output_a(FILE* file, size_t Nx, size_t Ny, double Lx, double Ly,
              double t, double* x, double* y)
{
    size_t i, j;
    for(i=0; i<Nx; ++i) {
        for(j=0; j<Ny; ++j) {
            fprintf(file,"%f\t%f\t%f\t%f\n",t,x[i],y[j],ua(x[i],y[j],t));
        }
    }
}
|
convolution_5x5_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv5x5s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1] \n" // r04 r05 r06 r07
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2] \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3] \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4] \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5] \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%1, #256] \n"
"vld1.u16 {d12-d15}, [%1 :64] \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64] \n" // r14 r15 r16 r17
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64] \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64] \n" // r34 r35 r36 r37
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64] \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
// Inner kernel for TWO adjacent output positions (outw tail handled 2 at a time).
// Data layout (established by the instructions below):
//   - Inputs r0..r4 (%1..%5) and the kernel stream kptr (%6) hold u16 values that
//     are widened to f32 via "shll #16" and then consumed by f32 fmla — i.e. each
//     u16 is the high half of an IEEE fp32 bit pattern (bf16 storage).
//   - %0 (outptr0) is read-modify-write: two f32x4 sums are loaded into v20/v21,
//     accumulated, and stored back with post-increment #32.
//   - v22/v23 start fresh via "fmul" and act as second accumulator chains
//     (doubles the fmla dependency distance); they are folded into v20/v21 with
//     two "fadd" at the end.
//   - r0..r4 each advance by #16 (two 4-lane u16 positions) per tile; the extra
//     taps r?2..r?5 are loaded WITHOUT post-increment so they overlap the next tile.
//   - kptr streams 768 bytes of kernel data and is rewound at the end
//     ("sub %6, %6, #768"), so the same kernel block is reused for every tile.
asm volatile(
// ---- row 0: taps from r00..r05 (%1) ----
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%0, #256] \n"
// load the two running output sums (read-modify-write of outptr0)
"ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1
// fmul (not fmla): v22/v23 begin independent partial-sum chains
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%1] \n" // r02 r03 r04 r05
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
// ---- row 1: taps from r10..r15 (%2) ----
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r10 r11
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%2] \n" // r12 r13 r14 r15
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
// ---- row 2: taps from r20..r25 (%3) ----
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r20 r21
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%3] \n" // r22 r23 r24 r25
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
// ---- row 3: taps from r30..r35 (%4) ----
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r30 r31
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%4] \n" // r32 r33 r34 r35
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
// ---- row 4: taps from r40..r45 (%5) ----
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r40 r41
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%5] \n" // r42 r43 r44 r45
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
// last kernel group: no prefetch and no post-increment — kptr is rewound below
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
// fold the secondary accumulator chains into the main sums
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
// rewind kernel pointer so the same weights are reused for the next tile
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%1, #128] \n"
"vld1.u16 {d2-d3}, [%1 :64]! \n" // r00 r01
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%1, #256] \n"
"vld1.u16 {d8-d11}, [%1 :64] \n" // r02 r03 r04 r05
"vshll.u16 q8, d20, #16 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1
"vmul.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmul.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r10 r11
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d8-d11}, [%2 :64] \n" // r12 r13 r14 r15
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r20 r21
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d8-d11}, [%3 :64] \n" // r22 r23 r24 r25
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r30 r31
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d8-d11}, [%4 :64] \n" // r32 r33 r34 r35
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r40 r41
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d8-d11}, [%5 :64] \n" // r42 r43 r44 r45
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n" // r00
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%1] \n" // r01 r02 r03 r04
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n" // sum0
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n" // r10
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%2] \n" // r11 r12 r13 r14
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n" // r20
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%3] \n" // r21 r22 r23 r24
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n" // r30
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%4] \n" // r31 r32 r33 r34
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n" // r40
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%5] \n" // r41 r42 r43 r44
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%1, #64] \n"
"vld1.u16 {d1}, [%1 :64]! \n" // r00
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%1, #256] \n"
"vld1.u16 {d6-d9}, [%1 :64] \n" // r01 r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n" // r10
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d6-d9}, [%2 :64] \n" // r11 r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3 :64]! \n" // r20
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d6-d9}, [%3 :64] \n" // r21 r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4 :64]! \n" // r30
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d6-d9}, [%4 :64] \n" // r31 r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n" // r40
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d6-d9}, [%5 :64] \n" // r41 r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q12, q12, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2] \n" // r04 r05 r06 r07
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3] \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4] \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5] \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6] \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
// ARMv7 NEON path (pairs with the aarch64 branch above): computes 4 output
// positions in one pass, accumulating float32 sums in q12-q15.
// Inputs look like bf16 stored as u16: each load is widened to f32 with
// "vshll.u16 #16" (bf16 -> f32 by shifting into the high half), the MACs run
// in f32, and the sums are narrowed back to bf16 with "vshrn.u32 #16"
// (truncating round-toward-zero) before the store -- TODO confirm against
// the kernel this file belongs to.
// Operands: %0 = outptr0_bf16 (bf16 output), %1 = outptr0 (f32 partial sums,
// read once via vldm), %2..%6 = input rows r0..r4, %7 = kptr (kernel,
// advanced through 24 loads of 32 bytes and rewound by 768 at the end).
// Kernel loads are double-buffered between q8-q11 and q10/q11/q8/q9 so the
// widening of the next taps overlaps the MACs of the current ones.
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n" // sum0 sum1 sum2 sum3
// --- input row 0, columns 0-3 (widened bf16 -> f32 in q0-q3) ---
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// --- input row 0, columns 4-7 (q4-q7); note no post-increment on %2:
// the last 4 columns overlap the next output position's window ---
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64] \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// --- input row 1, columns 0-3; q0-q3 are recycled for the new row ---
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// --- input row 1, columns 4-7 (no post-increment on %3, same overlap) ---
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64] \n" // r14 r15 r16 r17
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// --- input row 2, columns 0-3 ---
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// --- input row 2, columns 4-7 ---
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64] \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// --- input row 3, columns 0-3 ---
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// --- input row 3, columns 4-7 ---
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64] \n" // r34 r35 r36 r37
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// --- input row 4, columns 0-3 ---
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// --- input row 4, columns 4-7 ---
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64] \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
// last kernel load: no post-increment, so the sub below only has to undo
// the 24 incremented loads (24 * 32 bytes = 768)
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
// f32 -> bf16: keep the top 16 bits of each sum (truncation) and store the
// 4 narrowed sums to the bf16 output pointer
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
// q4-q7 (d8-d15) are callee-saved VFP registers; listing them here makes
// the compiler preserve them around this asm
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// Pair tail loop: computes 2 output pixels per iteration along the output row.
// Inline-asm operands: %0 = outptr0_bf16 (bf16 result store), %1 = outptr0 (fp32 partial sums,
// consumed), %2..%6 = r0..r4 (five bf16 input row pointers), %7 = kptr (bf16 kernel weights).
// bf16 inputs/weights are widened to fp32 by shifting left 16 bits (shll/vshll #16),
// accumulated with fmla/vmla, then narrowed back to bf16 by truncation (shrn/vshrn #16).
// kptr advances through 24 post-incremented 32-byte weight loads plus one final
// non-incrementing load, and is rewound by 768 bytes at the end so the same weight
// block is reused for the next pair of output positions.
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
// AArch64 path: v20/v21 and v22/v23 hold two interleaved accumulator pairs for the
// two output pixels; they are summed (fadd) before narrowing to bf16.
asm volatile(
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v20.4s, v21.4s}, [%1], #32 \n" // sum0 sum1
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%2] \n" // r02 r03 r04 r05
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r10 r11
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%3] \n" // r12 r13 r14 r15
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r20 r21
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%4] \n" // r22 r23 r24 r25
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r30 r31
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%5] \n" // r32 r33 r34 r35
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v0.4h, v1.4h}, [%6], #16 \n" // r40 r41
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%6] \n" // r42 r43 r44 r45
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
// final weight load deliberately does not post-increment; total advance stays 768 bytes
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
// merge the two interleaved accumulators per output pixel, then truncate fp32->bf16
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"st1 {v20.4h, v21.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
// ARMv7 NEON path: same computation; q12/q13 and q14/q15 are the two accumulator
// pairs for the two output pixels, combined with vadd before narrowing to bf16.
asm volatile(
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d8-d11}, [%2 :64] \n" // r02 r03 r04 r05
"vshll.u16 q8, d20, #16 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n" // sum0 sum1
"vmul.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmul.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r10 r11
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d8-d11}, [%3 :64] \n" // r12 r13 r14 r15
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d8-d11}, [%4 :64] \n" // r22 r23 r24 r25
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r30 r31
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d8-d11}, [%5 :64] \n" // r32 r33 r34 r35
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d8-d11}, [%6 :64] \n" // r42 r43 r44 r45
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
// final weight load deliberately does not post-increment; total advance stays 768 bytes
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
// merge the two interleaved accumulators per output pixel, then truncate fp32->bf16
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vst1.u16 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n" // r00
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%2] \n" // r01 r02 r03 r04
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v20.4s}, [%1], #16 \n" // sum0
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n" // r10
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%3] \n" // r11 r12 r13 r14
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n" // r20
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%4] \n" // r21 r22 r23 r24
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n" // r30
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%5] \n" // r31 r32 r33 r34
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%6, #64] \n"
"ld1 {v0.4h}, [%6], #8 \n" // r40
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%6] \n" // r41 r42 r43 r44
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"st1 {v20.4h}, [%0], #8 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n" // r00
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1 :128]! \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d6-d9}, [%2 :64] \n" // r01 r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3 :64]! \n" // r10
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d6-d9}, [%3 :64] \n" // r11 r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4 :64]! \n" // r20
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d6-d9}, [%4 :64] \n" // r21 r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n" // r30
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d6-d9}, [%5 :64] \n" // r31 r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #64] \n"
"vld1.u16 {d1}, [%6 :64]! \n" // r40
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d6-d9}, [%6 :64] \n" // r41 r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q12, q12, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
}
}
static void conv5x5s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n" // r04 r05 r06 r07
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%1] \n" // r08 r09 r010
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%2] \n" // r18 r19 r110
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%3] \n" // r28 r29 r210
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%4] \n" // r38 r39 r310
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5], #32 \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%5] \n" // r48 r49 r410
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #256] \n"
"vld1.u16 {d12-d15}, [%1 :64]! \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #128] \n"
"vld1.u16 {d2-d3}, [%1 :64]! \n" // r08 r09
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #64] \n"
"vld1.u16 {d5}, [%1 :64] \n" // r010
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r14 r15 r16 r17
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%2, #128] \n"
"vld1.u16 {d10-d11}, [%2 :64]! \n" // r18 r19
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%2, #64] \n"
"vld1.u16 {d13}, [%2 :64] \n" // r110
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64]! \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r28 r29
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #64] \n"
"vld1.u16 {d5}, [%3 :64] \n" // r210
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r34 r35 r36 r37
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%4, #128] \n"
"vld1.u16 {d10-d11}, [%4 :64]! \n" // r38 r39
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%4, #64] \n"
"vld1.u16 {d13}, [%4 :64] \n" // r310
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64]! \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r48 r49
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%5, #64] \n"
"vld1.u16 {d5}, [%5 :64] \n" // r410
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"sub %1, %1, #16 \n"
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"sub %5, %5, #16 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// Inner loop: produces TWO output pixels per iteration.
// From the visible addressing pattern this is a 5-row convolution over
// bfloat16 data with 4-element channel packing (pack4-in / pack4-out):
//   - five input row pointers r0..r4 (%1..%5); each spatial tap is loaded
//     as 4 x u16 (one packed position) and widened to fp32,
//   - weight groups of 16 bf16 are streamed from kptr (%6); the pointer is
//     rewound by 768 bytes at the end (the final group is loaded without
//     post-increment, hence "kptr -= 24 * 16" halfwords),
//   - "shll/vshll #16" converts bf16 -> fp32 by shifting the 16-bit value
//     into the upper half of a 32-bit lane (no rounding needed),
//   - the accumulators for the second output pixel are fed from inputs two
//     packed positions after the first (e.g. v2/v4/v6 vs v0/v2/v4), i.e.
//     the convolution stride is 2.
// NOTE(review): the 5x5 / stride-2 / pack4 interpretation is inferred from
// the register usage; confirm against the enclosing function's setup code.
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
// AArch64 path. Running sums for the two pixels live in v20/v21 (loaded
// from outptr0) with parallel partial sums in v22/v23; the two "fadd"s
// before the store fold them together. Loads and widen/multiply work are
// interleaved by hand to hide load latency.
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1
"fmul v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmul v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%1] \n" // r04 r05 r06
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%2] \n" // r14 r15 r16
"shll v17.4s, v17.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%3] \n" // r24 r25 r26
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r30 r31 r32 r33
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%4] \n" // r34 r35 r36
"shll v17.4s, v17.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r40 r41 r42 r43
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%5] \n" // r44 r45 r46
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
// last weight group: loaded WITHOUT post-increment, so the rewind below
// only needs to undo the 24 incremented loads
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
// fold the two partial accumulator pairs and store both output pixels
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
// ARMv7 NEON path: same computation with q-registers. Running sums live
// in q12/q13 (loaded from outptr0) with partial sums in q14/q15, merged
// by the two "vadd.f32" instructions before the store. vshll.u16 #16
// performs the same bf16 -> fp32 widening as "shll" above.
asm volatile(
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%1, #192] \n"
"vld1.u16 {d10-d12}, [%1 :64] \n" // r04 r05 r06
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d13[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d10-d12}, [%2 :64] \n" // r14 r15 r16
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d10-d12}, [%3 :64] \n" // r24 r25 r26
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d13[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d10-d12}, [%4 :64] \n" // r34 r35 r36
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%5, #192] \n"
"vld1.u16 {d10-d12}, [%5 :64] \n" // r44 r45 r46
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
// last weight group: loaded WITHOUT post-increment, so the rewind below
// only needs to undo the 24 incremented loads
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
// fold the two partial accumulator pairs and store both output pixels
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n" // sum0
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%1] \n" // r02 r03 r04
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r10 r11
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%2] \n" // r12 r13 r14
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r20 r21
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%3] \n" // r22 r23 r24
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r30 r31
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%4] \n" // r32 r33 r34
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r40 r41
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%5] \n" // r42 r43 r44
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
// Fold the four partial accumulators v20..v23 into v20, rewind the kernel
// pointer to the start of this channel's weights (24 loads x 16 u16
// elements = 768 bytes, per the comment below), then store the finished
// sum for this output position.
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
// ARMv7 NEON variant of the same 5x5 accumulation as the AArch64 branch
// above: %1..%5 walk five input rows, %6 walks the kernel weights, %0 is
// the f32 accumulator/output. NOTE(review): inputs are u16 and every
// "vshll.u16 ..., #16" widens 16-bit lanes into the top half of 32-bit
// lanes -- this looks like a bf16 -> f32 conversion (TODO confirm against
// the surrounding file).
asm volatile(
"pld [%1, #128] \n"
"vld1.u16 {d2-d3}, [%1 :64]! \n" // r00 r01
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%1, #192] \n"
"vld1.u16 {d6-d8}, [%1 :64] \n" // r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r10 r11
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d6-d8}, [%2 :64] \n" // r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r20 r21
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d6-d8}, [%3 :64] \n" // r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r30 r31
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d6-d8}, [%4 :64] \n" // r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r40 r41
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%5, #192] \n"
"vld1.u16 {d6-d8}, [%5 :64] \n" // r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
// Fold the four partial accumulators q12..q15 into q12, rewind the kernel
// pointer (24 loads x 16 u16 elements = 768 bytes, per the comment below),
// and store the finished sum0 vector.
"vadd.f32 q14, q13, q14 \n"
"vadd.f32 q15, q14, q15 \n"
"vadd.f32 q12, q12, q15 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// Advance each of the five input-row pointers past the unconsumed tail
// of the current row (tailstep is computed outside this view --
// presumably width minus the span consumed above; TODO confirm) so they
// point at the start of the next output row's input window.
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
// Process the remaining input channels one at a time (q continues from
// the blocked loop above).
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
// Five consecutive input rows feed the 5-tap vertical window of the
// 5x5 kernel.
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
// Weights for this (output channel p, input channel q) pair.
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// Unrolled x4 over output width: each iteration produces four
// adjacent output vectors.
for (; j + 3 < outw; j += 4)
{
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" // r04 r05 r06 r07
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" // sum0 sum1 sum2 sum3
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%2] \n" // r08 r09 r010
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%3] \n" // r18 r19 r110
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%4] \n" // r28 r29 r210
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5], #32 \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%5] \n" // r38 r39 r310
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%6, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%6] \n" // r48 r49 r410
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64]! \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r08 r09
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #64] \n"
"vld1.u16 {d5}, [%2 :64] \n" // r010
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r14 r15 r16 r17
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #128] \n"
"vld1.u16 {d10-d11}, [%3 :64]! \n" // r18 r19
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #64] \n"
"vld1.u16 {d13}, [%3 :64] \n" // r110
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r28 r29
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #64] \n"
"vld1.u16 {d5}, [%4 :64] \n" // r210
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r34 r35 r36 r37
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #128] \n"
"vld1.u16 {d10-d11}, [%5 :64]! \n" // r38 r39
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #64] \n"
"vld1.u16 {d13}, [%5 :64] \n" // r310
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64]! \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r48 r49
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%6, #64] \n"
"vld1.u16 {d5}, [%6 :64] \n" // r410
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"sub %5, %5, #16 \n"
"sub %6, %6, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// Width tail: compute 2 output pixels (pack4, 4 channels each) per
// iteration. Activations (rows r0..r4, operands %2..%6) and kernel
// weights (kptr, operand %7) are stored as bf16 in u16 lanes; every
// "shll/vshll #16" widens bf16 -> fp32 before the multiply-accumulate,
// and the final "shrn/vshrn #16" truncates the fp32 sums back to bf16
// for the store through outptr0_bf16 (%0). %1 (outptr0) supplies the
// running fp32 partial sums loaded at the top of each iteration.
// kptr is rewound by 768 bytes (24 * 16 bf16 values) at the end so the
// same weights are reused for the next pair of output pixels.
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
    // AArch64 variant: v20/v21 hold the fp32 sums loaded from %1;
    // v22/v23 are secondary accumulators started with fmul and folded
    // into v20/v21 by the two fadd instructions just before the
    // narrowing store. Kernel chunks are double-buffered through
    // v16-v19 and v24-v27 so loads overlap the fmla chain.
    asm volatile(
        "prfm pldl1keep, [%2, #256] \n"
        "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03
        "shll v0.4s, v0.4h, #16 \n"
        "shll v1.4s, v1.4h, #16 \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "shll v2.4s, v2.4h, #16 \n"
        "shll v3.4s, v3.4h, #16 \n"
        "shll v16.4s, v16.4h, #16 \n"
        "prfm pldl1keep, [%1, #256] \n"
        "ld1 {v20.4s, v21.4s}, [%1], #32 \n" // sum0 sum1
        "fmul v22.4s, v16.4s, v0.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmul v23.4s, v16.4s, v2.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v0.s[1] \n"
        "fmla v21.4s, v17.4s, v2.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v0.s[2] \n"
        "fmla v23.4s, v18.4s, v2.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v0.s[3] \n"
        "fmla v21.4s, v19.4s, v2.s[3] \n"
        "prfm pldl1keep, [%2, #192] \n"
        "ld1 {v4.4h, v5.4h, v6.4h}, [%2] \n" // r04 r05 r06
        "shll v25.4s, v25.4h, #16 \n"
        "shll v4.4s, v4.4h, #16 \n"
        "shll v5.4s, v5.4h, #16 \n"
        "shll v6.4s, v6.4h, #16 \n"
        "fmla v22.4s, v24.4s, v1.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v3.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v1.s[1] \n"
        "fmla v21.4s, v25.4s, v3.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v1.s[2] \n"
        "fmla v23.4s, v26.4s, v3.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v1.s[3] \n"
        "fmla v21.4s, v27.4s, v3.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v2.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v4.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v2.s[1] \n"
        "fmla v21.4s, v17.4s, v4.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v2.s[2] \n"
        "fmla v23.4s, v18.4s, v4.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v2.s[3] \n"
        "fmla v21.4s, v19.4s, v4.s[3] \n"
        "shll v25.4s, v25.4h, #16 \n"
        "fmla v22.4s, v24.4s, v3.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v5.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v3.s[1] \n"
        "fmla v21.4s, v25.4s, v5.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v3.s[2] \n"
        "fmla v23.4s, v26.4s, v5.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v3.s[3] \n"
        "fmla v21.4s, v27.4s, v5.s[3] \n"
        "prfm pldl1keep, [%3, #256] \n"
        "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13
        "shll v17.4s, v17.4h, #16 \n"
        "shll v0.4s, v0.4h, #16 \n"
        "shll v1.4s, v1.4h, #16 \n"
        "shll v2.4s, v2.4h, #16 \n"
        "shll v3.4s, v3.4h, #16 \n"
        "fmla v22.4s, v16.4s, v4.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v6.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v4.s[1] \n"
        "fmla v21.4s, v17.4s, v6.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v4.s[2] \n"
        "fmla v23.4s, v18.4s, v6.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v4.s[3] \n"
        "fmla v21.4s, v19.4s, v6.s[3] \n"
        "shll v25.4s, v25.4h, #16 \n"
        "fmla v22.4s, v24.4s, v0.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v2.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v0.s[1] \n"
        "fmla v21.4s, v25.4s, v2.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v0.s[2] \n"
        "fmla v23.4s, v26.4s, v2.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v0.s[3] \n"
        "fmla v21.4s, v27.4s, v2.s[3] \n"
        "prfm pldl1keep, [%3, #192] \n"
        "ld1 {v4.4h, v5.4h, v6.4h}, [%3] \n" // r14 r15 r16
        "shll v17.4s, v17.4h, #16 \n"
        "shll v4.4s, v4.4h, #16 \n"
        "shll v5.4s, v5.4h, #16 \n"
        "shll v6.4s, v6.4h, #16 \n"
        "fmla v22.4s, v16.4s, v1.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v3.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v1.s[1] \n"
        "fmla v21.4s, v17.4s, v3.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v1.s[2] \n"
        "fmla v23.4s, v18.4s, v3.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v1.s[3] \n"
        "fmla v21.4s, v19.4s, v3.s[3] \n"
        "shll v25.4s, v25.4h, #16 \n"
        "fmla v22.4s, v24.4s, v2.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v4.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v2.s[1] \n"
        "fmla v21.4s, v25.4s, v4.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v2.s[2] \n"
        "fmla v23.4s, v26.4s, v4.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v2.s[3] \n"
        "fmla v21.4s, v27.4s, v4.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v3.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v5.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v3.s[1] \n"
        "fmla v21.4s, v17.4s, v5.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v3.s[2] \n"
        "fmla v23.4s, v18.4s, v5.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v3.s[3] \n"
        "fmla v21.4s, v19.4s, v5.s[3] \n"
        "prfm pldl1keep, [%4, #256] \n"
        "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23
        "shll v25.4s, v25.4h, #16 \n"
        "shll v0.4s, v0.4h, #16 \n"
        "shll v1.4s, v1.4h, #16 \n"
        "shll v2.4s, v2.4h, #16 \n"
        "shll v3.4s, v3.4h, #16 \n"
        "fmla v22.4s, v24.4s, v4.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v6.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v4.s[1] \n"
        "fmla v21.4s, v25.4s, v6.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v4.s[2] \n"
        "fmla v23.4s, v26.4s, v6.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v4.s[3] \n"
        "fmla v21.4s, v27.4s, v6.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v0.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v2.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v0.s[1] \n"
        "fmla v21.4s, v17.4s, v2.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v0.s[2] \n"
        "fmla v23.4s, v18.4s, v2.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v0.s[3] \n"
        "fmla v21.4s, v19.4s, v2.s[3] \n"
        "prfm pldl1keep, [%4, #192] \n"
        "ld1 {v4.4h, v5.4h, v6.4h}, [%4] \n" // r24 r25 r26
        "shll v25.4s, v25.4h, #16 \n"
        "shll v4.4s, v4.4h, #16 \n"
        "shll v5.4s, v5.4h, #16 \n"
        "shll v6.4s, v6.4h, #16 \n"
        "fmla v22.4s, v24.4s, v1.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v3.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v1.s[1] \n"
        "fmla v21.4s, v25.4s, v3.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v1.s[2] \n"
        "fmla v23.4s, v26.4s, v3.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v1.s[3] \n"
        "fmla v21.4s, v27.4s, v3.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v2.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v4.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v2.s[1] \n"
        "fmla v21.4s, v17.4s, v4.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v2.s[2] \n"
        "fmla v23.4s, v18.4s, v4.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v2.s[3] \n"
        "fmla v21.4s, v19.4s, v4.s[3] \n"
        "shll v25.4s, v25.4h, #16 \n"
        "fmla v22.4s, v24.4s, v3.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v5.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v3.s[1] \n"
        "fmla v21.4s, v25.4s, v5.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v3.s[2] \n"
        "fmla v23.4s, v26.4s, v5.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v3.s[3] \n"
        "fmla v21.4s, v27.4s, v5.s[3] \n"
        "prfm pldl1keep, [%5, #256] \n"
        "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r30 r31 r32 r33
        "shll v17.4s, v17.4h, #16 \n"
        "shll v0.4s, v0.4h, #16 \n"
        "shll v1.4s, v1.4h, #16 \n"
        "shll v2.4s, v2.4h, #16 \n"
        "shll v3.4s, v3.4h, #16 \n"
        "fmla v22.4s, v16.4s, v4.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v6.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v4.s[1] \n"
        "fmla v21.4s, v17.4s, v6.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v4.s[2] \n"
        "fmla v23.4s, v18.4s, v6.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v4.s[3] \n"
        "fmla v21.4s, v19.4s, v6.s[3] \n"
        "shll v25.4s, v25.4h, #16 \n"
        "fmla v22.4s, v24.4s, v0.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v2.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v0.s[1] \n"
        "fmla v21.4s, v25.4s, v2.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v0.s[2] \n"
        "fmla v23.4s, v26.4s, v2.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v0.s[3] \n"
        "fmla v21.4s, v27.4s, v2.s[3] \n"
        "prfm pldl1keep, [%5, #192] \n"
        "ld1 {v4.4h, v5.4h, v6.4h}, [%5] \n" // r34 r35 r36
        "shll v17.4s, v17.4h, #16 \n"
        "shll v4.4s, v4.4h, #16 \n"
        "shll v5.4s, v5.4h, #16 \n"
        "shll v6.4s, v6.4h, #16 \n"
        "fmla v22.4s, v16.4s, v1.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v3.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v1.s[1] \n"
        "fmla v21.4s, v17.4s, v3.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v1.s[2] \n"
        "fmla v23.4s, v18.4s, v3.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v1.s[3] \n"
        "fmla v21.4s, v19.4s, v3.s[3] \n"
        "shll v25.4s, v25.4h, #16 \n"
        "fmla v22.4s, v24.4s, v2.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v4.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v2.s[1] \n"
        "fmla v21.4s, v25.4s, v4.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v2.s[2] \n"
        "fmla v23.4s, v26.4s, v4.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v2.s[3] \n"
        "fmla v21.4s, v27.4s, v4.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v3.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v5.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v3.s[1] \n"
        "fmla v21.4s, v17.4s, v5.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v3.s[2] \n"
        "fmla v23.4s, v18.4s, v5.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v3.s[3] \n"
        "fmla v21.4s, v19.4s, v5.s[3] \n"
        "prfm pldl1keep, [%6, #256] \n"
        "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n" // r40 r41 r42 r43
        "shll v25.4s, v25.4h, #16 \n"
        "shll v0.4s, v0.4h, #16 \n"
        "shll v1.4s, v1.4h, #16 \n"
        "shll v2.4s, v2.4h, #16 \n"
        "shll v3.4s, v3.4h, #16 \n"
        "fmla v22.4s, v24.4s, v4.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v6.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v4.s[1] \n"
        "fmla v21.4s, v25.4s, v6.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v4.s[2] \n"
        "fmla v23.4s, v26.4s, v6.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v4.s[3] \n"
        "fmla v21.4s, v27.4s, v6.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v0.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v2.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v0.s[1] \n"
        "fmla v21.4s, v17.4s, v2.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v0.s[2] \n"
        "fmla v23.4s, v18.4s, v2.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v0.s[3] \n"
        "fmla v21.4s, v19.4s, v2.s[3] \n"
        "prfm pldl1keep, [%6, #192] \n"
        "ld1 {v4.4h, v5.4h, v6.4h}, [%6] \n" // r44 r45 r46
        "shll v25.4s, v25.4h, #16 \n"
        "shll v4.4s, v4.4h, #16 \n"
        "shll v5.4s, v5.4h, #16 \n"
        "shll v6.4s, v6.4h, #16 \n"
        "fmla v22.4s, v24.4s, v1.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
        "fmla v23.4s, v24.4s, v3.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v1.s[1] \n"
        "fmla v21.4s, v25.4s, v3.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v1.s[2] \n"
        "fmla v23.4s, v26.4s, v3.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v1.s[3] \n"
        "fmla v21.4s, v27.4s, v3.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v2.s[0] \n"
        "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
        "fmla v23.4s, v16.4s, v4.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v2.s[1] \n"
        "fmla v21.4s, v17.4s, v4.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v2.s[2] \n"
        "fmla v23.4s, v18.4s, v4.s[2] \n"
        "shll v24.4s, v24.4h, #16 \n"
        "fmla v20.4s, v19.4s, v2.s[3] \n"
        "fmla v21.4s, v19.4s, v4.s[3] \n"
        "shll v25.4s, v25.4h, #16 \n"
        "fmla v22.4s, v24.4s, v3.s[0] \n"
        // final kernel chunk: no post-increment, kptr is rewound below
        // "prfm pldl1keep, [%7, #256] \n"
        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
        "fmla v23.4s, v24.4s, v5.s[0] \n"
        "shll v26.4s, v26.4h, #16 \n"
        "fmla v20.4s, v25.4s, v3.s[1] \n"
        "fmla v21.4s, v25.4s, v5.s[1] \n"
        "shll v27.4s, v27.4h, #16 \n"
        "fmla v22.4s, v26.4s, v3.s[2] \n"
        "fmla v23.4s, v26.4s, v5.s[2] \n"
        "shll v16.4s, v16.4h, #16 \n"
        "fmla v20.4s, v27.4s, v3.s[3] \n"
        "fmla v21.4s, v27.4s, v5.s[3] \n"
        "shll v17.4s, v17.4h, #16 \n"
        "fmla v22.4s, v16.4s, v4.s[0] \n"
        "fmla v23.4s, v16.4s, v6.s[0] \n"
        "shll v18.4s, v18.4h, #16 \n"
        "fmla v20.4s, v17.4s, v4.s[1] \n"
        "fmla v21.4s, v17.4s, v6.s[1] \n"
        "shll v19.4s, v19.4h, #16 \n"
        "fmla v22.4s, v18.4s, v4.s[2] \n"
        "fmla v23.4s, v18.4s, v6.s[2] \n"
        "fmla v20.4s, v19.4s, v4.s[3] \n"
        "fmla v21.4s, v19.4s, v6.s[3] \n"
        // merge the two accumulator banks, then narrow fp32 -> bf16
        "fadd v20.4s, v20.4s, v22.4s \n"
        "fadd v21.4s, v21.4s, v23.4s \n"
        "sub %7, %7, #768 \n" // kptr -= 24 * 16;
        "shrn v20.4h, v20.4s, #16 \n"
        "shrn v21.4h, v21.4s, #16 \n"
        "st1 {v20.4h, v21.4h}, [%0], #16 \n"
        : "=r"(outptr0_bf16), // %0
        "=r"(outptr0), // %1
        "=r"(r0), // %2
        "=r"(r1), // %3
        "=r"(r2), // %4
        "=r"(r3), // %5
        "=r"(r4), // %6
        "=r"(kptr) // %7
        : "0"(outptr0_bf16),
        "1"(outptr0),
        "2"(r0),
        "3"(r1),
        "4"(r2),
        "5"(r3),
        "6"(r4),
        "7"(kptr)
        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
    // ARMv7 variant of the same 2-pixel computation: q12/q13 hold the
    // fp32 sums loaded from %1, q14/q15 are secondary accumulators
    // started with vmul and merged by the vadd pair before the
    // vshrn-based bf16 narrowing store. Kernel chunks alternate through
    // d16-d19 and d20-d23.
    asm volatile(
        "pld [%2, #256] \n"
        "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
        "vshll.u16 q0, d4, #16 \n"
        "vshll.u16 q1, d5, #16 \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vshll.u16 q2, d6, #16 \n"
        "vshll.u16 q3, d7, #16 \n"
        "vshll.u16 q8, d20, #16 \n"
        "vshll.u16 q9, d21, #16 \n"
        "pld [%1, #256] \n"
        "vld1.f32 {d24-d27}, [%1 :128]! \n" // sum0 sum1
        "vmul.f32 q14, q8, d0[0] \n"
        "vmul.f32 q15, q8, d4[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d0[1] \n"
        "vmla.f32 q13, q9, d4[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d1[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d5[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d1[1] \n"
        "vmla.f32 q13, q11, d5[1] \n"
        "pld [%2, #192] \n"
        "vld1.u16 {d10-d12}, [%2 :64] \n" // r04 r05 r06
        "vshll.u16 q11, d17, #16 \n"
        "vshll.u16 q4, d10, #16 \n"
        "vshll.u16 q5, d11, #16 \n"
        "vshll.u16 q6, d12, #16 \n"
        "vmla.f32 q14, q10, d2[0] \n"
        "vmla.f32 q15, q10, d6[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d2[1] \n"
        "vmla.f32 q13, q11, d6[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d3[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d7[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d3[1] \n"
        "vmla.f32 q13, q9, d7[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d4[0] \n"
        "vmla.f32 q15, q8, d8[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d4[1] \n"
        "vmla.f32 q13, q9, d8[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d5[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d9[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d5[1] \n"
        "vmla.f32 q13, q11, d9[1] \n"
        "vshll.u16 q11, d17, #16 \n"
        "vmla.f32 q14, q10, d6[0] \n"
        "vmla.f32 q15, q10, d10[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d6[1] \n"
        "vmla.f32 q13, q11, d10[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d7[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d11[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d7[1] \n"
        "vmla.f32 q13, q9, d11[1] \n"
        "pld [%3, #256] \n"
        "vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13
        "vshll.u16 q9, d21, #16 \n"
        "vshll.u16 q0, d4, #16 \n"
        "vshll.u16 q1, d5, #16 \n"
        "vshll.u16 q2, d6, #16 \n"
        "vshll.u16 q3, d7, #16 \n"
        "vmla.f32 q14, q8, d8[0] \n"
        "vmla.f32 q15, q8, d12[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d8[1] \n"
        "vmla.f32 q13, q9, d12[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d9[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d13[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d9[1] \n"
        "vmla.f32 q13, q11, d13[1] \n"
        "vshll.u16 q11, d17, #16 \n"
        "vmla.f32 q14, q10, d0[0] \n"
        "vmla.f32 q15, q10, d4[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d0[1] \n"
        "vmla.f32 q13, q11, d4[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d1[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d5[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d1[1] \n"
        "vmla.f32 q13, q9, d5[1] \n"
        "pld [%3, #192] \n"
        "vld1.u16 {d10-d12}, [%3 :64] \n" // r14 r15 r16
        "vshll.u16 q9, d21, #16 \n"
        "vshll.u16 q4, d10, #16 \n"
        "vshll.u16 q5, d11, #16 \n"
        "vshll.u16 q6, d12, #16 \n"
        "vmla.f32 q14, q8, d2[0] \n"
        "vmla.f32 q15, q8, d6[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d2[1] \n"
        "vmla.f32 q13, q9, d6[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d3[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d7[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d3[1] \n"
        "vmla.f32 q13, q11, d7[1] \n"
        "vshll.u16 q11, d17, #16 \n"
        "vmla.f32 q14, q10, d4[0] \n"
        "vmla.f32 q15, q10, d8[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d4[1] \n"
        "vmla.f32 q13, q11, d8[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d5[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d9[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d5[1] \n"
        "vmla.f32 q13, q9, d9[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d6[0] \n"
        "vmla.f32 q15, q8, d10[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d6[1] \n"
        "vmla.f32 q13, q9, d10[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d7[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d11[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d7[1] \n"
        "vmla.f32 q13, q11, d11[1] \n"
        "pld [%4, #256] \n"
        "vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
        "vshll.u16 q11, d17, #16 \n"
        "vshll.u16 q0, d4, #16 \n"
        "vshll.u16 q1, d5, #16 \n"
        "vshll.u16 q2, d6, #16 \n"
        "vshll.u16 q3, d7, #16 \n"
        "vmla.f32 q14, q10, d8[0] \n"
        "vmla.f32 q15, q10, d12[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d8[1] \n"
        "vmla.f32 q13, q11, d12[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d9[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d13[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d9[1] \n"
        "vmla.f32 q13, q9, d13[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d0[0] \n"
        "vmla.f32 q15, q8, d4[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d0[1] \n"
        "vmla.f32 q13, q9, d4[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d1[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d5[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d1[1] \n"
        "vmla.f32 q13, q11, d5[1] \n"
        "pld [%4, #192] \n"
        "vld1.u16 {d10-d12}, [%4 :64] \n" // r24 r25 r26
        "vshll.u16 q11, d17, #16 \n"
        "vshll.u16 q4, d10, #16 \n"
        "vshll.u16 q5, d11, #16 \n"
        "vshll.u16 q6, d12, #16 \n"
        "vmla.f32 q14, q10, d2[0] \n"
        "vmla.f32 q15, q10, d6[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d2[1] \n"
        "vmla.f32 q13, q11, d6[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d3[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d7[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d3[1] \n"
        "vmla.f32 q13, q9, d7[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d4[0] \n"
        "vmla.f32 q15, q8, d8[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d4[1] \n"
        "vmla.f32 q13, q9, d8[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d5[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d9[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d5[1] \n"
        "vmla.f32 q13, q11, d9[1] \n"
        "vshll.u16 q11, d17, #16 \n"
        "vmla.f32 q14, q10, d6[0] \n"
        "vmla.f32 q15, q10, d10[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d6[1] \n"
        "vmla.f32 q13, q11, d10[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d7[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d11[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d7[1] \n"
        "vmla.f32 q13, q9, d11[1] \n"
        "pld [%5, #256] \n"
        "vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33
        "vshll.u16 q9, d21, #16 \n"
        "vshll.u16 q0, d4, #16 \n"
        "vshll.u16 q1, d5, #16 \n"
        "vshll.u16 q2, d6, #16 \n"
        "vshll.u16 q3, d7, #16 \n"
        "vmla.f32 q14, q8, d8[0] \n"
        "vmla.f32 q15, q8, d12[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d8[1] \n"
        "vmla.f32 q13, q9, d12[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d9[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d13[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d9[1] \n"
        "vmla.f32 q13, q11, d13[1] \n"
        "vshll.u16 q11, d17, #16 \n"
        "vmla.f32 q14, q10, d0[0] \n"
        "vmla.f32 q15, q10, d4[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d0[1] \n"
        "vmla.f32 q13, q11, d4[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d1[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d5[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d1[1] \n"
        "vmla.f32 q13, q9, d5[1] \n"
        "pld [%5, #192] \n"
        "vld1.u16 {d10-d12}, [%5 :64] \n" // r34 r35 r36
        "vshll.u16 q9, d21, #16 \n"
        "vshll.u16 q4, d10, #16 \n"
        "vshll.u16 q5, d11, #16 \n"
        "vshll.u16 q6, d12, #16 \n"
        "vmla.f32 q14, q8, d2[0] \n"
        "vmla.f32 q15, q8, d6[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d2[1] \n"
        "vmla.f32 q13, q9, d6[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d3[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d7[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d3[1] \n"
        "vmla.f32 q13, q11, d7[1] \n"
        "vshll.u16 q11, d17, #16 \n"
        "vmla.f32 q14, q10, d4[0] \n"
        "vmla.f32 q15, q10, d8[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d4[1] \n"
        "vmla.f32 q13, q11, d8[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d5[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d9[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d5[1] \n"
        "vmla.f32 q13, q9, d9[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d6[0] \n"
        "vmla.f32 q15, q8, d10[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d6[1] \n"
        "vmla.f32 q13, q9, d10[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d7[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d11[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d7[1] \n"
        "vmla.f32 q13, q11, d11[1] \n"
        "pld [%6, #256] \n"
        "vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
        "vshll.u16 q11, d17, #16 \n"
        "vshll.u16 q0, d4, #16 \n"
        "vshll.u16 q1, d5, #16 \n"
        "vshll.u16 q2, d6, #16 \n"
        "vshll.u16 q3, d7, #16 \n"
        "vmla.f32 q14, q10, d8[0] \n"
        "vmla.f32 q15, q10, d12[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d8[1] \n"
        "vmla.f32 q13, q11, d12[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d9[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d13[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d9[1] \n"
        "vmla.f32 q13, q9, d13[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d0[0] \n"
        "vmla.f32 q15, q8, d4[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d0[1] \n"
        "vmla.f32 q13, q9, d4[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d1[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d5[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d1[1] \n"
        "vmla.f32 q13, q11, d5[1] \n"
        "pld [%6, #192] \n"
        "vld1.u16 {d10-d12}, [%6 :64] \n" // r44 r45 r46
        "vshll.u16 q11, d17, #16 \n"
        "vshll.u16 q4, d10, #16 \n"
        "vshll.u16 q5, d11, #16 \n"
        "vshll.u16 q6, d12, #16 \n"
        "vmla.f32 q14, q10, d2[0] \n"
        "vmla.f32 q15, q10, d6[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d2[1] \n"
        "vmla.f32 q13, q11, d6[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128]! \n"
        "vmla.f32 q14, q8, d3[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d7[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d3[1] \n"
        "vmla.f32 q13, q9, d7[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d4[0] \n"
        "vmla.f32 q15, q8, d8[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d4[1] \n"
        "vmla.f32 q13, q9, d8[1] \n"
        "pld [%7, #256] \n"
        "vld1.u16 {d16-d19}, [%7 :128]! \n"
        "vmla.f32 q14, q10, d5[0] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q15, q10, d9[0] \n"
        "vshll.u16 q10, d16, #16 \n"
        "vmla.f32 q12, q11, d5[1] \n"
        "vmla.f32 q13, q11, d9[1] \n"
        "vshll.u16 q11, d17, #16 \n"
        "vmla.f32 q14, q10, d6[0] \n"
        "vmla.f32 q15, q10, d10[0] \n"
        "vshll.u16 q8, d18, #16 \n"
        "vmla.f32 q12, q11, d6[1] \n"
        "vmla.f32 q13, q11, d10[1] \n"
        // final kernel chunk: no post-increment, kptr is rewound below
        // "pld [%7, #256] \n"
        "vld1.u16 {d20-d23}, [%7 :128] \n"
        "vmla.f32 q14, q8, d7[0] \n"
        "vshll.u16 q9, d19, #16 \n"
        "vmla.f32 q15, q8, d11[0] \n"
        "vshll.u16 q8, d20, #16 \n"
        "vmla.f32 q12, q9, d7[1] \n"
        "vmla.f32 q13, q9, d11[1] \n"
        "vshll.u16 q9, d21, #16 \n"
        "vmla.f32 q14, q8, d8[0] \n"
        "vmla.f32 q15, q8, d12[0] \n"
        "vshll.u16 q10, d22, #16 \n"
        "vmla.f32 q12, q9, d8[1] \n"
        "vmla.f32 q13, q9, d12[1] \n"
        "vshll.u16 q11, d23, #16 \n"
        "vmla.f32 q14, q10, d9[0] \n"
        "vmla.f32 q15, q10, d13[0] \n"
        "vmla.f32 q12, q11, d9[1] \n"
        "vmla.f32 q13, q11, d13[1] \n"
        // merge the two accumulator banks, then narrow fp32 -> bf16
        "vadd.f32 q12, q12, q14 \n"
        "vadd.f32 q13, q13, q15 \n"
        "sub %7, %7, #768 \n" // kptr -= 24 * 16;
        "vshrn.u32 d24, q12, #16 \n"
        "vshrn.u32 d25, q13, #16 \n"
        "vst1.u16 {d24-d25}, [%0 :64]! \n"
        : "=r"(outptr0_bf16), // %0
        "=r"(outptr0), // %1
        "=r"(r0), // %2
        "=r"(r1), // %3
        "=r"(r2), // %4
        "=r"(r3), // %5
        "=r"(r4), // %6
        "=r"(kptr) // %7
        : "0"(outptr0_bf16),
        "1"(outptr0),
        "2"(r0),
        "3"(r1),
        "4"(r2),
        "5"(r3),
        "6"(r4),
        "7"(kptr)
        : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v20.4s}, [%1], #16 \n" // sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%2] \n" // r02 r03 r04
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r10 r11
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%3] \n" // r12 r13 r14
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r20 r21
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%4] \n" // r22 r23 r24
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r30 r31
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%5] \n" // r32 r33 r34
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v0.4h, v1.4h}, [%6], #16 \n" // r40 r41
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%6, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%6] \n" // r42 r43 r44
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"st1 {v20.4h}, [%0], #8 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1 :128]! \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d6-d8}, [%2 :64] \n" // r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r10 r11
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d6-d8}, [%3 :64] \n" // r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d6-d8}, [%4 :64] \n" // r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r30 r31
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%5, #192] \n"
"vld1.u16 {d6-d8}, [%5 :64] \n" // r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #192] \n"
"vld1.u16 {d6-d8}, [%6 :64] \n" // r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q14, q13, q14 \n"
"vadd.f32 q15, q14, q15 \n"
"vadd.f32 q12, q12, q15 \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
}
|
GB_unaryop__abs_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_uint8
// op(A') function: GB_tran__abs_bool_uint8
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// Type of the entries of the input matrix A
#define GB_ATYPE \
    uint8_t

// Type of the entries of the output matrix C
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// How an entry of the output array Cx is accessed
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all anz entries; for bool the abs operator is
// the identity, so the cast is the only real work.  Returns GrB_NO_VALUE
// when this kernel is compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__abs_bool_uint8
(
    bool *Cx,           // Cx and Ax may be aliased
    uint8_t *Ax,        // input values, dense array of length anz
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each iteration touches only Cx [p] and Ax [p], so the loop is safely
    // parallel even when Cx aliases Ax
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs (cast (A')): transpose A, typecast uint8_t -> bool, and apply the
// (identity-on-bool) abs operator.  The actual loop lives in the textually
// included template GB_unaryop_transpose.c, specialized via the GB_* macros
// defined above.  Returns GrB_NO_VALUE when compiled out, else GrB_SUCCESS.
GrB_Info GB_tran__abs_bool_uint8
(
    GrB_Matrix C,                       // output matrix, C = op (A')
    const GrB_Matrix A,                 // input matrix
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts workspace
    GBI_single_iterator Iter,           // iterator over the slices of A
    const int64_t *GB_RESTRICT A_slice, // slice boundaries of A
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB098-simd2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/*
Two-dimensional array computation with a vectorization directive.
collapse(2) makes simd associate with 2 loops.
Loop iteration variables should be predetermined as lastprivate.
*/
int main()
{
  int len=100;
  /* Three 100x100 VLAs (~240 KB total) on the stack -- fine for a default
     8 MB stack, but worth noting if len is ever raised. */
  double a[len][len], b[len][len], c[len][len];
  int i,j;
  /* Initialization.  NOTE(review): the inner "parallel for" nests a parallel
     region inside every outer iteration; with nested parallelism disabled
     (the usual default) the inner directive is effectively inert.  Both i
     and j are explicitly privatized, so there is no data race here. */
#pragma omp parallel for private(i, j)
  for (i=0;i<len;i++)
#pragma omp parallel for private(j)
    for (j=0;j<len;j++)
    {
      a[i][j]=((double)i)/2.0;
      b[i][j]=((double)i)/3.0;
      c[i][j]=((double)i)/7.0;
    }
  /* Element-wise product.  collapse(2) fuses both loops into a single
     iteration space; the collapsed loop variables are private per the
     OpenMP standard (j is also privatized explicitly). */
#pragma omp parallel for private(j) collapse(2)
  for (i=0;i<len;i++)
    for (j=0;j<len;j++)
      c[i][j]=a[i][j]*b[i][j];
  /* c[50][50] = (50/2.0)*(50/3.0) -- printed as a correctness witness. */
  printf ("c[50][50]=%f\n",c[50][50]);
  return 0;
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/feature.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel state for the Canny filter, one entry per pixel in canny_cache.
*/
typedef struct _CannyInfo
{
  double
    magnitude,   /* gradient magnitude, hypot(dx,dy) */
    intensity;   /* magnitude surviving non-maxima suppression (0 if culled) */

  int
    orientation; /* quantized gradient direction, 0..3 (steps of 45 degrees) */

  ssize_t
    x,
    y;           /* pixel coordinates when the entry is used as a work item */
} CannyInfo;
/*
  IsAuthenticPixel() returns MagickTrue when the coordinate (x,y) lies
  inside the image raster, MagickFalse otherwise.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x >= 0) && (x < (ssize_t) image->columns) &&
      (y >= 0) && (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  TraceEdges() marks (x,y) as an edge pixel, then follows its 8-connected
  neighbors, promoting any neighbor whose cached gradient intensity meets
  lower_threshold (hysteresis).  Row y=0 of canny_cache is reused as an
  explicit work stack of CannyInfo entries, with i as the stack depth --
  this bounds what a recursive trace would otherwise put on the call stack.
  Returns MagickFalse on any cache/matrix access failure.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,   /* entry currently being expanded */
    pixel;  /* neighbor's cached gradient data */

  MagickBooleanType
    status;

  register Quantum
    *q;

  register ssize_t
    i;

  /*
    Mark the seed pixel as an edge.
  */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  *q=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Push the seed coordinates into stack slot 0.
  */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /*
      Pop the next pixel to expand.
    */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (Quantum *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            /*
              Promote the neighbor to an edge and push it for later expansion.
              NOTE(review): edge.x/edge.y are mutated in place here, so the
              remaining u/v offsets of this sweep are taken relative to the
              newly promoted pixel, not the popped one -- confirm intended.
            */
            *q=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MagickPathExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise: two directional blurs (0 and 90 degrees) applied as a
    single convolution morphology.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image, cached per pixel.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 2-row window (columns+1 wide) so the 2x2 gradient kernel can
      read one pixel right and one row down of every pixel in this row.
    */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /*
        2x2 Roberts-style difference kernels: Gx differences columns, Gy
        differences rows.
      */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      /* zeroes intensity too; the NMS pass below fills it in */
      (void) ResetMagickMemory(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      pixel.orientation=0;
      /*
        Quantize the gradient direction into 4 bins at 45-degree steps, by
        comparing the slope against tan(22.5) and tan(67.5).
      */
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  /*
    NOTE(review): intensity was zero-initialized by the gradient pass, so
    min/max seed at 0.0 here rather than at a computed intensity -- confirm
    intended.
  */
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      /*
        Compare each pixel against its two neighbors along the quantized
        gradient direction; only local maxima keep their magnitude.
      */
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
      /*
        Track the global intensity range for the hysteresis thresholds; the
        per-pixel critical section serializes the update across threads.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold: seed from strong pixels, grow via TraceEdges().
    This loop is intentionally serial (no omp for) because TraceEdges()
    mutates shared state.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): this critical section is redundant here since the
          enclosing loop is serial -- presumably a leftover from a parallel
          variant.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageFeatures(image,1,exception);
% contrast=channel_features[RedPixelChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageFeatures method is:
%
% ChannelFeatures *GetImageFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
PixelInfo
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
PixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i,
r;
size_t
length;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=MaxPixelChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (PixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].alpha=(~0U);
grays[i].black=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(image,p))].red=
ScaleQuantumToMap(GetPixelRed(image,p));
grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green=
ScaleQuantumToMap(GetPixelGreen(image,p));
grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue=
ScaleQuantumToMap(GetPixelBlue(image,p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black=
ScaleQuantumToMap(GetPixelBlack(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha=
ScaleQuantumToMap(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) ResetMagickMemory(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].black != ~0U)
grays[gray.black++].black=grays[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if (grays[i].alpha != ~0U)
grays[gray.alpha++].alpha=grays[i].alpha;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.black > number_grays)
number_grays=gray.black;
if (image->alpha_trait != UndefinedPixelTrait)
if (gray.alpha > number_grays)
number_grays=gray.alpha;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) ResetMagickMemory(&correlation,0,sizeof(correlation));
(void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) ResetMagickMemory(&mean,0,sizeof(mean));
(void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
(void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x));
(void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy));
(void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1));
(void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2));
(void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y));
(void) ResetMagickMemory(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) ResetMagickMemory(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
ssize_t
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+
2*distance,distance+2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=distance*GetPixelChannels(image);;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p)))
u++;
while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p)))
u++;
while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].black++;
cooccurrence[v][u].direction[i].black++;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
u=0;
v=0;
while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p)))
u++;
while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].alpha++;
cooccurrence[v][u].direction[i].alpha++;
}
}
p+=GetPixelChannels(image);
}
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].black*=normalize;
if (image->alpha_trait != UndefinedPixelTrait)
cooccurrence[x][y].direction[i].alpha*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BluePixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].black*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].alpha*
cooccurrence[x][y].direction[i].alpha;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].black+=x*y*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
correlation.direction[i].alpha+=x*y*
cooccurrence[x][y].direction[i].alpha;
/*
Inverse Difference Moment.
*/
channel_features[RedPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BluePixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[y+x+2].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Entropy.
*/
channel_features[RedPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BluePixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].alpha*
MagickLog10(cooccurrence[x][y].direction[i].alpha);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->alpha_trait != UndefinedPixelTrait)
density_x[x].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].black+=
cooccurrence[x][y].direction[i].black;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_y[y].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].black+=y*sum[y].direction[i].black;
sum_squares.direction[i].black+=y*y*sum[y].direction[i].black;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
mean.direction[i].alpha+=y*sum[y].direction[i].alpha;
sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedPixelChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenPixelChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BluePixelChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].correlation[i]=
(correlation.direction[i].black-mean.direction[i].black*
mean.direction[i].black)/(sqrt(sum_squares.direction[i].black-
(mean.direction[i].black*mean.direction[i].black))*sqrt(
sum_squares.direction[i].black-(mean.direction[i].black*
mean.direction[i].black)));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].correlation[i]=
(correlation.direction[i].alpha-mean.direction[i].alpha*
mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha-
(mean.direction[i].alpha*mean.direction[i].alpha))*sqrt(
sum_squares.direction[i].alpha-(mean.direction[i].alpha*
mean.direction[i].alpha)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].alpha;
/*
Sum entropy.
*/
channel_features[RedPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Sum variance.
*/
channel_features[RedPixelChannel].sum_variance[i]+=
(x-channel_features[RedPixelChannel].sum_entropy[i])*
(x-channel_features[RedPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_variance[i]+=
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_variance[i]+=
(x-channel_features[BluePixelChannel].sum_entropy[i])*
(x-channel_features[BluePixelChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_variance[i]+=
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_variance[i]+=
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].alpha;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=(y-mean.direction[i].black+1)*
(y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)*
(y-mean.direction[i].alpha+1)*
cooccurrence[x][y].direction[i].alpha;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy.direction[i].alpha-=
cooccurrence[x][y].direction[i].alpha*MagickLog10(
cooccurrence[x][y].direction[i].alpha);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].black-=(
cooccurrence[x][y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy1.direction[i].alpha-=(
cooccurrence[x][y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].black-=(density_x[x].direction[i].black*
density_y[y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha*
density_y[y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
}
}
channel_features[RedPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BluePixelChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].alpha;
}
/*
Compute more texture features.
*/
(void) ResetMagickMemory(&variance,0,sizeof(variance));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=density_xy[x].direction[i].alpha;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].black+=density_xy[x].direction[i].black*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha*
density_xy[x].direction[i].alpha;
/*
Difference entropy.
*/
channel_features[RedPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].black-=(density_x[x].direction[i].black*
MagickLog10(density_x[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha*
MagickLog10(density_x[x].direction[i].alpha));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].black-=(density_y[x].direction[i].black*
MagickLog10(density_y[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha*
MagickLog10(density_y[x].direction[i].alpha));
}
/*
Difference variance.
*/
channel_features[RedPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BluePixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].black)-
(variance.direction[i].black*variance.direction[i].black))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].alpha)-
(variance.direction[i].alpha*variance.direction[i].alpha))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BluePixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/
(entropy_x.direction[i].black > entropy_y.direction[i].black ?
entropy_x.direction[i].black : entropy_y.direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/
(entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ?
entropy_x.direction[i].alpha : entropy_y.direction[i].alpha);
channel_features[RedPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BluePixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black-
entropy_xy.direction[i].black)))));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha-
entropy_xy.direction[i].alpha)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
pixel.direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
*/
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/
density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black*
cooccurrence[y][x].direction[i].black/
density_x[z].direction[i].black/density_y[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
Q[z][y].direction[i].alpha+=
cooccurrence[z][x].direction[i].alpha*
cooccurrence[y][x].direction[i].alpha/
density_x[z].direction[i].alpha/
density_y[x].direction[i].alpha;
}
}
channel_features[RedPixelChannel].contrast[i]+=z*z*
pixel.direction[i].red;
channel_features[GreenPixelChannel].contrast[i]+=z*z*
pixel.direction[i].green;
channel_features[BluePixelChannel].contrast[i]+=z*z*
pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].contrast[i]+=z*z*
pixel.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].contrast[i]+=z*z*
pixel.direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BluePixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator matrix
% of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next
% it searches this space for peaks in counts and converts the locations of the
% peaks to slope and intercept in the normal x,y input image space. Use the
% slope/intercepts to find the endpoints clipped to the bounds of the image. The
% lines are then drawn. The counts are a measure of the length of the lines
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  Render the MVG line primitives (written by HoughLineImage to the temporary
  file named in image_info->filename) onto a fresh canvas of columns x rows
  and return the resulting image, or NULL on failure.
*/
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /*
    Scale the canvas by the image resolution relative to the default; a zero
    resolution is treated as the default (scale of 1.0).
  */
  draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/
    DefaultResolution;
  draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      /*
        The blob is already in memory; copy it into a NUL-terminated string.
      */
      draw_info->primitive=(char *) AcquireMagickMemory((size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) CopyMagickMemory(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
      /* NOTE(review): if the allocation fails, primitive stays NULL and is
         handed to DrawImage below — presumably DrawImage tolerates a NULL
         primitive; confirm. */
    }
  (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  Accumulate Hough votes for every bright pixel over 180 orientations, find
  local count maxima, emit one MVG "line" primitive per peak to a temporary
  file, and render the primitives via RenderHoughLines().  Returns the lines
  image (optionally followed by the accumulator image), or NULL on failure.

  Fixes vs. the previous revision: progress reporting used CannyEdgeImageTag
  and the MagickCore_CannyEdgeImage critical section (copy-paste from
  CannyEdgeImage); the locally defined HoughLineImageTag was never used.
*/
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MagickPathExtent],
    path[MagickPathExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: 180 columns (one per degree of orientation) by
    twice the half-diagonal (to cover negative signed distances from center).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: every pixel brighter than mid-gray casts one
    vote per orientation at its signed distance from the image center.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HoughLineImage)
#endif
        proceed=SetImageProgress(image,HoughLineImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write MVG primitives for each
    accumulator peak to a temporary file.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MagickPathExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  /*
    Default peak threshold is a quarter of the longest image dimension
    unless the caller supplied an explicit threshold.
  */
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima within the width x height neighborhood?
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MagickPathExtent,
            "line %g,%g %g,%g # %g\n",line.x1,line.y1,line.x2,line.y2,maxima);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsStringTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations 100
#define MeanShiftImageTag "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result is written to a direct-class clone of the input.
  */
  mean_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Iterate the mean-shift window from the current pixel until the
        spatial+color centroid converges (or MaxMeanShiftIterations).
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Scan the width x height window, keeping only pixels inside the
          elliptical radius and within color_distance of the current mean.
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                /* NOTE(review): this assignment overwrites the shared
                   'status' flag and can mask an earlier row failure;
                   presumably intentional best-effort behavior — confirm. */
                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                /* Squared RGB distance to the current mean color. */
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          count >= 1 here: the window always includes u=v=0, i.e. the pixel
          at the current mean itself, whose color distance is zero.
        */
        gamma=1.0/count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Convergence test: squared spatial shift plus squared color shift
          (each channel scaled to a 0..255 range); stop when the total
          movement is small (<= 3.0).
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /* The converged mean replaces the original pixel value. */
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MeanShiftImage)
#endif
        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
zbuffer.c | /*
* Z buffer: 16 bits Z / 16 bits color
*
*/
#include <stdlib.h>
#include <string.h>
#include "../include/zbuffer.h"
#include "msghandling.h"
/*
 * Create a Z buffer of xsize x ysize pixels in the given pixel 'mode'.
 * If 'frame_buffer' is NULL a color buffer is allocated here (and freed by
 * ZB_close); otherwise the caller's buffer is used. Returns NULL on error.
 */
ZBuffer* ZB_open(GLint xsize, GLint ysize, GLint mode,
                 void* frame_buffer) {
    ZBuffer* zb;
    GLint size;
    zb = gl_malloc(sizeof(ZBuffer));
    if (zb == NULL)
        return NULL;
    /* The width is forced down to a multiple of 4 pixels. */
    zb->xsize = xsize & ~3;
    zb->ysize = ysize;
    /* Fix: derive the row stride from the rounded width.  The previous code
       used the unrounded xsize, so when xsize % 4 != 0 the stride disagreed
       with ZB_resize (which uses the rounded width) and with ZB_copyBuffer,
       which walks rows at zb->xsize pixels while ZB_clear steps by linesize. */
    zb->linesize = (zb->xsize * PSZB);
    switch (mode) {
#if TGL_FEATURE_32_BITS == 1
    case ZB_MODE_RGBA:
        break;
#endif
#if TGL_FEATURE_16_BITS == 1
    case ZB_MODE_5R6G5B:
        break;
#endif
    default:
        /* Unsupported pixel mode. */
        goto error;
    }
    /* The depth buffer holds one 16 bit value per pixel. */
    size = zb->xsize * zb->ysize * sizeof(GLushort);
    zb->zbuf = gl_malloc(size);
    if (zb->zbuf == NULL)
        goto error;
    if (frame_buffer == NULL) {
        zb->pbuf = gl_malloc(zb->ysize * zb->linesize);
        if (zb->pbuf == NULL) {
            gl_free(zb->zbuf);
            goto error;
        }
        zb->frame_buffer_allocated = 1;
    } else {
        zb->frame_buffer_allocated = 0;
        zb->pbuf = frame_buffer;
    }
    zb->current_texture = NULL;
    return zb;
error:
    gl_free(zb);
    return NULL;
}
/*
 * Release a Z buffer created by ZB_open, including the color buffer when it
 * was allocated internally.  A NULL argument is ignored, so callers may pass
 * the result of a failed ZB_open unconditionally.
 */
void ZB_close(ZBuffer* zb) {
    if (zb == NULL)
        return;
    if (zb->frame_buffer_allocated)
        gl_free(zb->pbuf);
    gl_free(zb->zbuf);
    gl_free(zb);
}
/*
 * Resize an existing Z buffer to xsize x ysize.  The depth buffer is always
 * reallocated; the color buffer is reallocated only when no external
 * 'frame_buffer' is supplied.  On allocation failure the process exits.
 */
void ZB_resize(ZBuffer* zb, void* frame_buffer, GLint xsize, GLint ysize) {
    GLint depth_bytes;

    /* xsize must be a multiple of 4 */
    xsize &= ~3;
    zb->xsize = xsize;
    zb->ysize = ysize;
    zb->linesize = xsize * PSZB;

    /* Replace the 16-bit depth buffer. */
    depth_bytes = zb->xsize * zb->ysize * sizeof(GLushort);
    gl_free(zb->zbuf);
    zb->zbuf = gl_malloc(depth_bytes);
    if (zb->zbuf == NULL)
        exit(1);

    /* Drop any color buffer we own, then adopt or allocate a new one. */
    if (zb->frame_buffer_allocated)
        gl_free(zb->pbuf);
    if (frame_buffer != NULL) {
        zb->pbuf = frame_buffer;
        zb->frame_buffer_allocated = 0;
    } else {
        zb->pbuf = gl_malloc(zb->ysize * zb->linesize);
        if (zb->pbuf == NULL)
            exit(1);
        zb->frame_buffer_allocated = 1;
    }
}
#if TGL_FEATURE_32_BITS == 1
/* Reverse the byte order of a 32 bit pixel: 0xAARRGGBB -> 0xBBGGRRAA. */
PIXEL pxReverse32(PIXEL x) {
    PIXEL flipped = 0;
    flipped |= (x >> 24) & 0x000000FF; /* AA moves to the low byte   */
    flipped |= (x >> 8)  & 0x0000FF00; /* RR                         */
    flipped |= (x << 8)  & 0x00FF0000; /* GG                         */
    flipped |= (x << 24) & 0xFF000000; /* BB moves to the high byte  */
    return flipped;
}
#endif
/*
 * Copy the color buffer into 'buf', whose rows are 'linesize' bytes apart.
 * With TGL_FEATURE_NO_COPY_COLOR, pixels matching the sentinel color are
 * skipped so the destination keeps its previous content there.
 *
 * Fix: the loop index 'i' in the NO_COPY_COLOR branches was used without
 * being declared, which broke compilation of that build configuration.
 */
static void ZB_copyBuffer(ZBuffer* zb, void* buf, GLint linesize) {
    GLint y;
#if TGL_FEATURE_MULTITHREADED_ZB_COPYBUFFER == 1
#pragma omp parallel for
    for (y = 0; y < zb->ysize; y++) {
        PIXEL* q;
        GLubyte* p1;
        q = zb->pbuf + y * zb->xsize;
        p1 = (GLubyte*)buf + y * linesize;
#if TGL_FEATURE_NO_COPY_COLOR == 1
        {
            GLint i;
            for (i = 0; i < zb->xsize; i++) {
                if ((*(q + i) & TGL_COLOR_MASK) != TGL_NO_COPY_COLOR)
                    *(((PIXEL*)p1) + i) = *(q + i);
            }
        }
#else
        memcpy(p1, q, linesize);
#endif
    }
#else
    for (y = 0; y < zb->ysize; y++) {
        PIXEL* q;
        GLubyte* p1;
        q = zb->pbuf + y * zb->xsize;
        p1 = (GLubyte*)buf + y * linesize;
#if TGL_FEATURE_NO_COPY_COLOR == 1
        {
            GLint i;
            for (i = 0; i < zb->xsize; i++) {
                if ((*(q + i) & TGL_COLOR_MASK) != TGL_NO_COPY_COLOR)
                    *(((PIXEL*)p1) + i) = *(q + i);
            }
        }
#else
        memcpy(p1, q, linesize);
#endif
    }
#endif
}
#if TGL_FEATURE_RENDER_BITS == 16
/* 32 bpp copy */
/*
#ifdef TGL_FEATURE_32_BITS
#define RGB16_TO_RGB32(p0,p1,v)\
{\
GLuint g,b,gb;\
g = (v & 0x07E007E0) << 5;\
b = (v & 0x001F001F) << 3;\
gb = g | b;\
p0 = (gb & 0x0000FFFF) | ((v & 0x0000F800) << 8);\
p1 = (gb >> 16) | ((v & 0xF8000000) >> 8);\
}
static void ZB_copyFrameBufferRGB32(ZBuffer * zb,
void *buf,
GLint linesize)
{
GLushort *q;
GLuint *p, *p1, v, w0, w1;
GLint y, n;
q = zb->pbuf;
p1 = (GLuint *) buf;
for (y = 0; y < zb->ysize; y++) {
p = p1;
n = zb->xsize >> 2;
do {
v = *(GLuint *) q;
RGB16_TO_RGB32(w1, w0, v);
p[0] = w0;
p[1] = w1;
v = *(GLuint *) (q + 2);
RGB16_TO_RGB32(w1, w0, v);
p[2] = w0;
p[3] = 0;
q += 4;
p += 4;
} while (--n > 0);
p1 += linesize;
}
}
*/
#endif
/* 24 bit packed pixel handling */
#ifdef TGL_FEATURE_24_BITS
/* order: RGBR GBRG BRGB */
/* XXX: packed pixel 24 bit support not tested */
/* XXX: big endian case not optimised */
/*
#if BYTE_ORDER == BIG_ENDIAN
#define RGB16_TO_RGB24(p0,p1,p2,v1,v2)\
{\
GLuint r1,g1,b1,gb1,g2,b2,gb2;\
v1 = (v1 << 16) | (v1 >> 16);\
v2 = (v2 << 16) | (v2 >> 16);\
r1 = (v1 & 0xF800F800);\
g1 = (v1 & 0x07E007E0) << 5;\
b1 = (v1 & 0x001F001F) << 3;\
gb1 = g1 | b1;\
p0 = ((gb1 & 0x0000FFFF) << 8) | (r1 << 16) | (r1 >> 24);\
g2 = (v2 & 0x07E007E0) << 5;\
b2 = (v2 & 0x001F001F) << 3;\
gb2 = g2 | b2;\
p1 = (gb1 & 0xFFFF0000) | (v2 & 0xF800) | ((gb2 >> 8) & 0xff);\
p2 = (gb2 << 24) | ((v2 & 0xF8000000) >> 8) | (gb2 >> 16);\
}
#else
#define RGB16_TO_RGB24(p0,p1,p2,v1,v2)\
{\
GLuint r1,g1,b1,gb1,g2,b2,gb2;\
r1 = (v1 & 0xF800F800);\
g1 = (v1 & 0x07E007E0) << 5;\
b1 = (v1 & 0x001F001F) << 3;\
gb1 = g1 | b1;\
p0 = ((gb1 & 0x0000FFFF) << 8) | (r1 << 16) | (r1 >> 24);\
g2 = (v2 & 0x07E007E0) << 5;\
b2 = (v2 & 0x001F001F) << 3;\
gb2 = g2 | b2;\
p1 = (gb1 & 0xFFFF0000) | (v2 & 0xF800) | ((gb2 >> 8) & 0xff);\
p2 = (gb2 << 24) | ((v2 & 0xF8000000) >> 8) | (gb2 >> 16);\
}
#endif
*/
/*
static void ZB_copyFrameBufferRGB24(ZBuffer * zb,
void *buf,
GLint linesize)
{
GLushort *q;
GLuint *p, *p1, w0, w1, w2, v0, v1;
GLint y, n;
q = zb->pbuf;
p1 = (GLuint *) buf;
linesize = linesize * 3;
for (y = 0; y < zb->ysize; y++) {
p = p1;
n = zb->xsize >> 2;
do {
v0 = *(GLuint *) q;
v1 = *(GLuint *) (q + 2);
RGB16_TO_RGB24(w0, w1, w2, v0, v1);
p[0] = w0;
p[1] = w1;
p[2] = w2;
q += 4;
p += 3;
} while (--n > 0);
*((GLbyte *) p1) += linesize;
}
}
*/
#endif
#if TGL_FEATURE_RENDER_BITS == 16
/* 16-bpp build: copy the frame buffer into 'buf', one row per 'linesize'
bytes.  The internal pixel format already matches the destination, so
this simply forwards to the generic ZB_copyBuffer. */
void ZB_copyFrameBuffer(ZBuffer* zb, void* buf, GLint linesize) {
ZB_copyBuffer(zb, buf, linesize);
}
#endif
/*^ TGL_FEATURE_RENDER_BITS == 16 */
#if TGL_FEATURE_RENDER_BITS == 32
#define RGB32_TO_RGB16(v) (((v >> 8) & 0xf800) | (((v) >> 5) & 0x07e0) | (((v)&0xff) >> 3))
/* 32-bpp build: copy the frame buffer into 'buf' (row stride 'linesize');
no per-pixel conversion is performed here — the raw buffer is copied
by ZB_copyBuffer. */
void ZB_copyFrameBuffer(ZBuffer* zb, void* buf, GLint linesize) {
ZB_copyBuffer(zb, buf, linesize);
}
#endif
/* ^TGL_FEATURE_RENDER_BITS == 32 */
/*
* adr must be aligned on an 'int'
*/
/* Fill 'count' 16-bit halfwords at 'adr' with the 16-bit value 'val'.
 * The bulk is written as groups of eight halfwords (four 32-bit stores
 * with 'val' replicated in both halves), which is why 'adr' must be
 * int-aligned; the remaining count % 8 halfwords are written one by one.
 * Note: unrelated to the C11 Annex K memset_s (this one is file-local). */
static void memset_s(void* adr, GLint val, GLint count) {
    GLuint* dst32 = (GLuint*)adr;
    GLushort* dst16;
    GLuint pattern;
    GLint blocks, rest, k;

    pattern = val | (val << 16); /* value duplicated into both halfwords */
    blocks = count >> 3;         /* number of 8-halfword groups */
    rest = count & 7;            /* leftover halfwords */

    for (k = 0; k < blocks; k++) {
        dst32[0] = pattern;
        dst32[1] = pattern;
        dst32[2] = pattern;
        dst32[3] = pattern;
        dst32 += 4;
    }

    dst16 = (GLushort*)dst32;
    while (rest-- > 0)
        *dst16++ = val;
}
/* Used in 32 bit mode*/
/* Fill 'count' 32-bit words at 'adr' with 'val' (used in 32-bit mode).
 * Unrolled four stores per iteration; the tail loop writes the last
 * count % 4 words individually. */
static void memset_l(void* adr, GLint val, GLint count) {
    GLuint* dst = (GLuint*)adr;
    GLuint pattern = val;
    GLint blocks = count >> 2; /* number of 4-word groups */
    GLint rest = count & 3;    /* leftover words */
    GLint k;

    for (k = 0; k < blocks; k++) {
        dst[0] = pattern;
        dst[1] = pattern;
        dst[2] = pattern;
        dst[3] = pattern;
        dst += 4;
    }

    while (rest-- > 0)
        *dst++ = pattern;
}
/* Clear the depth buffer and/or the color buffer.
 *  clear_z / clear_color : flags selecting which buffers to clear.
 *  z                     : depth value replicated into every zbuf entry
 *                          (memset_s writes 16-bit halfwords).
 *  r, g, b               : clear color components, packed per render mode.
 * When TGL_FEATURE_FORCE_CLEAR_NO_COPY_COLOR is set, the buffer is filled
 * with the sentinel TGL_NO_COPY_COLOR instead of the requested color. */
void ZB_clear(ZBuffer* zb, GLint clear_z, GLint z, GLint clear_color, GLint r, GLint g, GLint b) {
GLuint color;
GLint y;
PIXEL* pp;
if (clear_z) {
memset_s(zb->zbuf, z, zb->xsize * zb->ysize);
}
if (clear_color) {
/* Clear row by row: rows are zb->linesize bytes apart, which may be
larger than xsize * sizeof(PIXEL). */
pp = zb->pbuf;
for (y = 0; y < zb->ysize; y++) {
#if TGL_FEATURE_RENDER_BITS == 15 || TGL_FEATURE_RENDER_BITS == 16
// color = RGB_TO_PIXEL(r, g, b);
#if TGL_FEATURE_FORCE_CLEAR_NO_COPY_COLOR
color = TGL_NO_COPY_COLOR;
#else
color = RGB_TO_PIXEL(r, g, b);
#endif
memset_s(pp, color, zb->xsize);
#elif TGL_FEATURE_RENDER_BITS == 32
#if TGL_FEATURE_FORCE_CLEAR_NO_COPY_COLOR
color = TGL_NO_COPY_COLOR;
#else
color = RGB_TO_PIXEL(r, g, b);
#endif
memset_l(pp, color, zb->xsize);
#else
#error BADJUJU
#endif
pp = (PIXEL*)((GLbyte*)pp + zb->linesize);
}
}
}
|
iRCCE_lib.h | //
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// [2010-10-25] added support for non-blocking send/recv operations
// - iRCCE_isend(), ..._test(), ..._wait(), ..._push()
// - iRCCE_irecv(), ..._test(), ..._wait(), ..._push()
// by Carsten Clauss, Chair for Operating Systems,
// RWTH Aachen University
//
// [2010-11-12] extracted non-blocking code into separate library
// by Carsten Scholtes
//
// [2011-04-19] added wildcard mechanism (iRCCE_ANY_SOURCE) for receiving
// a message from an arbitrary remote rank
// by Simon Pickartz, Chair for Operating Systems,
// RWTH Aachen University
//
// [2011-06-27] merged iRCCE_ANY_SOURCE branch with trunk (iRCCE_ANY_LENGTH)
//
#ifndef IRCCE_LIB_H
#define IRCCE_LIB_H
#include "RCCE_lib.h"
#include "iRCCE.h"
#ifdef AIR
#define FPGA_BASE 0xf9000000
#define BACKOFF_MIN 8
#define BACKOFF_MAX 256
extern iRCCE_AIR iRCCE_atomic_inc_regs[];
extern int iRCCE_atomic_alloc_counter;
extern iRCCE_AIR* iRCCE_atomic_barrier[2];
#endif
extern iRCCE_SEND_REQUEST* iRCCE_isend_queue;
extern iRCCE_RECV_REQUEST* iRCCE_irecv_queue[RCCE_MAXNP];
extern iRCCE_RECV_REQUEST* iRCCE_irecv_any_source_queue;
extern int iRCCE_recent_source;
extern int iRCCE_recent_length;
#if defined(_OPENMP) && !defined(__hermit__)
#pragma omp threadprivate (iRCCE_isend_queue, iRCCE_irecv_queue, iRCCE_irecv_any_source_queue, iRCCE_recent_source, iRCCE_recent_length)
#endif
int iRCCE_test_flag(RCCE_FLAG, RCCE_FLAG_STATUS, int *);
int iRCCE_push_ssend_request(iRCCE_SEND_REQUEST *request);
int iRCCE_push_srecv_request(iRCCE_RECV_REQUEST *request);
#endif
|
linear_system_solver.c | //
// Created by sachetto on 04/10/17.
// Modified by berg on 13/05/18
//
#include "../monodomain/config/linear_system_solver_config.h"
#include "../libraries_common/config_helpers.h"
/* (Optionally Jacobi-preconditioned) conjugate-gradient solver for the
monodomain system A*v = b stored on the grid's active cells.
Config keys: "tolerance" (default 1e-16), "use_preconditioner"
("yes"/"true" enables Jacobi; default enabled), "max_iterations" (50).
On return: ac[i]->v holds the solution, *error the final squared
residual norm, *number_of_iterations the sweep count. */
SOLVE_LINEAR_SYSTEM(conjugate_gradient) {
double tol = 1e-16;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(double, tol, config->config_data.config, "tolerance");
/* Jacobi (diagonal) preconditioning is on by default; any string other
than "yes"/"true" disables it. */
bool use_jacobi = true;
char *preconditioner_char;
GET_PARAMETER_VALUE_CHAR (preconditioner_char, config->config_data.config, "use_preconditioner");
if (preconditioner_char != NULL) {
use_jacobi = ((strcmp (preconditioner_char, "yes") == 0) || (strcmp (preconditioner_char, "true") == 0));
}
int max_its = 50;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data.config, "max_iterations");
double rTr,            /* r^T r : squared residual norm              */
r1Tr1,          /* updated r^T r after the step               */
pTAp,           /* p^T A p : curvature along direction p      */
alpha,          /* CG step length                             */
beta,           /* direction-update coefficient               */
precision = tol,
rTz,            /* r^T z : preconditioned inner product       */
r1Tz1;
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node** ac = the_grid->active_cells;
*error = 1.0;
*number_of_iterations = 1;
//__________________________________________________________________________
//Computes int_vector A*x, residue r = b - Ax, scalar rTr = r^T * r and
//sets initial search direction p.
rTr = 0.0;
rTz = 0.0;
struct element element;
/* NOTE(review): 'i' is signed while num_active_cells is uint32_t, so the
loop comparisons below mix signedness (OpenMP historically required a
signed index).  Kept as-is. */
int i;
#pragma omp parallel for private (element) reduction(+:rTr,rTz)
for (i = 0; i < num_active_cells; i++) {
struct element *cell_elements = ac[i]->elements;
ac[i]->Ax = 0.0;
size_t max_el = sb_count(cell_elements);
/* Sparse row i: Ax[i] = sum over stored entries of A[i,:] * v. */
for(int el = 0; el < max_el; el++) {
element = cell_elements[el];
ac[i]->Ax += element.value * element.cell->v;
}
ac[i]->r = ac[i]->b - ac[i]->Ax;
if(use_jacobi) {
/* elements[0] holds row i's diagonal entry (see the jacobi solver). */
double value = cell_elements[0].value;
if(value == 0.0) value = 1.0; /* guard against a zero diagonal */
ac[i]->z = (1.0/value) * ac[i]->r; // preconditioner
rTz += ac[i]->r * ac[i]->z;
ac[i]->p = ac[i]->z;
}
else {
ac[i]->p = ac[i]->r;
}
rTr += ac[i]->r * ac[i]->r;
}
/* NOTE: the reported error is the *squared* 2-norm of the residual, so
"tolerance" is effectively compared against ||r||^2, not ||r||. */
*error = rTr;
//__________________________________________________________________________
//Conjugate gradient iterations.
if( *error >= precision ) {
while( *number_of_iterations < max_its ) {
//__________________________________________________________________
// Computes Ap and pTAp. Uses Ax to store Ap.
pTAp = 0.0;
#pragma omp parallel for private(element) reduction(+ : pTAp)
for (i = 0; i < num_active_cells; i++) {
ac[i]->Ax = 0.0;
struct element *cell_elements = ac[i]->elements;
size_t max_el = sb_count(cell_elements);
for(int el = 0; el < max_el; el++) {
element = cell_elements[el];
ac[i]->Ax += element.value * element.cell->p;
}
pTAp += ac[i]->p * ac[i]->Ax;
}
//__________________________________________________________________
// Computes alpha = (r,z)/(p,Ap) (preconditioned) or (r,r)/(p,Ap).
if(use_jacobi) {
alpha = rTz/pTAp;
}
else {
alpha = rTr/pTAp;
}
//__________________________________________________________________
r1Tr1 = 0.0;
r1Tz1 = 0.0;
// Computes new value of solution: u = u + alpha*p.
// The residual is updated incrementally: r = r - alpha*Ap.
#pragma omp parallel for reduction (+:r1Tr1,r1Tz1)
for (i = 0; i < num_active_cells; i++) {
ac[i]->v += alpha * ac[i]->p;
ac[i]->r -= alpha * ac[i]->Ax;
if(use_jacobi) {
double value = ac[i]->elements[0].value;
if(value == 0.0) value = 1.0;
ac[i]->z = (1.0/value) * ac[i]->r;
r1Tz1 += ac[i]->z * ac[i]->r;
}
r1Tr1 += ac[i]->r * ac[i]->r;
}
//__________________________________________________________________
//Computes beta.
if(use_jacobi) {
beta = r1Tz1/rTz;
}
else {
beta = r1Tr1/rTr;
}
*error = r1Tr1;
*number_of_iterations = *number_of_iterations + 1;
if( *error <= precision ) {
break;
}
//__________________________________________________________________
//Computes int_vector p1 = r1 + beta*p and uses it to upgrade p.
#pragma omp parallel for
for (i = 0; i < num_active_cells; i++) {
if(use_jacobi) {
ac[i]->p1 = ac[i]->z + beta * ac[i]->p;
}
else {
ac[i]->p1 = ac[i]->r + beta * ac[i]->p;
}
ac[i]->p = ac[i]->p1;
}
rTz = r1Tz1;
rTr = r1Tr1;
}
}//end of conjugate gradient iterations.
}//end conjugateGradient() function.
// Berg's code
/* Jacobi (fixed-point) iteration for A*v = b on the active cells.
Each sweep computes x_aux[i] = (b[i] - sum_{j != i} A[i][j]*v[j]) / A[i][i]
and then reports the 2-norm of the residual b - A*x_aux as the error.
Config keys: "tolerance" (default 1e-8), "max_iterations" (500).
Assumes elements[0] of each row is the diagonal entry (no zero guard
here, unlike the CG solvers). */
SOLVE_LINEAR_SYSTEM(jacobi)
{
double tol = 1e-08;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(double, tol, config->config_data.config, "tolerance");
int max_its = 500;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data.config, "max_iterations");
double sigma,
precision = tol;
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node** ac = the_grid->active_cells;
*error = 1.0;
*number_of_iterations = 1;
struct element element;
int i;
if (*error >= precision)
{
//__________________________________________________________________________
//Jacobi iterations.
while (*number_of_iterations < max_its)
{
#pragma omp parallel for private (element,sigma)
for (i = 0; i < num_active_cells; i++)
{
struct element *cell_elements = ac[i]->elements;
sigma = 0.0;
size_t max_el = sb_count(cell_elements);
// Off-diagonal sum only: elements[0] is the diagonal, so start at el = 1.
for(int el = 1; el < max_el; el++)
{
element = cell_elements[el];
sigma += element.value * element.cell->v;
}
double value = cell_elements[0].value;
ac[i]->x_aux = (1/value)*(ac[i]->b - sigma);
/*
if (isnan(ac[i]->x_aux))
{
printf("ERROR! Not a number\n");
exit(EXIT_FAILURE);
}
*/
}
double residue = 0.0;
double sum;
#pragma omp parallel for private (element,sum) reduction (+:residue)
for (i = 0; i < num_active_cells; i++)
{
struct element *cell_elements = ac[i]->elements;
size_t max_el = sb_count(cell_elements);
// Full row product (diagonal included): sum = (A * x_aux)[i],
// needed for the residual b - A*x_aux below.
sum = 0.0;
for(int el = 0; el < max_el; el++)
{
element = cell_elements[el];
sum += element.value * element.cell->x_aux;
}
/* Publish the new iterate; the residual uses x_aux of *all* cells,
which is safe because x_aux was fully computed in the previous loop. */
ac[i]->v = ac[i]->x_aux;
residue += pow(ac[i]->b - sum,2);
}
// The error is norm of the residue
residue = sqrt(residue);
*error = residue;
*number_of_iterations = *number_of_iterations + 1;
if( *error <= precision )
break;
}
}
}
// Berg's code
/* (Optionally Jacobi-preconditioned) biconjugate-gradient solver for
A*v = b.  Maintains a shadow system (x_aux, r_aux, p_aux, z_aux) built
from the transpose products x*A, so it also works for non-symmetric A.
Config keys: "tolerance" (1e-16), "use_preconditioner" ("yes"/"true",
default enabled), "max_iterations" (100).
NOTE(review): the "error" here is r^T r_aux, which unlike CG's r^T r is
not guaranteed non-negative — confirm intended convergence criterion. */
SOLVE_LINEAR_SYSTEM(biconjugate_gradient)
{
double tol = 1e-16;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(double, tol, config->config_data.config, "tolerance");
bool use_jacobi = true;
char *preconditioner_char;
GET_PARAMETER_VALUE_CHAR (preconditioner_char, config->config_data.config, "use_preconditioner");
if (preconditioner_char != NULL)
{
use_jacobi = ((strcmp (preconditioner_char, "yes") == 0) || (strcmp (preconditioner_char, "true") == 0));
}
int max_its = 100;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data.config, "max_iterations");
double rTr,            /* r_aux^T r : bi-orthogonal inner product */
r1Tr1,          /* updated value after the step            */
pTAp,           /* p_aux^T A p                             */
alpha,          /* step length                             */
beta,           /* direction-update coefficient            */
precision = tol,
rTz,            /* r_aux^T z (preconditioned variant)      */
r1Tz1;
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node** ac = the_grid->active_cells;
*error = 1.0;
*number_of_iterations = 1;
struct element element;
int i;
//__________________________________________________________________________
// Zero all entries on the int_vector x*A
// And initialize the second guess vector x_aux
#pragma omp parallel for
for (i = 0; i < num_active_cells; i++)
{
ac[i]->xA = 0.0;
ac[i]->x_aux = ac[i]->v;
}
//__________________________________________________________________________
//Computes int_vector A*x, x*A
//xA must be fully calculated to start doing anything over the r_aux vector
#pragma omp parallel for private (element)
for (i = 0; i < num_active_cells; i++)
{
struct element *cell_elements = ac[i]->elements;
ac[i]->Ax = 0.0;
size_t max_el = sb_count(cell_elements);
for(int el = 0; el < max_el; el++)
{
element = cell_elements[el];
uint32_t col = element.column;
ac[i]->Ax += element.value * element.cell->v;
/* Scatter into the transpose product: different rows may write the
same column, hence the critical section (serializes all threads —
known performance hot spot). */
#pragma omp critical
ac[col]->xA += element.value * ac[i]->x_aux;
}
}
rTr = 0.0;
rTz = 0.0;
//__________________________________________________________________________
//Computes residues r, r_aux
//scalar rTr = r^T * r_aux and
//sets initial search directions p and p_aux.
#pragma omp parallel for private (element) reduction(+:rTr,rTz)
for (i = 0; i < num_active_cells; i++)
{
struct element *cell_elements = ac[i]->elements;
ac[i]->r = ac[i]->b - ac[i]->Ax;
ac[i]->r_aux = ac[i]->b - ac[i]->xA;
if(use_jacobi)
{
/* elements[0] is the row's diagonal; guard against zero. */
double value = cell_elements[0].value;
if(value == 0.0) value = 1.0;
ac[i]->z = (1.0/value) * ac[i]->r; // preconditioner
ac[i]->z_aux = (1.0/value) * ac[i]->r_aux;
rTz += ac[i]->r_aux * ac[i]->z;
ac[i]->p = ac[i]->z;
ac[i]->p_aux = ac[i]->z_aux;
}
else
{
ac[i]->p = ac[i]->r;
ac[i]->p_aux = ac[i]->r_aux;
}
rTr += ac[i]->r_aux * ac[i]->r;
}
*error = rTr;
//__________________________________________________________________________
//Biconjugate gradient iterations.
if( *error >= precision )
{
while( *number_of_iterations < max_its )
{
//__________________________________________________________________
// Computes Ap, pA and pTAp. Uses Ax to store Ap and xA to store pA
pTAp = 0.0;
#pragma omp parallel for
for (i = 0; i < num_active_cells; i++)
ac[i]->xA = 0.0;
#pragma omp parallel for private(element) reduction(+ : pTAp)
for (i = 0; i < num_active_cells; i++)
{
ac[i]->Ax = 0.0;
struct element *cell_elements = ac[i]->elements;
size_t max_el = sb_count(cell_elements);
for(int el = 0; el < max_el; el++)
{
element = cell_elements[el];
uint32_t col = element.column;
ac[i]->Ax += element.value * element.cell->p;
/* Same cross-row scatter as above; must stay serialized. */
#pragma omp critical
ac[col]->xA += element.value * ac[i]->p_aux;
}
pTAp += ac[i]->p_aux * ac[i]->Ax;
}
//__________________________________________________________________
// Computes alpha.
if(use_jacobi)
{
alpha = rTz/pTAp;
}
else
{
alpha = rTr/pTAp;
}
//__________________________________________________________________
r1Tr1 = 0.0;
r1Tz1 = 0.0;
// Computes new value of solution: u = u + alpha*p.
// u_aux = u_aux + alpha*p_aux
#pragma omp parallel for reduction (+:r1Tr1,r1Tz1)
for (i = 0; i < num_active_cells; i++)
{
ac[i]->v += alpha * ac[i]->p;
ac[i]->x_aux += alpha * ac[i]->p_aux;
ac[i]->r -= alpha * ac[i]->Ax;
ac[i]->r_aux -= alpha * ac[i]->xA;
if(use_jacobi)
{
double value = ac[i]->elements[0].value;
if(value == 0.0) value = 1.0;
ac[i]->z = (1.0/value) * ac[i]->r;
ac[i]->z_aux = (1.0/value) * ac[i]->r_aux;
r1Tz1 += ac[i]->z * ac[i]->r_aux;
}
r1Tr1 += ac[i]->r * ac[i]->r_aux;
}
//__________________________________________________________________
//Computes beta.
if(use_jacobi)
{
beta = r1Tz1/rTz;
}
else
{
beta = r1Tr1/rTr;
}
*error = r1Tr1;
*number_of_iterations = *number_of_iterations + 1;
if( *error <= precision )
{
break;
}
//__________________________________________________________________
//Computes int_vector p1 = r1 + beta*p and uses it to upgrade p.
#pragma omp parallel for
for (i = 0; i < num_active_cells; i++)
{
if(use_jacobi)
{
ac[i]->p1 = ac[i]->z + beta * ac[i]->p;
ac[i]->p1_aux = ac[i]->z_aux + beta * ac[i]->p_aux;
}
else
{
ac[i]->p1 = ac[i]->r + beta * ac[i]->p;
ac[i]->p1_aux = ac[i]->r_aux + beta * ac[i]->p_aux;
}
ac[i]->p = ac[i]->p1;
ac[i]->p_aux = ac[i]->p1_aux;
}
rTz = r1Tz1;
rTr = r1Tr1;
}
}//end of biconjugate gradient iterations.
}//end biconjugateGradient() function.
|
pr81768-2.c | /* PR middle-end/81768 */
/* { dg-do compile } */
float b[10][15][10];
/* Regression test for PR middle-end/81768 (compile-only): a collapsed
OpenMP 'target parallel for' loop nest whose iterators are pointers
into a global array.  The body recovers integer indices from the
pointer differences.  Kept byte-identical — the exact shape of the
loop nest is what the compiler test exercises. */
void
foo (void)
{
float *i;
#pragma omp target parallel for schedule(static, 32) collapse(3)
for (i = &b[0][0][0]; i < &b[0][0][10]; i++)
for (float *j = &b[0][15][0]; j > &b[0][0][0]; j -= 10)
for (float *k = &b[0][0][10]; k > &b[0][0][0]; --k)
b[i - &b[0][0][0]][(j - &b[0][0][0]) / 10 - 1][(k - &b[0][0][0]) - 1] -= 3.5;
}
|
mkldnn_quantize_v2-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mkldnn_quantize_v2-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#if MXNET_USE_ONEDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../quantize_v2-inl.h"
namespace mxnet {
namespace op {
/* Stateful MKL-DNN (oneDNN) quantize operator.  Caches the reorder
primitive, output descriptor and argument map across Forward() calls
so the primitive is rebuilt only when the observed data range changes. */
class SgMKLDNNQuantizeOperator {
public:
explicit SgMKLDNNQuantizeOperator(const nnvm::NodeAttrs& attrs)
: param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {}
void Forward(const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs);
private:
bool initalized_{false};  // whether a reorder primitive is cached (sic: "initialized"; name kept for compatibility)
QuantizeV2Param param_;
float cached_data_min_{0.f};  // data range the cached primitive was built for
float cached_data_max_{0.f};
mkldnn::memory::desc o_desc_;  // cached output memory descriptor
mkldnn_args_map_t args_;       // argument map reused across submissions
std::shared_ptr<mkldnn::reorder> fwd_pd_;  // cached reorder primitive
};
/* Quantize a float NDArray to int8/uint8 via an MKL-DNN reorder, or pass
already-quantized input through.  outputs[0] receives the quantized
tensor; outputs[1]/outputs[2] receive the min/max of the float range it
represents. */
void SgMKLDNNQuantizeOperator::Forward(const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
float quantized_range = 0.0;
NDArray in_buffer = inputs[0];
/* Running min/max start at the extreme opposite values so any element
replaces them. */
float data_min = mshadow::red::limits::MaxValue<float>();
float data_max = mshadow::red::limits::MinValue<float>();
// Pass through quantized data
if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) {
/* Input is already int8/uint8: only the reported range needs fixing up. */
if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
*outputs[1].data().dptr<float>() = param_.min_calib_range.value();
*outputs[2].data().dptr<float>() = param_.max_calib_range.value();
} else {
/* No calibration info: report the dtype's full representable range. */
if (inputs[0].dtype() == mshadow::kUint8) {
*outputs[1].data().dptr<float>() = 0;
*outputs[2].data().dptr<float>() = kUint8Range;
} else {
*outputs[1].data().dptr<float>() = -kInt8Range;
*outputs[2].data().dptr<float>() = kInt8Range;
}
}
/* Copy the payload unless the caller asked for an in-place write. */
if (req[0] != kWriteInplace) {
const_cast<NDArray&>(outputs[0]).CopyFrom(*inputs[0].GetMKLDNNData());
MKLDNNStream::Get()->Submit();
}
} else {
/* A view over MKL-DNN-formatted data must be brought back to the
default layout before reordering. */
if (in_buffer.IsView() && in_buffer.IsMKLDNNData())
in_buffer = inputs[0].Reorder2Default();
auto i_mem = in_buffer.GetMKLDNNData();
if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
data_min = param_.min_calib_range.value();
data_max = param_.max_calib_range.value();
} else {
// no calib info
/* Scan the input for its min/max with one slot per OMP thread, then
reduce the per-thread slots sequentially. */
in_buffer = inputs[0].Reorder2Default();
auto in_ptr = in_buffer.data().dptr<float>();
auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
std::vector<float> data_maxs(nthreads, data_max);
std::vector<float> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
int tid = omp_get_thread_num();
if (in_ptr[i] > data_maxs[tid])
data_maxs[tid] = in_ptr[i];
if (in_ptr[i] < data_mins[tid])
data_mins[tid] = in_ptr[i];
}
for (index_t i = 0; i < nthreads; i++) {
if (data_maxs[i] > data_max)
data_max = data_maxs[i];
if (data_mins[i] < data_min)
data_min = data_mins[i];
}
/* A changed data range invalidates the cached reorder primitive. */
if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max))
initalized_ = false;
}
// Write output min/max
auto out_type = GetQuantizeOutputType(param_);
if (out_type == mshadow::kUint8) {
quantized_range = kUint8Range;
*outputs[1].data().dptr<float>() = data_min;
*outputs[2].data().dptr<float>() = data_max;
} else if (out_type == mshadow::kInt8) {
/* int8 output is symmetric around zero: use the max absolute value. */
float real_range = MaxAbs(data_min, data_max);
quantized_range = kInt8Range;
*outputs[1].data().dptr<float>() = -real_range;
*outputs[2].data().dptr<float>() = real_range;
} else {
LOG(FATAL) << "mkldnn quantize op only supports int8 and uint8 as output type";
}
if (!initalized_) {
/* (Re)build and cache the reorder primitive for the current range. */
cached_data_min_ = data_min;
cached_data_max_ = data_max;
float real_range = MaxAbs(data_min, data_max);
float scale = quantized_range / real_range;
mkldnn::primitive_attr attr;
const int mask = 0;  /* mask 0: a single scale for the whole tensor */
std::vector<float> scales = {scale};
attr.set_output_scales(mask, scales);
mkldnn::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
auto i_desc = i_mem->get_desc();
size_t i_ndim = in_buffer.shape().ndim();
if (i_ndim == 4) {
/* 4-D tensors are emitted in nhwc layout. */
mkldnn::memory::format_tag o_fmt = mkldnn::memory::format_tag::nhwc;
mkldnn::memory::dims o_dims(i_desc.data.dims, i_desc.data.dims + i_desc.data.ndims);
o_desc_ = mkldnn::memory::desc(o_dims, get_mkldnn_type(out_type), o_fmt);
} else {
/* Otherwise keep the input layout and only switch the data type. */
o_desc_ = i_desc;
o_desc_.data.data_type = get_mkldnn_type_t(out_type);
}
auto reorder_pd =
mkldnn::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc_, attr);
fwd_pd_ = std::make_shared<mkldnn::reorder>(reorder_pd);
initalized_ = true;
}
auto o_mem = CreateMKLDNNMem(outputs[0], o_desc_, req[0]);
args_[MKLDNN_ARG_FROM] = *i_mem;
args_[MKLDNN_ARG_TO] = *o_mem.second;
MKLDNNStream::Get()->RegisterPrimArgs(*fwd_pd_, args_);
CommitOutput(outputs[0], o_mem);
MKLDNNStream::Get()->Submit();
}
}
/* Stateful-op entry point: unwraps the cached SgMKLDNNQuantizeOperator
from 'state_ptr' and delegates to its Forward(). */
static void SgMKLDNNQuantizeForward(const OpStatePtr& state_ptr,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
SgMKLDNNQuantizeOperator& op = state_ptr.get_state<SgMKLDNNQuantizeOperator>();
op.Forward(ctx, inputs, req, outputs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
|
sample_sort.c |
/*********************************************************************
samplesort.c: source: http://www.cse.iitd.ernet.in/~dheerajb/MPI/codes/day-3/c/samplesort.c
Objective : To sort unsorted integers by sample sort algorithm
Write a MPI program to sort n integers, using sample
sort algorithm on a p processor of PARAM 10000.
Assume n is multiple of p. Sorting is defined as the
task of arranging an unordered collection of elements
into monotonically increasing (or decreasing) order.
postconditions: array[] is sorted in ascending order. ANSI C
provides a quicksort function called qsort(); its
function prototype is in the standard header file
<stdlib.h>. This program wraps it in sorting().
Description : 1. Partitioning of the input data and local sort :
The first step of sample sort is to partition the data.
Initially, each one of the p processors stores n/p
elements of the sequence of the elements to be sorted.
Let Ai be the sequence stored at processor Pi. In the
first phase each processor sorts the local n/p elements
using a serial sorting algorithm. (You can use C
library sorting() for performing this local sort).
2. Choosing the Splitters :
The second phase of the algorithm determines the p-1
splitter elements S. This is done as follows. Each
processor Pi selects p-1 equally spaced elements from
the locally sorted sequence Ai. These p-1 elements
from these p(p-1) elements are selected to be the
splitters.
3. Completing the sort :
In the third phase, each processor Pi uses the splitters
to partition the local sequence Ai into p subsequences
Ai,j such that for 0 <=j <p-1 all the elements in Ai,j
are smaller than Sj , and for j=p-1 (i.e., the last
element) Ai, j contains the rest elements. Then each
processor i sends the sub-sequence Ai,j to processor Pj.
Finally, each processor merge-sorts the received
sub-sequences, completing the sorting algorithm.
Input : Process with rank 0 generates unsorted integers
using C library call rand().
Output : Process with rank 0 stores the sorted elements in
the file sorted_data_out.
*********************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <limits.h>
#include <assert.h>
//#define TOTAL_ELEMENT_PER_PE (4*1024*1024)
#define TOTAL_ELEMENT_PER_PE (8*1024*1024)
#define ELEMENT_T int
#define COUNT_T uint64_t
#define VERIFY
#define HC_GRANULARITY 2048
//#define HC_GRANULARITY 3072
#ifdef _OSHMEM_
#include <shmem.h>
long pSync[_SHMEM_BCAST_SYNC_SIZE];
#define RESET_BCAST_PSYNC { int _i; for(_i=0; _i<_SHMEM_BCAST_SYNC_SIZE; _i++) { pSync[_i] = _SHMEM_SYNC_VALUE; } shmem_barrier_all(); }
#endif
#ifdef _MPI_
#include <mpi.h>
#define ELEMENT_T_MPI MPI_INT
#define shmem_malloc malloc
#define shmem_free free
#endif
/* Wall-clock timestamp in MICROSECONDS (despite the name), taken from
 * gettimeofday().  Used for the elapsed-time printouts below. */
long seconds() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (long)now.tv_sec * 1000000 + now.tv_usec;
}
static int compare(const void *i, const void *j)
{
if ((*(ELEMENT_T*)i) > (*(ELEMENT_T *)j))
return (1);
if ((*(ELEMENT_T *)i) < (*(ELEMENT_T *)j))
return (-1);
return (0);
}
/* Hoare-style partition of data[left..right] around the middle element.
 * On return everything left of the returned index is <= pivot and
 * everything from it onward is >= pivot; the return value is the start
 * of the right half for the recursive quicksort. */
int partition(ELEMENT_T* data, int left, int right) {
    ELEMENT_T pivot = data[(left + right) / 2];
    int lo = left;
    int hi = right;

    while (lo <= hi) {
        /* Advance both cursors past elements already on the correct side. */
        while (data[lo] < pivot)
            lo++;
        while (data[hi] > pivot)
            hi--;
        if (lo <= hi) {
            ELEMENT_T held = data[lo];
            data[lo] = data[hi];
            data[hi] = held;
            lo++;
            hi--;
        }
    }
    return lo;
}
#ifdef _ASYNC_OSHMEM_
/* Argument bundle for one par_sort task: sort buffer[left..right]. */
typedef struct sort_data_t {
ELEMENT_T *buffer;  /* array being sorted (shared by all tasks) */
int left;           /* inclusive lower bound of this task's range */
int right;          /* inclusive upper bound of this task's range */
} sort_data_t;
/* Task-parallel quicksort body for the OpenSHMEM tasking runtime.
Ranges larger than HC_GRANULARITY are partitioned and both halves are
spawned as child tasks inside a task scope; smaller ranges fall back
to serial qsort().  Frees its own heap-allocated argument bundle. */
void par_sort(void* arg) {
sort_data_t *in = (sort_data_t*) arg;
ELEMENT_T* data = in->buffer;
int left = in->left;
int right = in->right;
if (right - left + 1 > HC_GRANULARITY) {
int index = partition(data, left, right);
/* The scope waits for both child tasks before returning. */
shmem_task_scope_begin();
if (left < index - 1) {
sort_data_t* buf = (sort_data_t*) malloc(sizeof(sort_data_t));
buf->buffer = data;
buf->left = left;
buf->right = index - 1;
shmem_task_nbi(par_sort, buf, NULL);
}
if (index < right) {
sort_data_t* buf = (sort_data_t*) malloc(sizeof(sort_data_t));
buf->buffer = data;
buf->left = index;
buf->right = right;
shmem_task_nbi(par_sort, buf, NULL);
}
shmem_task_scope_end();
}
else {
// quicksort in C library
qsort(data+left, right - left + 1, sizeof(ELEMENT_T), compare);
}
free(arg);
}
/* Sort buffer[0..size-1]: spawns the root par_sort task, waits for the
whole task tree via the enclosing task scope, and prints the elapsed
time in milliseconds. */
void sorting(ELEMENT_T* buffer, int size) {
sort_data_t* buf = (sort_data_t*) malloc(sizeof(sort_data_t));
buf->buffer = buffer;
buf->left = 0;
buf->right = size - 1;
long start = seconds();
shmem_task_scope_begin();
shmem_task_nbi(par_sort, buf, NULL);
shmem_task_scope_end();
double end = (((double)(seconds()-start))/1000000) * 1000; // msec
printf("Sorting (%d) = %.3f\n",size, end);
}
#else // OpenMP
/* Task-parallel quicksort (OpenMP tasks): while the range exceeds
HC_GRANULARITY, partition and recurse on both halves as tasks, then
wait for them; smaller ranges use serial qsort(). */
void par_sort(ELEMENT_T* data, int left, int right) {
if (right - left + 1 > HC_GRANULARITY) {
int index = partition(data, left, right);
if (left < index - 1) {
#pragma omp task
{
par_sort(data, left, index - 1);
}
}
if (index < right) {
#pragma omp task
{
par_sort(data, index, right);
}
}
/* Join both child tasks before this frame returns. */
#pragma omp taskwait
}
else {
// quicksort in C library
qsort(data+left, right - left + 1, sizeof(ELEMENT_T), compare);
}
}
/* Sort buffer[0..size-1] with the OpenMP task version of par_sort.
A single thread seeds the root task ('single nowait'); the rest of the
team picks up the spawned tasks.  Prints the elapsed time in ms. */
void sorting(ELEMENT_T* buffer, int size) {
long start = seconds();
#pragma omp parallel
{
#pragma omp single nowait
{
par_sort(buffer, 0, size-1);
}
}
double end = (((double)(seconds()-start))/1000000) * 1000; // msec
printf("Sorting (%d) = %.3f\n",size, end);
}
#endif
#ifdef _OSHMEM_
/* Scatter: PE 'root' pushes the i-th 'count'-element slice of 'src' into
'dest' on PE i.  Collective — every PE must call it (barriers on entry
and exit); 'dest' must be symmetric. */
void shmem_scatter32(ELEMENT_T* dest, ELEMENT_T* src, int root, int count) {
int i;
int me = shmem_my_pe();
int procs = shmem_n_pes();
shmem_barrier_all();
if(me == root) {
for(i=0; i<procs; i++) {
ELEMENT_T* start = &src[i * count];
shmem_put32(dest, start, count, i);
}
}
shmem_barrier_all();
}
/* Gather: every PE pushes its 'count' elements of 'src' into slot
'me * count' of 'dest' on PE 'root'.  Collective; 'dest' must be
symmetric.  (NOTE(review): 'procs' is computed but unused here.) */
void shmem_gather32(ELEMENT_T* dest, ELEMENT_T* src, int root, int count) {
int me = shmem_my_pe();
int procs = shmem_n_pes();
shmem_barrier_all();
ELEMENT_T* target_index = &dest[me * count];
shmem_put32(target_index, src, count, root);
shmem_barrier_all();
}
/* All-to-all: PE 'me' deposits its i-th 'count'-element slice of 'src'
into slot 'me * count' of 'dest' on PE i.  Collective; 'dest' must be
symmetric. */
void shmem_alltoall32(ELEMENT_T* dest, ELEMENT_T* src, int count) {
int i;
int me = shmem_my_pe();
int procs = shmem_n_pes();
shmem_barrier_all();
for(i=0; i<procs; i++) {
shmem_put32(&dest[me*count], &src[i*count], count, i);
}
shmem_barrier_all();
}
#endif
#ifndef HCLIB_COMM_WORKER_FIXED
void entrypoint(void *arg) {
#else
int main (int argc, char *argv[]) {
/**** Initialising ****/
#if defined(_OSHMEM_) && defined(_MPI_)
printf("ERROR: You cannot use both OpenSHMEM as well as MPI\n");
exit(1);
#endif
#if defined(_OSHMEM_)
shmem_init ();
#elif defined(_MPI_)
MPI_Init(&argc, &argv);
#else
printf("ERROR: Use either OpenSHMEM or MPI\n");
exit(1);
#endif
#endif
/* Variable Declarations */
int Numprocs,MyRank, Root = 0;
COUNT_T i,j,k, NoofElements, NoofElements_Bloc,
NoElementsToSort;
COUNT_T count, temp;
ELEMENT_T *Input, *InputData;
ELEMENT_T *Splitter, *AllSplitter;
ELEMENT_T *Buckets, *BucketBuffer, *LocalBucket;
ELEMENT_T *OutputBuffer, *Output, *target_index;
long start_time = seconds();
long local_timer_start;
double local_timer_end, end_time, init_time;
double communication_timer=0;
#if defined(_MPI_)
MPI_Comm_size(MPI_COMM_WORLD, &Numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
#else
Numprocs = shmem_n_pes ();
MyRank = shmem_my_pe ();
#endif
assert((TOTAL_ELEMENT_PER_PE * Numprocs) < INT_MAX && "Change count type from int to uint64_t");
NoofElements = TOTAL_ELEMENT_PER_PE * Numprocs;
/**** Reading Input ****/
Input = (ELEMENT_T *) shmem_malloc (NoofElements*sizeof(*Input));
if(Input == NULL) {
printf("Error : Can not allocate memory \n");
}
if (MyRank == Root){
printf("\n-----\nmkdir timedrun fake\n\n");
/* Initialise random number generator */
printf ("Generating input Array for Sorting %d numbers\n",NoofElements);
srand48((ELEMENT_T)NoofElements);
for(i=0; i< NoofElements; i++) {
Input[i] = rand();
}
}
/**** Sending Data ****/
NoofElements_Bloc = NoofElements / Numprocs;
InputData = (ELEMENT_T *) shmem_malloc (NoofElements_Bloc * sizeof (*InputData));
if(InputData == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Scatter(Input, NoofElements_Bloc, ELEMENT_T_MPI, InputData,
NoofElements_Bloc, ELEMENT_T_MPI, Root, MPI_COMM_WORLD);
#else
shmem_scatter32(InputData, Input, 0, NoofElements_Bloc);
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
init_time = local_timer_end;
printf("Scatter = %.3f\n",local_timer_end);
/**** Sorting Locally ****/
sorting(InputData, NoofElements_Bloc);
/**** Choosing Local Splitters ****/
Splitter = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * (Numprocs-1));
if(Splitter == NULL) {
printf("Error : Can not allocate memory \n");
}
for (i=0; i< (Numprocs-1); i++){
Splitter[i] = InputData[NoofElements/(Numprocs*Numprocs) * (i+1)];
}
/**** Gathering Local Splitters at Root ****/
AllSplitter = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * Numprocs * (Numprocs-1));
if(AllSplitter == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Gather (Splitter, Numprocs-1, ELEMENT_T_MPI, AllSplitter, Numprocs-1,
ELEMENT_T_MPI, Root, MPI_COMM_WORLD);
#else
shmem_gather32(AllSplitter, Splitter, 0, Numprocs-1);
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("Gather = %.3f\n",local_timer_end);
/**** Choosing Global Splitters ****/
if (MyRank == Root){
sorting (AllSplitter, Numprocs*(Numprocs-1));
for (i=0; i<Numprocs-1; i++)
Splitter[i] = AllSplitter[(Numprocs-1)*(i+1)];
}
local_timer_start = seconds();
/**** Broadcasting Global Splitters ****/
#if defined(_MPI_)
MPI_Bcast (Splitter, Numprocs-1, ELEMENT_T_MPI, 0, MPI_COMM_WORLD);
#else
RESET_BCAST_PSYNC;
shmem_broadcast32(Splitter, Splitter, Numprocs-1, 0, 0, 0, Numprocs, pSync);
shmem_barrier_all();
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("Bcast = %.3f\n",local_timer_end);
/**** Creating Numprocs Buckets locally ****/
Buckets = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * (NoofElements + Numprocs));
if(Buckets == NULL) {
printf("Error : Can not allocate memory \n");
}
j = 0;
k = 1;
for (i=0; i<NoofElements_Bloc; i++){
if(j < (Numprocs-1)){
if (InputData[i] < Splitter[j])
Buckets[((NoofElements_Bloc + 1) * j) + k++] = InputData[i];
else{
Buckets[(NoofElements_Bloc + 1) * j] = k-1;
k=1;
j++;
i--;
}
}
else
Buckets[((NoofElements_Bloc + 1) * j) + k++] = InputData[i];
}
Buckets[(NoofElements_Bloc + 1) * j] = k - 1;
shmem_free(Splitter);
shmem_free(AllSplitter);
/**** Sending buckets to respective processors ****/
BucketBuffer = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * (NoofElements + Numprocs));
if(BucketBuffer == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Alltoall (Buckets, NoofElements_Bloc + 1, ELEMENT_T_MPI, BucketBuffer,
NoofElements_Bloc + 1, ELEMENT_T_MPI, MPI_COMM_WORLD);
#else
shmem_alltoall32(BucketBuffer, Buckets, NoofElements_Bloc + 1);
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("AlltoAll = %.3f\n",local_timer_end);
/**** Rearranging BucketBuffer ****/
LocalBucket = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * 2 * NoofElements / Numprocs);
if(LocalBucket == NULL) {
printf("Error : Can not allocate memory \n");
}
count = 1;
for (j=0; j<Numprocs; j++) {
k = 1;
for (i=0; i<BucketBuffer[(NoofElements/Numprocs + 1) * j]; i++)
LocalBucket[count++] = BucketBuffer[(NoofElements/Numprocs + 1) * j + k++];
}
LocalBucket[0] = count-1;
/**** Sorting Local Buckets using Bubble Sort ****/
/*sorting (InputData, NoofElements_Bloc, sizeof(int), intcompare); */
NoElementsToSort = LocalBucket[0];
sorting (&LocalBucket[1], NoElementsToSort);
/**** Gathering sorted sub blocks at root ****/
OutputBuffer = (ELEMENT_T *) shmem_malloc (sizeof(ELEMENT_T) * 2 * NoofElements);
if(OutputBuffer == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Gather (LocalBucket, 2*NoofElements_Bloc, ELEMENT_T_MPI, OutputBuffer,
2*NoofElements_Bloc, ELEMENT_T_MPI, Root, MPI_COMM_WORLD);
#else
shmem_gather32(OutputBuffer, LocalBucket, 0, (2*NoofElements_Bloc));
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("Gather = %.3f\n",local_timer_end);
end_time = (((double)(seconds()-start_time))/1000000) * 1000; // msec
/**** Rearranging output buffer ****/
if (MyRank == Root){
Output = (ELEMENT_T *) malloc (sizeof (ELEMENT_T) * NoofElements);
count = 0;
for(j=0; j<Numprocs; j++){
k = 1;
for(i=0; i<OutputBuffer[(2 * NoofElements/Numprocs) * j]; i++)
Output[count++] = OutputBuffer[(2*NoofElements/Numprocs) * j + k++];
}
printf ( "Number of Elements to be sorted : %d \n", NoofElements);
ELEMENT_T prev = 0;
int fail = 0;
for (i=0; i<NoofElements; i++){
if(Output[i] < prev) { printf("Failed at index %d\n",i); fail = 1; }
prev = Output[i];
}
if(fail) printf("Sorting FAILED\n");
else printf("Sorting PASSED\n");
printf("Time for initialization (tInit) = %.3f\n",init_time);
printf("Time for communicaions (tComm)= %.3f\n",communication_timer); // communication_timer includes init_time
printf("Time for computations (tComp) = %.3f\n",(end_time - communication_timer));
printf("Total Time (excluding initalization = tTotal) = %.3f\n",(end_time - init_time));
free(Output);
printf("============================ Tabulate Statistics ============================\ntInit\ttComm\ttComp\ttTotal\n%.3f\t%.3f\t%.3f\t%.3f\n",init_time, communication_timer, (end_time - communication_timer), (end_time - init_time));
printf("=============================================================================\n===== TEST PASSED in %.3f msec =====\n",end_time);
}/* MyRank==0*/
shmem_free(Input);
shmem_free(OutputBuffer);
shmem_free(InputData);
shmem_free(Buckets);
shmem_free(BucketBuffer);
shmem_free(LocalBucket);
#ifndef HCLIB_COMM_WORKER_FIXED
}
/* Process entry point.  Exactly one communication backend — OpenSHMEM
 * (_OSHMEM_) or MPI (_MPI_) — must be selected at compile time; the
 * sample-sort itself lives in entrypoint(), defined above. */
int main (int argc, char ** argv) {
#if defined(_OSHMEM_) && defined(_MPI_)
/* the two runtimes are mutually exclusive: refuse to run with both */
printf("ERROR: You cannot use both OpenSHMEM as well as MPI\n");
exit(1);
#endif
#if defined(_OSHMEM_)
shmem_init ();
#ifdef _ASYNC_OSHMEM_
/* hand the sort to the asynchronous OpenSHMEM worker runtime */
shmem_workers_init(entrypoint, NULL);
#else
entrypoint(NULL);
#endif //_ASYNC_OSHMEM_
shmem_finalize ();
#elif defined(_MPI_)
MPI_Init(&argc, &argv);
entrypoint(NULL);
MPI_Finalize();
#else
/* neither backend selected: nothing to run */
printf("ERROR: Use either OpenSHMEM or MPI\n");
exit(1);
#endif
return 0;
}
#else // HCLIB_COMM_WORKER_FIXED
/**** Finalize ****/
#if defined(_OSHMEM_)
shmem_finalize();
#elif defined(_MPI_)
MPI_Finalize();
#endif
}
#endif
|
DRB054-inneronly2-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
The inner level loop can be parallelized.
*/
/* DataRaceBench DRB054 (race-free variant): the second loop nest carries a
 * dependence b[i][j] <- b[i-1][j-1] across the OUTER i loop, so only the
 * inner j loop is parallelized there.  The first nest is fully independent
 * and is parallelized at both levels.
 * NOTE(review): printf is called without a visible #include <stdio.h>;
 * presumably the benchmark harness tolerates the implicit declaration —
 * confirm against the build setup. */
int main()
{
int i, j;
int n = 100, m = 100;
double b[n][m];
int _ret_val_0;
/* initialization nest: every b[i][j] written independently */
#pragma cetus private(i, j)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#0#0
#pragma cetus parallel
#pragma omp parallel for private(j)
/* NOTE(review): inner bound is n, not m — harmless here since n == m,
 * but presumably m was intended for the column dimension; confirm */
for (j=0; j<n; j ++ )
{
b[i][j]=((double)(i*j));
}
}
/* diagonal-shift nest: i-loop is serial (loop-carried dependence on i-1),
 * j-loop is safely parallel */
#pragma cetus private(i, j)
#pragma loop name main#1
for (i=1; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#1#0
#pragma cetus parallel
#pragma omp parallel for private(j)
for (j=1; j<m; j ++ )
{
b[i][j]=b[i-1][j-1];
}
}
/* output nest: serial print of the whole array */
#pragma cetus private(i, j)
#pragma loop name main#2
for (i=0; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#2#0
for (j=0; j<n; j ++ )
{
printf("%lf\n", b[i][j]);
}
}
_ret_val_0=0;
return _ret_val_0;
}
|
target.c | // RUN: %libomptarget-compile-generic -fopenmp-version=51
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
#include <stdio.h>
/* libomptarget regression test: a 'present' map modifier must succeed while
 * the enclosing "target data" region keeps i mapped, and must abort the
 * program once that region has ended.  The "// CHECK" comments below are
 * FileCheck directives — they ARE the test oracle and must not be altered. */
int main() {
int i;
// CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]]
fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i);
// CHECK-NOT: Libomptarget
/* inside the data region: i is mapped, so 'present' is satisfied */
#pragma omp target data map(alloc: i)
#pragma omp target map(present, alloc: i)
;
// CHECK: i is present
fprintf(stderr, "i is present\n");
// CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes)
// CHECK: Libomptarget error: Call to getTargetPointer returned null pointer ('present' map type modifier).
// CHECK: Libomptarget error: Call to targetDataBegin failed, abort target.
// CHECK: Libomptarget error: Failed to process data before launching the kernel.
// CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
/* outside the data region: i is no longer mapped, 'present' must be fatal */
#pragma omp target map(present, alloc: i)
;
// CHECK-NOT: i is present
fprintf(stderr, "i is present\n");
return 0;
}
|
ccv_bbf.c | #include "ccv.h"
#include "ccv_internal.h"
#include <sys/time.h>
#ifdef HAVE_GSL
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif
/* Default detection parameters for ccv_bbf_detect_objects: 5 scale
 * intervals per octave, require 2 overlapping neighbors to accept a
 * detection, accurate (non-approximate) scan, 24x24 base window. */
const ccv_bbf_param_t ccv_bbf_default_params = {
.interval = 5,
.min_neighbors = 2,
.accurate = 1,
.flags = 0,
.size = {
24,
24,
},
};
/* round a row width up to the next multiple of 4 bytes (matrix step padding) */
#define _ccv_width_padding(x) (((x) + 3) & -4)
static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8)
{
	/* Evaluates one brightness binary feature: it fires (returns 1) only when
	 * every sampled "positive" pixel is strictly brighter than every sampled
	 * "negative" pixel.  We maintain the running minimum over the positive
	 * set and the running maximum over the negative set, and bail out the
	 * moment the two ranges touch. */
#define pos_px(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]]))
#define neg_px(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]]))
	unsigned char min_pos = pos_px(0);
	unsigned char max_neg = neg_px(0);
	/* shortcut: the very first pair already decides most rejections */
	if (min_pos <= max_neg)
		return 0;
	int k;
	for (k = 1; k < feature->size; k++)
	{
		if (feature->pz[k] >= 0)
		{
			int bright = pos_px(k);
			if (bright < min_pos)
			{
				min_pos = bright;
				if (min_pos <= max_neg)
					return 0;
			}
		}
		if (feature->nz[k] >= 0)
		{
			int dark = neg_px(k);
			if (dark > max_neg)
			{
				max_neg = dark;
				if (min_pos <= max_neg)
					return 0;
			}
		}
	}
#undef pos_px
#undef neg_px
	return 1;
}
static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	/* Loads one boosted stage classifier from a plain-text file.
	 * Floats are serialized as the raw bit pattern of an int, hence the
	 * int/float unions below.
	 * Returns 0 on success, -1 if the file cannot be opened or is malformed.
	 * Fixes over the original: every fscanf result is checked (the original
	 * OR'ed them into a variable it never inspected), and the per-feature
	 * point count is validated against the fixed array capacity so a
	 * corrupted file can no longer overflow feature[i].px/py/pz/nx/ny/nz. */
	FILE* r = fopen(file, "r");
	if (r == 0)
		return -1;
	union { float fl; int i; } fli;
	if (fscanf(r, "%d", &classifier->count) != 1 || classifier->count <= 0 ||
		fscanf(r, "%d", &fli.i) != 1)
	{
		fclose(r);
		return -1;
	}
	classifier->threshold = fli.fl;
	/* NOTE(review): ccmalloc results are not null-checked, matching the rest
	 * of this file — presumably ccmalloc aborts on failure; confirm. */
	classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
	classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
	/* capacity of the fixed-size point arrays inside ccv_bbf_feature_t */
	const int point_max = (int)(sizeof(classifier->feature[0].px) / sizeof(classifier->feature[0].px[0]));
	int i, j;
	for (i = 0; i < classifier->count; i++)
	{
		if (fscanf(r, "%d", &classifier->feature[i].size) != 1 ||
			classifier->feature[i].size <= 0 ||
			classifier->feature[i].size > point_max)
			goto error; /* malformed size would overflow the fixed point arrays */
		for (j = 0; j < classifier->feature[i].size; j++)
		{
			if (fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]) != 3 ||
				fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]) != 3)
				goto error;
		}
		union { float fl; int i; } flia, flib;
		if (fscanf(r, "%d %d", &flia.i, &flib.i) != 2)
			goto error;
		classifier->alpha[i * 2] = flia.fl;
		classifier->alpha[i * 2 + 1] = flib.fl;
	}
	fclose(r);
	return 0;
error:
	/* release partial state so the caller does not see dangling pointers */
	ccfree(classifier->feature);
	ccfree(classifier->alpha);
	classifier->feature = 0;
	classifier->alpha = 0;
	fclose(r);
	return -1;
}
#ifdef HAVE_GSL
static unsigned int _ccv_bbf_time_measure()
{
	/* Wall-clock timestamp in microseconds.  The value wraps in a 32-bit
	 * unsigned, so only differences between nearby calls are meaningful. */
	struct timeval now;
	gettimeofday(&now, 0);
	return (unsigned int)(now.tv_sec * 1000000 + now.tv_usec);
}
/* Generate _ccv_sort_32f(): an ascending quicksort over float arrays,
 * expanded from ccv's CCV_IMPLEMENT_QSORT macro with a throwaway comparator. */
#define less_than(a, b, aux) ((a) < (b))
CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than)
#undef less_than
/* Sums the weighted votes of one boosted stage over a single pre-scaled
 * sample whose three pyramid levels are packed back to back in one buffer
 * (level offsets isizs0 and isizs01). */
static float _ccv_bbf_stage_sum(ccv_bbf_stage_classifier_t* classifier, unsigned char* data, int* steps, int isizs0, int isizs01)
{
	unsigned char* u8[] = { data, data + isizs0, data + isizs01 };
	float sum = 0;
	float* alpha = classifier->alpha;
	ccv_bbf_feature_t* feature = classifier->feature;
	int j;
	for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
		sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
	return sum;
}

/* Evaluates one stage classifier over posnum positive and negnum negative
 * samples, writing the per-sample vote sums into peval[] and neval[].
 * Either side may be empty (count 0 with a null output array), as
 * _ccv_prune_positive_data does for the negative side.
 * Fix over the original: the two byte-identical evaluation loops are
 * factored into the _ccv_bbf_stage_sum helper above. */
static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval)
{
	int i;
	/* per-level row strides, padded to a multiple of 4 bytes */
	int steps[] = { _ccv_width_padding(size.width),
		_ccv_width_padding(size.width >> 1),
		_ccv_width_padding(size.width >> 2) };
	int isizs0 = steps[0] * size.height;
	int isizs01 = isizs0 + steps[1] * (size.height >> 1);
	for (i = 0; i < posnum; i++)
		peval[i] = _ccv_bbf_stage_sum(classifier, posdata[i], steps, isizs0, isizs01);
	for (i = 0; i < negnum; i++)
		neval[i] = _ccv_bbf_stage_sum(classifier, negdata[i], steps, isizs0, isizs01);
}
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
	/* Runs every stage of the cascade over the positive samples, compacting
	 * posdata[] in place so only samples accepted by all stages remain.
	 * Rejected samples are freed.  Returns the number of survivors. */
	float* scores = (float*)ccmalloc(posnum * sizeof(float));
	int remaining = posnum;
	int i, j, kept;
	for (i = 0; i < cascade->count; i++)
	{
		_ccv_bbf_eval_data(cascade->stage_classifier + i, posdata, remaining, 0, 0, size, scores, 0);
		kept = 0;
		for (j = 0; j < remaining; j++)
		{
			if (scores[j] >= cascade->stage_classifier[i].threshold)
				posdata[kept++] = posdata[j];
			else
				ccfree(posdata[j]);
		}
		remaining = kept;
	}
	ccfree(scores);
	return remaining;
}
static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum)
{
	/* Harvests up to negnum negative training samples by running the current
	 * cascade over the background images and keeping the false positives that
	 * survive every stage.  Each kept sample is stored in negdata[] as three
	 * packed pyramid levels (full, half, quarter resolution).  Extra rounds
	 * mirror the images to squeeze out more false positives; the loop stops
	 * when a whole pass yields nothing new.  Returns the count collected. */
	int t, i, j, k, q;
	int negperbg;
	int negtotal = 0;
	/* per-level row strides, padded to a multiple of 4 bytes */
	int steps[] = { _ccv_width_padding(cascade->size.width),
					_ccv_width_padding(cascade->size.width >> 1),
					_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs1 = steps[1] * (cascade->size.height >> 1);
	int isizs2 = steps[2] * (cascade->size.height >> 2);
	int* idcheck = (int*)ccmalloc(negnum * sizeof(int));
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* seeded from a heap address: cheap per-run entropy, not reproducible */
	gsl_rng_set(rng, (unsigned long int)idcheck);
	ccv_size_t imgsz = cascade->size;
	int rneg = negtotal;
	for (t = 0; negtotal < negnum; t++)
	{
		PRINT(CCV_CLI_INFO, "preparing negative data ... 0%%");
		for (i = 0; i < bgnum; i++)
		{
			negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal;
			ccv_dense_matrix_t* image = 0;
			ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
			/* bug fix: check for a failed read BEFORE dereferencing image;
			 * the original ran the type assert first and crashed on an
			 * unreadable file instead of skipping it */
			if (image == 0)
			{
				PRINT(CCV_CLI_ERROR, "\n%s file corrupted\n", bgfiles[i]);
				continue;
			}
			assert((image->type & CCV_C1) && (image->type & CCV_8U));
			/* vary the data across rounds by mirroring */
			if (t % 2 != 0)
				ccv_flip(image, 0, 0, CCV_FLIP_X);
			if (t % 4 >= 2)
				ccv_flip(image, 0, 0, CCV_FLIP_Y);
			ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size };
			ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params);
			memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int));
			for (j = 0; j < ccv_min(detected->rnum, negperbg); j++)
			{
				/* pick a random detection not chosen before and fully inside
				 * the image.  NOTE(review): if every detection were out of
				 * bounds this rejection loop could not terminate — presumed
				 * impossible for detector output; confirm. */
				int r = gsl_rng_uniform_int(rng, detected->rnum);
				int flag = 1;
				ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r);
				while (flag) {
					flag = 0;
					for (k = 0; k < j; k++)
						if (r == idcheck[k])
						{
							flag = 1;
							r = gsl_rng_uniform_int(rng, detected->rnum);
							break;
						}
					rect = (ccv_rect_t*)ccv_array_get(detected, r);
					if ((rect->x < 0) || (rect->y < 0) || (rect->width + rect->x > image->cols) || (rect->height + rect->y > image->rows))
					{
						flag = 1;
						r = gsl_rng_uniform_int(rng, detected->rnum);
					}
				}
				idcheck[j] = r;
				/* crop the detection and build its 3-level pyramid */
				ccv_dense_matrix_t* temp = 0;
				ccv_dense_matrix_t* imgs0 = 0;
				ccv_dense_matrix_t* imgs1 = 0;
				ccv_dense_matrix_t* imgs2 = 0;
				ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width);
				ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA);
				assert(imgs0->step == steps[0]);
				ccv_matrix_free(temp);
				ccv_sample_down(imgs0, &imgs1, 0, 0, 0);
				assert(imgs1->step == steps[1]);
				ccv_sample_down(imgs1, &imgs2, 0, 0, 0);
				assert(imgs2->step == steps[2]);
				negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2);
				unsigned char* u8s0 = negdata[negtotal];
				unsigned char* u8s1 = negdata[negtotal] + isizs0;
				unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1;
				unsigned char* u8[] = { u8s0, u8s1, u8s2 };
				memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step);
				ccv_matrix_free(imgs0);
				memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step);
				ccv_matrix_free(imgs1);
				memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step);
				ccv_matrix_free(imgs2);
				/* keep the sample only if the full cascade still (falsely) accepts it */
				flag = 1;
				ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
				for (k = 0; k < cascade->count; ++k, ++classifier)
				{
					float sum = 0;
					float* alpha = classifier->alpha;
					ccv_bbf_feature_t* feature = classifier->feature;
					for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature)
						sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
					if (sum < classifier->threshold)
					{
						flag = 0;
						break;
					}
				}
				if (!flag)
					ccfree(negdata[negtotal]);
				else {
					++negtotal;
					if (negtotal >= negnum)
						break;
				}
			}
			ccv_array_free(detected);
			ccv_matrix_free(image);
			ccv_drain_cache();
			PRINT(CCV_CLI_INFO, "\rpreparing negative data ... %2d%%", 100 * negtotal / negnum);
			fflush(0);
			if (negtotal >= negnum)
				break;
		}
		/* stop if a whole pass over all backgrounds produced nothing new */
		if (rneg == negtotal)
			break;
		rneg = negtotal;
		PRINT(CCV_CLI_INFO, "\nentering additional round %d\n", t + 1);
	}
	gsl_rng_free(rng);
	ccfree(idcheck);
	ccv_drain_cache();
	PRINT(CCV_CLI_INFO, "\n");
	return negtotal;
}
static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum)
{
	/* Packs every positive training image into a single contiguous buffer
	 * holding its three pyramid levels (full, half, quarter resolution),
	 * storing the buffer in posdata[].  Each input must already be an
	 * 8-bit single-channel image of exactly the training window size. */
	PRINT(CCV_CLI_INFO, "preparing positive data ...  0%%");
	int k;
	for (k = 0; k < posnum; k++)
	{
		ccv_dense_matrix_t* level0 = posimg[k];
		ccv_dense_matrix_t* level1 = 0;
		ccv_dense_matrix_t* level2 = 0;
		assert((level0->type & CCV_C1) && (level0->type & CCV_8U) && level0->rows == size.height && level0->cols == size.width);
		ccv_sample_down(level0, &level1, 0, 0, 0);
		ccv_sample_down(level1, &level2, 0, 0, 0);
		int bytes0 = level0->rows * level0->step;
		int bytes1 = level1->rows * level1->step;
		int bytes2 = level2->rows * level2->step;
		unsigned char* packed = (unsigned char*)ccmalloc(bytes0 + bytes1 + bytes2);
		memcpy(packed, level0->data.u8, bytes0);
		memcpy(packed + bytes0, level1->data.u8, bytes1);
		memcpy(packed + bytes0 + bytes1, level2->data.u8, bytes2);
		posdata[k] = packed;
		PRINT(CCV_CLI_INFO, "\rpreparing positive data ... %2d%%", 100 * (k + 1) / posnum);
		fflush(0);
		ccv_matrix_free(level1);
		ccv_matrix_free(level2);
	}
	ccv_drain_cache();
	PRINT(CCV_CLI_INFO, "\n");
}
/* One candidate in the genetic feature search. */
typedef struct {
double fitness; /* selection score derived from error, age and point count */
int pk, nk; /* number of positive / negative points in use */
int age; /* generations since this gene last changed */
double error; /* weighted classification error on the training data */
ccv_bbf_feature_t feature; /* the feature encoded by this gene */
} ccv_bbf_gene_t;
static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene)
{
	/* fitness = accuracy, discounted for age (stale genes fade) and boosted
	 * slightly for each point the feature samples (factor 1.015 per point) */
	double accuracy = 1 - gene->error;
	double age_decay = exp(-0.01 * gene->age);
	double size_bonus = exp((gene->pk + gene->nk) * log(1.015));
	gene->fitness = accuracy * age_decay * size_bonus;
}
static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z)
{
	/* Returns 1 iff point (x, y) on pyramid level z already appears among
	 * the gene's positive or negative sample points, 0 otherwise. */
	int k;
	for (k = 0; k < gene->pk; k++)
		if (gene->feature.pz[k] == z && gene->feature.px[k] == x && gene->feature.py[k] == y)
			return 1;
	for (k = 0; k < gene->nk; k++)
		if (gene->feature.nz[k] == z && gene->feature.nx[k] == x && gene->feature.ny[k] == y)
			return 1;
	return 0;
}
/* Draws a random pyramid point (z, then x, then y — the same RNG call order
 * as the original duplicated loops, so identical seeds yield identical
 * genes) that the gene does not already use, via rejection sampling. */
static inline void _ccv_bbf_random_new_point(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols, int* x, int* y, int* z)
{
	do {
		*z = gsl_rng_uniform_int(rng, 3);
		*x = gsl_rng_uniform_int(rng, cols[*z]);
		*y = gsl_rng_uniform_int(rng, rows[*z]);
	} while (_ccv_bbf_exist_gene_feature(gene, *x, *y, *z));
}

/* Initializes a gene with random point counts and random, duplicate-free
 * positive/negative sample points.  rows[]/cols[] give the dimensions of
 * the three pyramid levels.
 * Fix over the original: the two identical point-drawing loops share the
 * _ccv_bbf_random_new_point helper above. */
static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols)
{
	int i;
	do {
		gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
		gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
	} while (gene->pk + gene->nk < CCV_BBF_POINT_MIN); /* a hard restriction of at least 3 points have to be examed */
	gene->feature.size = ccv_max(gene->pk, gene->nk);
	gene->age = 0;
	/* -1 marks an unused slot */
	for (i = 0; i < CCV_BBF_POINT_MAX; i++)
	{
		gene->feature.pz[i] = -1;
		gene->feature.nz[i] = -1;
	}
	int x, y, z;
	for (i = 0; i < gene->pk; i++)
	{
		_ccv_bbf_random_new_point(rng, gene, rows, cols, &x, &y, &z);
		gene->feature.pz[i] = z;
		gene->feature.px[i] = x;
		gene->feature.py[i] = y;
	}
	for (i = 0; i < gene->nk; i++)
	{
		_ccv_bbf_random_new_point(rng, gene, rows, cols, &x, &y, &z);
		gene->feature.nz[i] = z;
		gene->feature.nx[i] = x;
		gene->feature.ny[i] = y;
	}
}
static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	/* Weighted classification error of a single feature: adds pw[k] for each
	 * positive sample the feature rejects, nw[k] for each negative sample it
	 * accepts.  Accumulation order (positives first, in index order) matches
	 * the original to keep the floating-point sum bit-identical. */
	int steps[] = { _ccv_width_padding(size.width),
		_ccv_width_padding(size.width >> 1),
		_ccv_width_padding(size.width >> 2) };
	int level0_bytes = steps[0] * size.height;
	int level01_bytes = level0_bytes + steps[1] * (size.height >> 1);
	double error = 0;
	int k;
	for (k = 0; k < posnum; k++)
	{
		unsigned char* planes[] = { posdata[k], posdata[k] + level0_bytes, posdata[k] + level01_bytes };
		if (_ccv_run_bbf_feature(feature, steps, planes) == 0)
			error += pw[k];
	}
	for (k = 0; k < negnum; k++)
	{
		unsigned char* planes[] = { negdata[k], negdata[k] + level0_bytes, negdata[k] + level01_bytes };
		if (_ccv_run_bbf_feature(feature, steps, planes) != 0)
			error += nw[k];
	}
	return error;
}
/* Generate _ccv_bbf_genetic_qsort(): sorts genes by DESCENDING fitness
 * (note the >=), so the fittest candidates come first. */
#define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Searches for a single low-error BBF feature with a genetic algorithm:
 * a population of pnum (= 100 * ftnum) genes is repeatedly scored, sorted
 * by fitness, and refilled with mutations, hybrids and fresh random genes.
 * Stops after 40 consecutive generations without a new best.  pw/nw are the
 * boosting weights of the positive/negative samples.  Returns the best
 * feature found. */
static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw)
{
ccv_bbf_feature_t best;
/* seed (random method) */
gsl_rng_env_setup();
gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
/* seed from the bit pattern of the combined first weights: deterministic
 * for a given training state */
union { unsigned long int li; double db; } dbli;
dbli.db = pw[0] + nw[0];
gsl_rng_set(rng, dbli.li);
int i, j;
int pnum = ftnum * 100;
assert(pnum > 0);
ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t));
/* dimensions of the three pyramid levels */
int rows[] = { size.height, size.height >> 1, size.height >> 2 };
int cols[] = { size.width, size.width >> 1, size.width >> 2 };
/* initial population: fully random genes, then score and rank them */
for (i = 0; i < pnum; i++)
_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
for (i = 0; i < pnum; i++)
gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
timer = _ccv_bbf_time_measure() - timer;
for (i = 0; i < pnum; i++)
_ccv_bbf_genetic_fitness(&gene[i]);
double best_err = 1;
/* per-generation population makeup (survivors are the top ftnum) */
int rnum = ftnum * 39; /* number of randomize */
int mnum = ftnum * 40; /* number of mutation */
int hnum = ftnum * 20; /* number of hybrid */
/* iteration stop crit : best no change in 40 iterations */
int it = 0, t;
for (t = 0 ; it < 40; ++it, ++t)
{
/* find the gene with the lowest error and re-score it */
int min_id = 0;
double min_err = gene[0].error;
for (i = 1; i < pnum; i++)
if (gene[i].error < min_err)
{
min_id = i;
min_err = gene[i].error;
}
min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw);
if (min_err < best_err)
{
/* new global best: record it and reset the stagnation counter */
best_err = min_err;
memcpy(&best, &gene[min_id].feature, sizeof(best));
PRINT(CCV_CLI_INFO, "best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size);
for (i = 0; i < best.size; i++)
PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.px[i], best.py[i], best.pz[i]);
PRINT(CCV_CLI_INFO, "\n|-negative point: ");
for (i = 0; i < best.size; i++)
PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]);
PRINT(CCV_CLI_INFO, "\n");
it = 0;
}
PRINT(CCV_CLI_INFO, "minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000);
/* fittest first; the top ftnum genes survive and age, the rest of the
 * population is regenerated below */
_ccv_bbf_genetic_qsort(gene, pnum, 0);
for (i = 0; i < ftnum; i++)
++gene[i].age;
/* mutations: copy a random survivor and perturb it until a change sticks */
for (i = ftnum; i < ftnum + mnum; i++)
{
int parent = gsl_rng_uniform_int(rng, ftnum);
memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t));
/* three mutation strategy : 1. add, 2. remove, 3. refine */
int pnm, pn = gsl_rng_uniform_int(rng, 2);
/* pn selects the positive (0) or negative (1) point set; these
 * parallel arrays let one code path edit either set */
int* pnk[] = { &gene[i].pk, &gene[i].nk };
int* pnx[] = { gene[i].feature.px, gene[i].feature.nx };
int* pny[] = { gene[i].feature.py, gene[i].feature.ny };
int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz };
int x, y, z;
int victim, decay = 1;
do {
switch (gsl_rng_uniform_int(rng, 3))
{
case 0: /* add */
if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX)
break;
while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX)
pn = gsl_rng_uniform_int(rng, 2);
do {
z = gsl_rng_uniform_int(rng, 3);
x = gsl_rng_uniform_int(rng, cols[z]);
y = gsl_rng_uniform_int(rng, rows[z]);
} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
pnz[pn][*pnk[pn]] = z;
pnx[pn][*pnk[pn]] = x;
pny[pn][*pnk[pn]] = y;
++(*pnk[pn]);
gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
decay = gene[i].age = 0;
break;
case 1: /* remove */
if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */
break;
while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN)
pn = gsl_rng_uniform_int(rng, 2);
victim = gsl_rng_uniform_int(rng, *pnk[pn]);
/* shift the remaining points down over the victim */
for (j = victim; j < *pnk[pn] - 1; j++)
{
pnz[pn][j] = pnz[pn][j + 1];
pnx[pn][j] = pnx[pn][j + 1];
pny[pn][j] = pny[pn][j + 1];
}
pnz[pn][*pnk[pn] - 1] = -1;
--(*pnk[pn]);
gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
decay = gene[i].age = 0;
break;
case 2: /* refine */
pnm = gsl_rng_uniform_int(rng, *pnk[pn]);
do {
z = gsl_rng_uniform_int(rng, 3);
x = gsl_rng_uniform_int(rng, cols[z]);
y = gsl_rng_uniform_int(rng, rows[z]);
} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
pnz[pn][pnm] = z;
pnx[pn][pnm] = x;
pny[pn][pnm] = y;
decay = gene[i].age = 0;
break;
}
} while (decay);
}
for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++)
{
/* hybrid strategy: taking positive points from dad, negative points from mum */
int dad, mum;
do {
dad = gsl_rng_uniform_int(rng, ftnum);
mum = gsl_rng_uniform_int(rng, ftnum);
} while (dad == mum || gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */
for (j = 0; j < CCV_BBF_POINT_MAX; j++)
{
gene[i].feature.pz[j] = -1;
gene[i].feature.nz[j] = -1;
}
gene[i].pk = gene[dad].pk;
for (j = 0; j < gene[i].pk; j++)
{
gene[i].feature.pz[j] = gene[dad].feature.pz[j];
gene[i].feature.px[j] = gene[dad].feature.px[j];
gene[i].feature.py[j] = gene[dad].feature.py[j];
}
gene[i].nk = gene[mum].nk;
for (j = 0; j < gene[i].nk; j++)
{
gene[i].feature.nz[j] = gene[mum].feature.nz[j];
gene[i].feature.nx[j] = gene[mum].feature.nx[j];
gene[i].feature.ny[j] = gene[mum].feature.ny[j];
}
gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
gene[i].age = 0;
}
/* fresh random genes keep the population diverse */
for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++)
_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
/* re-score and re-rank the whole new population */
timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
for (i = 0; i < pnum; i++)
gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
timer = _ccv_bbf_time_measure() - timer;
for (i = 0; i < pnum; i++)
_ccv_bbf_genetic_fitness(&gene[i]);
}
ccfree(gene);
gsl_rng_free(rng);
return best;
}
/* Generate _ccv_bbf_best_qsort(): sorts genes by ASCENDING error, so the
 * most accurate candidates come first. */
#define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	/* Scores all pnum candidate genes, sorts them by ascending error and
	 * returns the best one that samples at least point_min points.
	 * NOTE(review): if no gene reaches point_min the sorted front gene[0]
	 * is returned anyway — presumably the callers guarantee at least one
	 * qualifying candidate; confirm. */
	int i = 0;
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	_ccv_bbf_best_qsort(gene, pnum, 0);
	int best_idx = 0;
	double best_err = gene[0].error;
	for (i = 0; i < pnum; i++)
		if (gene[i].nk + gene[i].pk >= point_min)
		{
			best_idx = i;
			best_err = gene[i].error;
			break;
		}
	PRINT(CCV_CLI_INFO, "local best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, gene[best_idx].feature.size);
	for (i = 0; i < gene[best_idx].feature.size; i++)
		PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[best_idx].feature.px[i], gene[best_idx].feature.py[i], gene[best_idx].feature.pz[i]);
	PRINT(CCV_CLI_INFO, "\n|-negative point: ");
	for (i = 0; i < gene[best_idx].feature.size; i++)
		PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[best_idx].feature.nx[i], gene[best_idx].feature.ny[i], gene[best_idx].feature.nz[i]);
	PRINT(CCV_CLI_INFO, "\nthe computation takes %d ms\n", timer / 1000);
	return gene[best_idx];
}
/* Local ("float search") optimization of a BBF feature.  Starting from
 * either a bootstrapped two-point feature (best_feature == 0) or a caller
 * supplied seed, it exhaustively enumerates single-point mutations
 * (add/remove/move a positive or negative point), scores them all with
 * _ccv_bbf_best_gene, and keeps the winner until the error stops improving
 * by more than 1e-10.  pw/nw are the current boosting sample weights. */
static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_gene_t best_gene;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* derive a deterministic RNG seed from the bit pattern of the current weights */
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j, k, q, p, g, t;
	/* the three pyramid levels a feature may sample: full, half, quarter resolution */
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2];
	/* worst-case number of mutations generated per round, sized up front */
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) * sizeof(ccv_bbf_gene_t));
	if (best_feature == 0)
	{
		/* bootstrapping the best feature, start from two pixels, one for positive, one for negative
		 * the bootstrapping process go like this: first, it will assign a random pixel as positive
		 * and enumerate every possible pixel as negative, and pick the best one. Then, enumerate every
		 * possible pixel as positive, and pick the best one, until it converges */
		memset(&best_gene, 0, sizeof(ccv_bbf_gene_t));
		/* -1 in pz/nz marks an unused point slot */
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1;
		best_gene.pk = 1;
		best_gene.nk = 0;
		best_gene.feature.size = 1;
		best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3);
		best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]);
		best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]);
		for (t = 0; ; ++t)
		{
			g = 0;
			if (t % 2 == 0)
			{
				/* even rounds: hold the positive point fixed, try every pixel as the negative */
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.nz[0] = i;
								gene[g].feature.nx[0] = j;
								gene[g].feature.ny[0] = k;
								g++;
							}
			} else {
				/* odd rounds: hold the negative point fixed, try every pixel as the positive */
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.pz[0] = i;
								gene[g].feature.px[0] = j;
								gene[g].feature.py[0] = k;
								g++;
							}
			}
			PRINT(CCV_CLI_INFO, "bootstrapping round : %d\n", t);
			ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw);
			/* converged: no mutation beats the incumbent by more than 1e-10 */
			if (local_gene.error >= best_gene.error - 1e-10)
				break;
			best_gene = local_gene;
		}
	} else {
		/* seed from the caller's feature; recover pk/nk by scanning for the -1 sentinels */
		best_gene.feature = *best_feature;
		best_gene.pk = best_gene.nk = best_gene.feature.size;
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->pz[i] == -1)
			{
				best_gene.pk = i;
				break;
			}
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->nz[i] == -1)
			{
				best_gene.nk = i;
				break;
			}
	}
	/* after bootstrapping, the float search technique will do the following permutations:
	 * a). add a new point to positive or negative
	 * b). remove a point from positive or negative
	 * c). move an existing point in positive or negative to another position
	 * the three rules applied exhaustively, no heuristic used. */
	for (t = 0; ; ++t)
	{
		g = 0;
		for (i = 0; i < 3; i++)
			for (j = 0; j < cols[i]; j++)
				for (k = 0; k < rows[i]; k++)
					if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i))
					{
						/* add positive point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[gene[g].pk] = i;
							gene[g].feature.px[gene[g].pk] = j;
							gene[g].feature.py[gene[g].pk] = k;
							gene[g].pk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* add negative point */
						if (best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[gene[g].nk] = i;
							gene[g].feature.nx[gene[g].nk] = j;
							gene[g].feature.ny[gene[g].nk] = k;
							gene[g].nk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* refine positive point */
						for (q = 0; q < best_gene.pk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[q] = i;
							gene[g].feature.px[q] = j;
							gene[g].feature.py[q] = k;
							g++;
						}
						/* add positive point, remove negative point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1)
						{
							for (q = 0; q < best_gene.nk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.pz[gene[g].pk] = i;
								gene[g].feature.px[gene[g].pk] = j;
								gene[g].feature.py[gene[g].pk] = k;
								gene[g].pk++;
								/* compact the negative array over the removed slot q */
								for (p = q; p < best_gene.nk - 1; p++)
								{
									gene[g].feature.nz[p] = gene[g].feature.nz[p + 1];
									gene[g].feature.nx[p] = gene[g].feature.nx[p + 1];
									gene[g].feature.ny[p] = gene[g].feature.ny[p + 1];
								}
								gene[g].feature.nz[gene[g].nk - 1] = -1;
								gene[g].nk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
						/* refine negative point */
						for (q = 0; q < best_gene.nk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[q] = i;
							gene[g].feature.nx[q] = j;
							gene[g].feature.ny[q] = k;
							g++;
						}
						/* add negative point, remove positive point */
						if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							for (q = 0; q < best_gene.pk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.nz[gene[g].nk] = i;
								gene[g].feature.nx[gene[g].nk] = j;
								gene[g].feature.ny[gene[g].nk] = k;
								gene[g].nk++;
								/* compact the positive array over the removed slot q */
								for (p = q; p < best_gene.pk - 1; p++)
								{
									gene[g].feature.pz[p] = gene[g].feature.pz[p + 1];
									gene[g].feature.px[p] = gene[g].feature.px[p + 1];
									gene[g].feature.py[p] = gene[g].feature.py[p + 1];
								}
								gene[g].feature.pz[gene[g].pk - 1] = -1;
								gene[g].pk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
					}
		/* pure removal of a positive point (only when more than one remains) */
		if (best_gene.pk > 1)
			for (q = 0; q < best_gene.pk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.pk - 1; i++)
				{
					gene[g].feature.pz[i] = gene[g].feature.pz[i + 1];
					gene[g].feature.px[i] = gene[g].feature.px[i + 1];
					gene[g].feature.py[i] = gene[g].feature.py[i + 1];
				}
				gene[g].feature.pz[gene[g].pk - 1] = -1;
				gene[g].pk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* pure removal of a negative point (only when more than one remains) */
		if (best_gene.nk > 1)
			for (q = 0; q < best_gene.nk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.nk - 1; i++)
				{
					gene[g].feature.nz[i] = gene[g].feature.nz[i + 1];
					gene[g].feature.nx[i] = gene[g].feature.nx[i + 1];
					gene[g].feature.ny[i] = gene[g].feature.ny[i + 1];
				}
				gene[g].feature.nz[gene[g].nk - 1] = -1;
				gene[g].nk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* the unmodified incumbent competes too, so the loop can terminate */
		gene[g] = best_gene;
		g++;
		PRINT(CCV_CLI_INFO, "float search round : %d\n", t);
		ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw);
		if (local_gene.error >= best_gene.error - 1e-10)
			break;
		best_gene = local_gene;
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best_gene.feature;
}
/* Serialize one stage classifier to a text file.
 * Layout: feature count, threshold (float bits printed as int), then per
 * feature: point count, one line per positive point, one line per negative
 * point, and finally the two alpha values (again as float bit patterns).
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* fp = fopen(file, "wb");
	if (fp == 0)
		return -1;
	/* store the float threshold as its exact bit pattern so the text
	 * round-trip is lossless */
	union { float fl; int i; } bits;
	bits.fl = classifier->threshold;
	fprintf(fp, "%d\n", classifier->count);
	fprintf(fp, "%d\n", bits.i);
	int f;
	for (f = 0; f < classifier->count; f++)
	{
		const ccv_bbf_feature_t* feature = classifier->feature + f;
		fprintf(fp, "%d\n", feature->size);
		int p;
		for (p = 0; p < feature->size; p++)
		{
			fprintf(fp, "%d %d %d\n", feature->px[p], feature->py[p], feature->pz[p]);
			fprintf(fp, "%d %d %d\n", feature->nx[p], feature->ny[p], feature->nz[p]);
		}
		union { float fl; int i; } abits0, abits1;
		abits0.fl = classifier->alpha[f * 2];
		abits1.fl = classifier->alpha[f * 2 + 1];
		fprintf(fp, "%d %d\n", abits0.i, abits1.i);
	}
	fclose(fp);
	return 0;
}
/* Load cached negative (background) samples produced by
 * _ccv_write_background_data.  On success *negnum holds the sample count and
 * negdata[0..*negnum-1] point to freshly ccmalloc'd pixel buffers (the three
 * pyramid levels packed back to back).  Returns 0 on success, -1 on open or
 * read failure.
 * Fix: the original OR'd fread results into an ignored variable and always
 * returned 0, so a truncated file produced garbage counts/buffers.  Short
 * reads now fail explicitly, releasing anything allocated so far and
 * resetting *negnum so the caller never sees partial data.
 * NOTE(review): the caller's negdata capacity is not visible here -- a file
 * whose stored count exceeds it would still overrun; verify at call sites. */
static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size)
{
	FILE* r = fopen(file, "rb");
	if (r == 0) return -1;
	if (fread(negnum, sizeof(int), 1, r) != 1 || *negnum < 0)
	{
		*negnum = 0;
		fclose(r);
		return -1;
	}
	int i;
	/* bytes per sample: padded rows of the full, half and quarter scale images */
	int isizs012 = _ccv_width_padding(size.width) * size.height +
		_ccv_width_padding(size.width >> 1) * (size.height >> 1) +
		_ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < *negnum; i++)
	{
		negdata[i] = (unsigned char*)ccmalloc(isizs012);
		if (fread(negdata[i], 1, isizs012, r) != (size_t)isizs012)
		{
			/* truncated file: roll back everything read so far */
			int j;
			for (j = 0; j <= i; j++)
				ccfree(negdata[j]);
			*negnum = 0;
			fclose(r);
			return -1;
		}
	}
	fclose(r);
	return 0;
}
/* Persist negative (background) samples so a later run can resume without
 * re-mining them; layout mirrors _ccv_read_background_data (count, then the
 * raw three-level pixel buffer of each sample).  Returns 0 on success, -1 on
 * open or write failure.
 * Fix: open with "wb" -- the payload is raw binary written with fwrite and
 * the matching reader already opens "rb"; text mode would corrupt the data
 * on platforms that translate line endings.  Write errors are now reported
 * instead of silently producing a truncated cache. */
static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size)
{
	FILE* w = fopen(file, "wb");
	if (w == 0) return -1;
	if (fwrite(&negnum, sizeof(int), 1, w) != 1)
	{
		fclose(w);
		return -1;
	}
	int i;
	/* bytes per sample: padded rows of the full, half and quarter scale images */
	int isizs012 = _ccv_width_padding(size.width) * size.height +
		_ccv_width_padding(size.width >> 1) * (size.height >> 1) +
		_ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < negnum; i++)
		if (fwrite(negdata[i], 1, isizs012, w) != (size_t)isizs012)
		{
			fclose(w);
			return -1;
		}
	fclose(w);
	return 0;
}
/* Restore training progress written by _ccv_save_bbf_cacade_training_state:
 * stage index i, weak classifier index k, background-cache flag bg, and the
 * per-sample boosting weights pw/nw (each double stored as the two ints of
 * its bit pattern, keeping the text round-trip exact).  Returns 0 on
 * success, -1 if the file is missing or truncated.
 * Fix: the original OR'd fscanf results into an unused variable and always
 * returned 0, so a truncated checkpoint silently filled the outputs with
 * garbage.  Parsing is now checked; i/k/bg are only committed once the whole
 * file parses, so on failure the caller keeps its initial values (weight
 * arrays may be partially written but are unusable on a -1 return anyway). */
static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* r = fopen(file, "r");
	if (r == 0) return -1;
	int li, lk, lbg;
	if (fscanf(r, "%d %d %d", &li, &lk, &lbg) != 3)
	{
		fclose(r);
		return -1;
	}
	int j;
	union { double db; int i[2]; } dbi;
	for (j = 0; j < posnum; j++)
	{
		if (fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]) != 2)
		{
			fclose(r);
			return -1;
		}
		pw[j] = dbi.db;
	}
	for (j = 0; j < negnum; j++)
	{
		if (fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]) != 2)
		{
			fclose(r);
			return -1;
		}
		nw[j] = dbi.db;
	}
	*i = li;
	*k = lk;
	*bg = lbg;
	fclose(r);
	return 0;
}
/* Checkpoint training progress (stage i, weak classifier k, background-cache
 * flag bg, and the positive/negative sample weights) into a text file that
 * _ccv_resume_bbf_cascade_training_state can read back.  Each double is
 * emitted as the two ints of its bit pattern so no precision is lost in the
 * text round trip.  Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* out = fopen(file, "w");
	if (out == 0)
		return -1;
	fprintf(out, "%d %d %d\n", i, k, bg);
	union { double db; int i[2]; } bits;
	int idx;
	for (idx = 0; idx < posnum; ++idx)
	{
		bits.db = pw[idx];
		fprintf(out, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(out, "\n");
	for (idx = 0; idx < negnum; ++idx)
	{
		bits.db = nw[idx];
		fprintf(out, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(out, "\n");
	fclose(out);
	return 0;
}
/* Train a BBF classifier cascade with AdaBoost over binary pixel-pair
 * features.  posimg holds the positive samples, bgfiles the background
 * images negatives are mined from, and dir a working directory used both
 * for the resulting stage files and for checkpoints so an interrupted run
 * can resume.  Requires libgsl (see the #else stub below).
 * Fixes: "postivie" typo in the progress log, and the stage classifier
 * array is now released before the cascade struct instead of being leaked. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	int i, j, k;
	/* allocate memory for usage */
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	cascade->count = 0;
	cascade->size = size;
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t));
	unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*));
	unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*));
	double* pw = (double*)ccmalloc(posnum * sizeof(double));
	double* nw = (double*)ccmalloc(negnum * sizeof(double));
	float* peval = (float*)ccmalloc(posnum * sizeof(float));
	float* neval = (float*)ccmalloc(negnum * sizeof(float));
	double inv_balance_k = 1. / params.balance_k;
	/* balance factor k, and weighted with 0.01 */
	params.balance_k *= 0.01;
	inv_balance_k *= 0.01;
	/* row strides of the three pyramid levels of one packed sample */
	int steps[] = { _ccv_width_padding(cascade->size.width),
		_ccv_width_padding(cascade->size.width >> 1),
		_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1);
	i = 0;
	k = 0;
	int bg = 0;
	int cacheK = 10;
	/* state resume code */
	char buf[1024];
	sprintf(buf, "%s/stat.txt", dir);
	_ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum);
	if (i > 0)
	{
		/* stages 0..i-1 already trained: reload them from disk */
		cascade->count = i;
		ccfree(cascade->stage_classifier);
		cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t));
		for (j = 0; j < i; j++)
		{
			sprintf(buf, "%s/stage-%d.txt", dir, j);
			_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]);
		}
	}
	if (k > 0)
		cacheK = k;
	int rpos, rneg = 0;
	if (bg)
	{
		sprintf(buf, "%s/negs.txt", dir);
		_ccv_read_background_data(buf, negdata, &rneg, cascade->size);
	}
	for (; i < params.layer; i++)
	{
		if (!bg)
		{
			rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum);
			/* save state of background data */
			sprintf(buf, "%s/negs.txt", dir);
			_ccv_write_background_data(buf, negdata, rneg, cascade->size);
			bg = 1;
		}
		double totalw;
		/* save state of cascade : level, weight etc. */
		sprintf(buf, "%s/stat.txt", dir);
		_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
		ccv_bbf_stage_classifier_t classifier;
		if (k > 0)
		{
			/* resume state of classifier */
			sprintf( buf, "%s/stage-%d.txt", dir, i );
			_ccv_read_bbf_stage_classifier(buf, &classifier);
		} else {
			/* initialize classifier */
			for (j = 0; j < posnum; j++)
				pw[j] = params.balance_k;
			for (j = 0; j < rneg; j++)
				nw[j] = inv_balance_k;
			classifier.count = k;
			classifier.threshold = 0;
			classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t));
			classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float));
		}
		_ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum);
		/* drop positives already rejected by earlier stages */
		rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size);
		PRINT(CCV_CLI_INFO, "%d positive data and %d negative data in training\n", rpos, rneg);
		/* reweight to 1.00 */
		totalw = 0;
		for (j = 0; j < rpos; j++)
			totalw += pw[j];
		for (j = 0; j < rneg; j++)
			totalw += nw[j];
		for (j = 0; j < rpos; j++)
			pw[j] = pw[j] / totalw;
		for (j = 0; j < rneg; j++)
			nw[j] = nw[j] / totalw;
		/* boosting loop: add weak features until this stage meets its
		 * true-positive / false-positive criteria */
		for (; ; k++)
		{
			/* get overall true-positive, false-positive rate and threshold */
			double tp = 0, fp = 0, etp = 0, efp = 0;
			_ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval);
			_ccv_sort_32f(peval, rpos, 0);
			/* threshold placed so at least pos_crit of positives pass this stage */
			classifier.threshold = peval[(int)((1. - params.pos_crit) * rpos)] - 1e-6;
			for (j = 0; j < rpos; j++)
			{
				if (peval[j] >= 0)
					++tp;
				if (peval[j] >= classifier.threshold)
					++etp;
			}
			tp /= rpos; etp /= rpos;
			for (j = 0; j < rneg; j++)
			{
				if (neval[j] >= 0)
					++fp;
				if (neval[j] >= classifier.threshold)
					++efp;
			}
			fp /= rneg; efp /= rneg;
			PRINT(CCV_CLI_INFO, "stage classifier real TP rate : %f, FP rate : %f\n", tp, fp);
			PRINT(CCV_CLI_INFO, "stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold);
			if (k > 0)
			{
				/* save classifier state */
				sprintf(buf, "%s/stage-%d.txt", dir, i);
				_ccv_write_bbf_stage_classifier(buf, &classifier);
				sprintf(buf, "%s/stat.txt", dir);
				_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
			}
			if (etp > params.pos_crit && efp < params.neg_crit)
				break;
			/* TODO: more post-process is needed in here */
			/* select the best feature in current distribution through genetic algorithm optimization */
			ccv_bbf_feature_t best;
			if (params.optimizer == CCV_BBF_GENETIC_OPT)
			{
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
			} else if (params.optimizer == CCV_BBF_FLOAT_OPT) {
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw);
			} else {
				/* hybrid: genetic search seeds the local float search */
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw);
			}
			double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw);
			double rw = (1 - err) / err;
			totalw = 0;
			/* reweight: AdaBoost up-weights the samples the new feature misclassifies */
			for (j = 0; j < rpos; j++)
			{
				unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 };
				if (!_ccv_run_bbf_feature(&best, steps, u8))
					pw[j] *= rw;
				pw[j] *= params.balance_k;
				totalw += pw[j];
			}
			for (j = 0; j < rneg; j++)
			{
				unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 };
				if (_ccv_run_bbf_feature(&best, steps, u8))
					nw[j] *= rw;
				nw[j] *= inv_balance_k;
				totalw += nw[j];
			}
			for (j = 0; j < rpos; j++)
				pw[j] = pw[j] / totalw;
			for (j = 0; j < rneg; j++)
				nw[j] = nw[j] / totalw;
			double c = log(rw);
			PRINT(CCV_CLI_INFO, "coefficient of feature %d: %f\n", k + 1, c);
			classifier.count = k + 1;
			/* resizing classifier */
			if (k >= cacheK)
			{
				/* grow the feature/alpha caches geometrically */
				ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t));
				memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t));
				ccfree(classifier.feature);
				float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float));
				memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float));
				ccfree(classifier.alpha);
				classifier.feature = feature;
				classifier.alpha = alpha;
				cacheK *= 2;
			}
			/* setup new feature */
			classifier.feature[k] = best;
			classifier.alpha[k * 2] = -c;
			classifier.alpha[k * 2 + 1] = c;
		}
		/* append the finished stage to the cascade */
		cascade->count = i + 1;
		ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
		memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t));
		ccfree(cascade->stage_classifier);
		stage_classifier[i] = classifier;
		cascade->stage_classifier = stage_classifier;
		k = 0;
		bg = 0;
		for (j = 0; j < rpos; j++)
			ccfree(posdata[j]);
		for (j = 0; j < rneg; j++)
			ccfree(negdata[j]);
	}
	ccfree(neval);
	ccfree(peval);
	ccfree(nw);
	ccfree(pw);
	ccfree(negdata);
	ccfree(posdata);
	/* fix: the stage classifier array was leaked before; release it with the
	 * cascade struct.  NOTE(review): each stage's feature/alpha buffers still
	 * leak here -- freeing them would assume _ccv_read_bbf_stage_classifier
	 * also ccmalloc's those arrays; verify before tightening further. */
	ccfree(cascade->stage_classifier);
	ccfree(cascade);
}
#else
/* Stub compiled when ccv is built without libgsl: BBF training uses GSL's
 * random number generator, so without it we only report the missing
 * dependency instead of training. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	fprintf(stderr, " ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n");
}
#endif
/* Grouping predicate for ccv_array_group: two detections belong to the same
 * cluster when their top-left corners are within a quarter of r1's width of
 * each other and their widths are within a factor of 1.5 either way.
 * data is unused (required by the callback signature). */
static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	const int distance = (int)(r1->rect.width * 0.25 + 0.5);
	if (r2->rect.x > r1->rect.x + distance || r2->rect.x < r1->rect.x - distance)
		return 0;
	if (r2->rect.y > r1->rect.y + distance || r2->rect.y < r1->rect.y - distance)
		return 0;
	if (r2->rect.width > (int)(r1->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}
/* Same clustering predicate as _ccv_is_equal, but detections must also carry
 * the same classification id (i.e. come from the same cascade).
 * data is unused (required by the callback signature). */
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	if (r2->classification.id != r1->classification.id)
		return 0;
	const int distance = (int)(r1->rect.width * 0.25 + 0.5);
	if (r2->rect.x > r1->rect.x + distance || r2->rect.x < r1->rect.x - distance)
		return 0;
	if (r2->rect.y > r1->rect.y + distance || r2->rect.y < r1->rect.y - distance)
		return 0;
	if (r2->rect.width > (int)(r1->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}
/* Multi-scale BBF object detection.  Builds an image pyramid with three
 * per-scale resolutions (full, half, quarter -- BBF features sample all
 * three), slides each cascade in _cascade[0..count-1] over every scale, then
 * post-processes the raw hits with OpenCV-style rectangle grouping
 * (params.min_neighbors) and optional nested-rectangle suppression
 * (CCV_BBF_NO_NESTED flag).  Returns a ccv_array_t of ccv_comp_t. */
ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
	int hr = a->rows / params.size.height;
	int wr = a->cols / params.size.width;
	double scale = pow(2., 1. / (params.interval + 1.));
	int next = params.interval + 1;
	int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
	/* 4 pyramid slots per scale: the base image plus up to 3 half-pixel
	 * shifted variants used in accurate mode */
	ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	/* normalize the input so the first pyramid level matches the cascade's window */
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
	else
		pyr[0] = a;
	int i, j, k, t, x, y, q;
	for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++)
		ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
	/* each octave is a 2x downsample of the level one octave up */
	for (i = next; i < scale_upto + next * 2; i++)
		ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
	if (params.accurate)
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
		}
	ccv_array_t* idx_seq;
	ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	/* detect in multi scale */
	for (t = 0; t < count; t++)
	{
		ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
		float scale_x = (float) params.size.width / (float) cascade->size.width;
		float scale_y = (float) params.size.height / (float) cascade->size.height;
		ccv_array_clear(seq);
		for (i = 0; i < scale_upto; i++)
		{
			int dx[] = {0, 1, 0, 1};
			int dy[] = {0, 0, 1, 1};
			/* i + next*4 / i + next*8 are the half and quarter resolution levels of scale i */
			int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
			int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
			int i_cols = pyr[i * 4 + next * 8]->cols - (cascade->size.width >> 2);
			/* bytes left over at the end of each scanned row, per resolution */
			int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
				pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
				pyr[i * 4 + next * 8]->step - i_cols };
			/* q iterates the sub-pixel shift variants (only 1 unless accurate) */
			for (q = 0; q < (params.accurate ? 4 : 1); q++)
			{
				unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 };
				for (y = 0; y < i_rows; y++)
				{
					for (x = 0; x < i_cols; x++)
					{
						/* NOTE(review): sum is only assigned inside the stage
						 * loop; a zero-stage cascade would read it
						 * uninitialized below -- presumably count >= 1 for
						 * any trained cascade; verify */
						float sum;
						int flag = 1;
						ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
						/* attentional cascade: reject as soon as any stage score
						 * falls below its threshold */
						for (j = 0; j < cascade->count; ++j, ++classifier)
						{
							sum = 0;
							float* alpha = classifier->alpha;
							ccv_bbf_feature_t* feature = classifier->feature;
							for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature)
								sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
							if (sum < classifier->threshold)
							{
								flag = 0;
								break;
							}
						}
						if (flag)
						{
							/* survived all stages: record the hit in input coordinates */
							ccv_comp_t comp;
							comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5));
							comp.neighbors = 1;
							comp.classification.id = t;
							comp.classification.confidence = sum;
							ccv_array_push(seq, &comp);
						}
						/* advance one window: 4/2/1 bytes across the three resolutions */
						u8[0] += 4;
						u8[1] += 2;
						u8[2] += 1;
					}
					u8[0] += paddings[0];
					u8[1] += paddings[1];
					u8[2] += paddings[2];
				}
			}
			scale_x *= scale;
			scale_y *= scale;
		}
		/* the following code from OpenCV's haar feature implementation */
		if(params.min_neighbors == 0)
		{
			for (i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
				ccv_array_push(result_seq, comp);
			}
		} else {
			idx_seq = 0;
			ccv_array_clear(seq2);
			// group retrieved rectangles in order to filter out noise
			int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0);
			ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
			memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
			// count number of neighbors
			for(i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq, i);
				int idx = *(int*)ccv_array_get(idx_seq, i);
				if (comps[idx].neighbors == 0)
					comps[idx].classification.confidence = r1.classification.confidence;
				++comps[idx].neighbors;
				/* accumulate rect sums; averaged per cluster below */
				comps[idx].rect.x += r1.rect.x;
				comps[idx].rect.y += r1.rect.y;
				comps[idx].rect.width += r1.rect.width;
				comps[idx].rect.height += r1.rect.height;
				comps[idx].classification.id = r1.classification.id;
				comps[idx].classification.confidence = ccv_max(comps[idx].classification.confidence, r1.classification.confidence);
			}
			// calculate average bounding box
			for(i = 0; i < ncomp; i++)
			{
				int n = comps[i].neighbors;
				if(n >= params.min_neighbors)
				{
					ccv_comp_t comp;
					/* (sum * 2 + n) / (2 * n) is the rounded average of the cluster */
					comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n);
					comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n);
					comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n);
					comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n);
					comp.neighbors = comps[i].neighbors;
					comp.classification.id = comps[i].classification.id;
					comp.classification.confidence = comps[i].classification.confidence;
					ccv_array_push(seq2, &comp);
				}
			}
			// filter out small face rectangles inside large face rectangles
			for(i = 0; i < seq2->rnum; i++)
			{
				ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i);
				int flag = 1;
				for(j = 0; j < seq2->rnum; j++)
				{
					ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j);
					int distance = (int)(r2.rect.width * 0.25 + 0.5);
					if(i != j &&
					   r1.classification.id == r2.classification.id &&
					   r1.rect.x >= r2.rect.x - distance &&
					   r1.rect.y >= r2.rect.y - distance &&
					   r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
					   r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
					   (r2.neighbors > ccv_max(3, r1.neighbors) || r1.neighbors < 3))
					{
						flag = 0;
						break;
					}
				}
				if(flag)
					ccv_array_push(result_seq, &r1);
			}
			ccv_array_free(idx_seq);
			ccfree(comps);
		}
	}
	ccv_array_free(seq);
	ccv_array_free(seq2);
	ccv_array_t* result_seq2;
	/* the following code from OpenCV's haar feature implementation */
	if (params.flags & CCV_BBF_NO_NESTED)
	{
		result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
		idx_seq = 0;
		// group retrieved rectangles in order to filter out noise
		int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0);
		ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
		memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
		// count number of neighbors
		for(i = 0; i < result_seq->rnum; i++)
		{
			ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i);
			int idx = *(int*)ccv_array_get(idx_seq, i);
			/* keep only the most confident rectangle of each group */
			if (comps[idx].neighbors == 0 || comps[idx].classification.confidence < r1.classification.confidence)
			{
				comps[idx].classification.confidence = r1.classification.confidence;
				comps[idx].neighbors = 1;
				comps[idx].rect = r1.rect;
				comps[idx].classification.id = r1.classification.id;
			}
		}
		// calculate average bounding box
		for(i = 0; i < ncomp; i++)
			if(comps[i].neighbors)
				ccv_array_push(result_seq2, &comps[i]);
		ccv_array_free(result_seq);
		ccfree(comps);
	} else {
		result_seq2 = result_seq;
	}
	/* release the pyramid; slot 0 only if we resampled it ourselves */
	for (i = 1; i < scale_upto + next * 2; i++)
		ccv_matrix_free(pyr[i * 4]);
	if (params.accurate)
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_matrix_free(pyr[i * 4 + 1]);
			ccv_matrix_free(pyr[i * 4 + 2]);
			ccv_matrix_free(pyr[i * 4 + 3]);
		}
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_matrix_free(pyr[0]);
	return result_seq2;
}
/* Load a trained BBF cascade from <directory>/cascade.txt plus one
 * stage-<n>.txt file per stage.  Returns a newly allocated cascade, or 0 if
 * the cascade file cannot be read or its header is malformed.  If some
 * stage files are missing, the cascade is truncated to the stages that
 * loaded successfully. */
ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory)
{
	char buf[1024];
	sprintf(buf, "%s/cascade.txt", directory);
	int i;
	FILE* r = fopen(buf, "r");
	if (r == 0)
		return 0;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	/* fix: was assert(fscanf(...) > 0) -- the assert disappears under NDEBUG
	 * and "> 0" accepts a partial parse of 1 or 2 fields, leaving count/size
	 * uninitialized; require all three fields and a sane count instead */
	if (fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height) != 3 || cascade->count < 0)
	{
		fclose(r);
		ccfree(cascade);
		return 0;
	}
	fclose(r);
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++)
	{
		sprintf(buf, "%s/stage-%d.txt", directory, i);
		if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0)
		{
			/* missing/broken stage file: keep only the stages read so far */
			cascade->count = i;
			break;
		}
	}
	return cascade;
}
/* Deserialize a cascade from the memory buffer produced by
 * ccv_bbf_classifier_cascade_write_binary.  Layout: count, width, height,
 * then per stage: count, threshold, the raw feature array and the raw alpha
 * array, all straight memcpy's.  NOTE(review): no bounds or sanity checking
 * is performed on s -- the caller must guarantee the buffer is complete and
 * was produced by a matching build (struct layout / endianness). */
ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s)
{
	int i;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count);
	memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
	memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++, classifier++)
	{
		memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count);
		memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
		/* feature and alpha arrays are owned by the stage and sized by its count */
		classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
		classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
		memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
		memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
	}
	return cascade;
}
/* Serialize a cascade into caller-provided memory.  Always returns the
 * number of bytes required; the buffer is written only when slen is large
 * enough, so callers may invoke it once with slen == 0 to size the buffer
 * and a second time to fill it.  The layout matches
 * ccv_bbf_classifier_cascade_read_binary. */
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen)
{
	int i;
	/* first pass: compute the total serialized size */
	int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
	for (i = 0; i < cascade->count; i++, classifier++)
		len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float);
	if (slen >= len)
	{
		/* second pass: emit header then each stage's raw data */
		char* p = s;
		memcpy(p, &cascade->count, sizeof(cascade->count)); p += sizeof(cascade->count);
		memcpy(p, &cascade->size.width, sizeof(cascade->size.width)); p += sizeof(cascade->size.width);
		memcpy(p, &cascade->size.height, sizeof(cascade->size.height)); p += sizeof(cascade->size.height);
		classifier = cascade->stage_classifier;
		for (i = 0; i < cascade->count; i++, classifier++)
		{
			memcpy(p, &classifier->count, sizeof(classifier->count)); p += sizeof(classifier->count);
			memcpy(p, &classifier->threshold, sizeof(classifier->threshold)); p += sizeof(classifier->threshold);
			memcpy(p, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t)); p += classifier->count * sizeof(ccv_bbf_feature_t);
			memcpy(p, classifier->alpha, classifier->count * 2 * sizeof(float)); p += classifier->count * 2 * sizeof(float);
		}
	}
	return len;
}
/* Release a cascade and everything it owns: each stage's feature and alpha
 * arrays, the stage array itself, and finally the cascade struct. */
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade)
{
	int stage = 0;
	while (stage < cascade->count)
	{
		ccfree(cascade->stage_classifier[stage].feature);
		ccfree(cascade->stage_classifier[stage].alpha);
		stage++;
	}
	ccfree(cascade->stage_classifier);
	ccfree(cascade);
}
|
sse-blk-dag1.h | #include <util/omp_wrapper.h>
extern "C"
void wilson_dslash_blk_dag1(IFloat *chi_p_f,
IFloat *u_p_f,
IFloat *psi_p_f,
int cb,
Wilson *wilson_p)
{
int tt;
int cbn;
Float *chi_p = (Float *) chi_p_f;
Float *u_p = (Float *) u_p_f;
Float *psi_p = (Float *) psi_p_f;
#if defined(_OPENMP) && defined(__linux__)
static int init = 0;
// if (init == 0) { cpubind(); init = 1; }
#endif
const int lx = wilson_p->ptr[0];
const int ly = wilson_p->ptr[1];
const int lz = wilson_p->ptr[2];
const int lt = wilson_p->ptr[3];
const int vol = wilson_p->vol[0];
const int x_loc = GJP.Xnodes() == 1 ? 1 : 0;
const int x_nloc = !x_loc;
const int y_nloc = GJP.Ynodes() == 1 ? 0 : 1;
const int z_nloc = GJP.Znodes() == 1 ? 0 : 1;
const int t_nloc = GJP.Tnodes() == 1 ? 0 : 1;
const SSE_C_FLOAT* const recv_buf1 = (SSE_C_FLOAT*)wilson_p->recv_buf[0];
const SSE_C_FLOAT* const recv_buf2 = (SSE_C_FLOAT*)wilson_p->recv_buf[1];
const SSE_C_FLOAT* const recv_buf3 = (SSE_C_FLOAT*)wilson_p->recv_buf[2];
const SSE_C_FLOAT* const recv_buf4 = (SSE_C_FLOAT*)wilson_p->recv_buf[3];
const SSE_C_FLOAT* const recv_buf5 = (SSE_C_FLOAT*)wilson_p->recv_buf[4];
const SSE_C_FLOAT* const recv_buf6 = (SSE_C_FLOAT*)wilson_p->recv_buf[5];
const SSE_C_FLOAT* const recv_buf7 = (SSE_C_FLOAT*)wilson_p->recv_buf[6];
const SSE_C_FLOAT* const recv_buf8 = (SSE_C_FLOAT*)wilson_p->recv_buf[7];
//for(int itr=0;itr<NITR;++itr){
//printf("%d x %d x %d x %d = %d\n",lx,ly,lz,lt,vol);
if(cb == 0) cbn = 1;
else cbn = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static,1)
#endif
for(tt = 0; tt < lt; tt++){
int x, y, z, t, s;
int i;
int by, bz;
int bt1, by1, bz1;
int xp, yp, zp, tp;
int xm, ym, zm, tm;
int xyzt, _xyzt, __xyzt;
int xpyzt, xypzt, xyzpt, xyztp;
int xmyzt, xymzt, xyzmt, xyztm;
int _xpyzt, _xypzt, _xyzpt, _xyztp;
int _xmyzt, _xymzt, _xyzmt, _xyztm;
Float __RESTRICT *u;
Float __RESTRICT *chi;
Float __RESTRICT *psi;
//printf("tt=%d\n",tt);
if (omp_get_num_threads() == 4) {
if (tt & 2)
t = (lt >> 1) + ((tt >> 2) << 1) + (tt & 1);
else
t = (lt >> 1) - ((tt >> 2) << 1) - (tt & 1) - 1;
} else
t = (tt + 1) % lt;
tp = (t + 1) % lt;
tm = t - 1 + ((lt - t) / lt) * lt;
for (bz = 0; bz < lz; bz += Z_BLOCK) {
bz1 = bz + Z_BLOCK;
if (bz1 >= lz) bz1 = lz;
for (by = 0; by < ly; by += Y_BLOCK) {
by1 = by + Y_BLOCK;
if (by1 >= ly) by1 = ly;
for(z = bz; z < bz1; z++){
zp = (z + 1) % lz;
zm = z - 1 + ((lz - z) / lz) * lz;
_xyzt = (lx >> 1) * (y + ly * (z + lz * t));
_xpyzt = (lx >> 1) * (y + ly * (z + lz * t));
_xmyzt = (lx >> 1) * (y + ly * (z + lz * t));
_xypzt = (lx >> 1) * (yp + ly * (z + lz * t));
_xymzt = (lx >> 1) * (ym + ly * (z + lz * t));
_xyzpt = (lx >> 1) * (y + ly * (zp + lz * t));
_xyzmt = (lx >> 1) * (y + ly * (zm + lz * t));
_xyztp = (lx >> 1) * (y + ly * (z + lz * tp));
_xyztm = (lx >> 1) * (y + ly * (z + lz * tm));
for(y = by; y < by1; y++){
int x_check, yzt_edge;
yp = (y + 1) % ly;
ym = y - 1 + ((ly - y) / ly ) * ly;
_xyzt = (lx >> 1) * (y + ly * (z + lz * t));
_xpyzt = (lx >> 1) * (y + ly * (z + lz * t));
_xmyzt = (lx >> 1) * (y + ly * (z + lz * t));
_xypzt = (lx >> 1) * (yp + ly * (z + lz * t));
_xymzt = (lx >> 1) * (ym + ly * (z + lz * t));
_xyzpt = (lx >> 1) * (y + ly * (zp + lz * t));
_xyzmt = (lx >> 1) * (y + ly * (zm + lz * t));
_xyztp = (lx >> 1) * (y + ly * (z + lz * tp));
_xyztm = (lx >> 1) * (y + ly * (z + lz * tm));
if ( ((z == 0 || z == lz - 1)&& z_nloc) ||
((t == 0 || t == lt - 1)&& t_nloc) ||
((y == 0 || y == ly - 1)&& y_nloc) ) yzt_edge = 0;
else yzt_edge = -1;
if ((y + z + t + cbn) & 1) x_check = lx - 1;
else x_check = 0;
for (x = cbn ^ ((y + z + t) & 1); x < lx; x += 2)
{
//::printf("%d %d %d %d\n",x,y,z,t);
const int x_edge = x==x_check ? x_loc : 1;
// DECLARE;
#ifdef ADD2REG
// __m128d __RESTRICT wxp[6],wyp[6], wzp[6], wtp[6];
// __m128d __RESTRICT wxm[6],wym[6], wzm[6], wtm[6];
register __m128d t00, t01, t02, t03, t04, t05;
register __m128d t06, t07, t08, t09, t10, t11;
#else
M128D __RESTRICT wxp[6];
M128D __RESTRICT wyp[6];
M128D __RESTRICT wzp[6];
M128D __RESTRICT wtp[6];
M128D __RESTRICT wxm[6];
M128D __RESTRICT wym[6];
M128D __RESTRICT wzm[6];
M128D __RESTRICT wtm[6];
#endif
#ifndef USE_HERN
register M128D _a, _b, _c, _d;
#endif
xyzt = (x >> 1) + _xyzt;
xp = (x + 1) & ((x + 1 - lx) >> 31);
xm = x - 1 + (((x - 1) >> 31) & lx);
xpyzt = (xp >> 1) + _xpyzt;
xmyzt = (xm >> 1) + _xmyzt;
xypzt = (x >> 1) + _xypzt;
xymzt = (x >> 1) + _xymzt;
xyzpt = (x >> 1) + _xyzpt;
xyzmt = (x >> 1) + _xyzmt;
xyztp = (x >> 1) + _xyztp;
xyztm = (x >> 1) + _xyztm;
#ifndef DEBUG_NOEDGE
if ((yzt_edge & x_edge ) == 0) {
ZERO;
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p + SPINOR_SIZE * xpyzt;
if(x_nloc&&(x == lx - 1)){
const size_t shft = (SPINOR_SIZE/2)* ((y+ly*(z+lz*t))/2);
N_KERN_XP_EDG( recv_buf1+shft, recv_buf1+shft+6 );
}else {N_KERN_XP;}
PREFETCH_U0;
PREFETCH_PSI;
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p + SPINOR_SIZE * xypzt;
if( y_nloc && (y == ly-1) ) {
const size_t shft = (SPINOR_SIZE/2)* ((x+lx*(z+lz*t))/2);
#if 0
{//debug
int dag=1;
int gx=GJP.XnodeCoor()*lx+x;
int gy=GJP.YnodeCoor()*ly+y;
int gz=GJP.ZnodeCoor()*lz+z;
int gt=GJP.TnodeCoor()*lt+t;
if( gx==0 && gy==3 && gz==6 && gt==0){
printf("debug4 %d %d (%d %d %d %d) a:",
cb,dag, gx,gy,gz,gt );
for(int i=0;i<6;++i)
printf("%e ", *(i+recv_bf2+shft));
printf(" | b: ");
for(int i=0;i<6;++i)
printf("%e ", *(i+recv_bf2+6+shft));
}
}//debug
#endif
N_KERN_YP_EDG( recv_buf2+shft, recv_buf2+shft+6 );
#if 0
{//debug
int dag=1;
int gx=GJP.XnodeCoor()*lx+x;
int gy=GJP.YnodeCoor()*ly+y;
int gz=GJP.ZnodeCoor()*lz+z;
int gt=GJP.TnodeCoor()*lt+t;
if( gx==0 && gy==3 && gz==6 && gt==0){
printf("debug4 %d %d (%d %d %d %d) wyp ",
cb,dag, gx,gy,gz,gt );
for(int i=0;i<24;++i)
printf("%e ", *((double*)wyp+i));
}
}//debug
#endif
}else {N_KERN_YP;}
PREFETCH_U1;
PREFETCH_PSI;
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p + SPINOR_SIZE * xyzpt;
if( z_nloc && (z == lz-1) ) {
const size_t shft = (SPINOR_SIZE/2)* ((x+lx*(y+ly*t))/2);
N_KERN_ZP_EDG( recv_buf3+shft, recv_buf3+shft+6 );
}else {N_KERN_ZP;}
PREFETCH_U2;
PREFETCH_PSI;
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p + SPINOR_SIZE * xyztp;
if(t_nloc && (t == lt - 1)){
const size_t shft = (SPINOR_SIZE/2)* ((x+lx*(y+ly*z))/2);
N_KERN_TP_EDG( recv_buf4+shft, recv_buf4+shft+6 );
}else {N_KERN_TP;}
PREFETCH_U3;
PREFETCH_PSI;
u = u_p + GAUGE_SIZE * (xmyzt + vol * cb);
psi = psi_p + SPINOR_SIZE * xmyzt;
if(x == 0 && x_nloc) {
const size_t shft = (SPINOR_SIZE/2)* ((y+ly*(z+lz*t))/2);
#if 1
LOAD(wxm,recv_buf5);
#else
wxm[0] = _mm_load_pd( recv_buf5+shft );
wxm[1] = _mm_load_pd( recv_buf5+shft + 2);
wxm[2] = _mm_load_pd( recv_buf5+shft + 4);
wxm[3] = _mm_load_pd( recv_buf5+shft + 6);
wxm[4] = _mm_load_pd( recv_buf5+shft + 8);
wxm[5] = _mm_load_pd( recv_buf5+shft + 10);
#endif
} else {N_KERN_XM;}
PREFETCH_U0;
PREFETCH_PSI;
u = u_p + GAUGE_SIZE * (xymzt + vol * cb);
psi = psi_p + SPINOR_SIZE * xymzt;
if( y_nloc && y==0) {
const size_t shft = (SPINOR_SIZE/2)* ((x+lx*(z+lz*t))/2);
#if 1
LOAD(wym,recv_buf6);
#else
wym[0] = _mm_load_pd( recv_buf6+shft );
wym[1] = _mm_load_pd( recv_buf6+shft + 2);
wym[2] = _mm_load_pd( recv_buf6+shft + 4);
wym[3] = _mm_load_pd( recv_buf6+shft + 6);
wym[4] = _mm_load_pd( recv_buf6+shft + 8);
wym[5] = _mm_load_pd( recv_buf6+shft + 10);
#endif
}
else {N_KERN_YM;}
PREFETCH_U1;
PREFETCH_PSI;
u = u_p + GAUGE_SIZE * (xyzmt + vol * cb);
psi = psi_p + SPINOR_SIZE * xyzmt;
if ( z_nloc && z == 0 ) {
const size_t shft = (SPINOR_SIZE/2)* ((x+lx*(y+ly*t))/2);
#if 1
LOAD(wzm,recv_buf7);
#else
wzm[0] = _mm_load_pd( recv_buf7+shft );
wzm[1] = _mm_load_pd( recv_buf7+shft + 2);
wzm[2] = _mm_load_pd( recv_buf7+shft + 4);
wzm[3] = _mm_load_pd( recv_buf7+shft + 6);
wzm[4] = _mm_load_pd( recv_buf7+shft + 8);
wzm[5] = _mm_load_pd( recv_buf7+shft + 10);
#endif
} else {N_KERN_ZM;}
PREFETCH_U2;
PREFETCH_PSI;
u = u_p + GAUGE_SIZE * (xyztm + vol * cb);
psi = psi_p + SPINOR_SIZE * xyztm;
if ( t_nloc && t == 0 ) {
const size_t shft = (SPINOR_SIZE/2)* ((x+lx*(y+ly*z))/2);
#if 1
LOAD(wtm,recv_buf8);
#else
wtm[0] = _mm_load_pd( recv_buf8+shft );
wtm[1] = _mm_load_pd( recv_buf8+shft + 2);
wtm[2] = _mm_load_pd( recv_buf8+shft + 4);
wtm[3] = _mm_load_pd( recv_buf8+shft + 6);
wtm[4] = _mm_load_pd( recv_buf8+shft + 8);
wtm[5] = _mm_load_pd( recv_buf8+shft + 10);
#endif
} else {N_KERN_TM;}
PREFETCH_U3;
PREFETCH_PSI;
chi = chi_p + SPINOR_SIZE * xyzt;
#if 0
{//deb
int gx=GJP.XnodeCoor()*lx+x;
int gy=GJP.YnodeCoor()*ly+y;
int gz=GJP.ZnodeCoor()*lz+z;
int gt=GJP.TnodeCoor()*lt+t;
if(gx==4&&gy==0&&gz==0&>==1)
{
::printf("edge (%d %d %d %d) %4.3e %4.3e\n",
gx,gy,gz,gt,
*(0+(double*)&(wxm[0])),
*(1+(double*)&(wxm[0])));
}
}
#endif
N_STORE_XP_noadd;
N_STORE_YP;
N_STORE_ZP;
N_STORE_TP;
N_STORE_XM;
N_STORE_YM;
N_STORE_ZM;
N_STORE_TM;
PREFETCH_CHI;
} else
#endif //DEBUG_NOEDGE
{// if (yzt_edge & (x - x_check) == 0)
ZERO;
#ifdef POS_DIR_ON
#ifdef X_DIR_ON
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p + SPINOR_SIZE *xpyzt;
N_KERN_XP;
PREFETCH_U0;
PREFETCH_PSI;
#endif
#ifdef Y_DIR_ON
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p +SPINOR_SIZE * xypzt;
N_KERN_YP;
PREFETCH_U1;
PREFETCH_PSI;
#endif
#ifdef Z_DIR_ON
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p +SPINOR_SIZE * xyzpt;
N_KERN_ZP;
PREFETCH_U2;
PREFETCH_PSI;
#endif
#ifdef T_DIR_ON
u = u_p + GAUGE_SIZE * (xyzt + vol * cbn);
psi = psi_p +SPINOR_SIZE * xyztp;
N_KERN_TP;
PREFETCH_U3;
PREFETCH_PSI;
#endif
#endif //POS_DIR_ON
#ifdef NEG_DIR_ON
#ifdef X_DIR_ON
u = u_p + GAUGE_SIZE * (xmyzt + vol * cb);
psi = psi_p +SPINOR_SIZE * xmyzt;
N_KERN_XM;
PREFETCH_U0;
PREFETCH_PSI;
#endif
#ifdef Y_DIR_ON
u = u_p + GAUGE_SIZE * (xymzt + vol * cb);
psi = psi_p +SPINOR_SIZE * xymzt;
N_KERN_YM;
PREFETCH_U1;
PREFETCH_PSI;
#endif
#ifdef Z_DIR_ON
u = u_p + GAUGE_SIZE * (xyzmt + vol * cb);
psi = psi_p +SPINOR_SIZE * xyzmt;
N_KERN_ZM;
PREFETCH_U2;
PREFETCH_PSI;
#endif
#ifdef T_DIR_ON
u = u_p + GAUGE_SIZE * (xyztm + vol * cb);
psi = psi_p +SPINOR_SIZE * xyztm;
N_KERN_TM;
PREFETCH_U3;
PREFETCH_PSI;
#endif
#endif //NEG_DIR_ON
chi = chi_p + SPINOR_SIZE * xyzt;
N_STORE_XP_noadd;
N_STORE_YP;
N_STORE_ZP;
N_STORE_TP;
N_STORE_XM;
N_STORE_YM;
N_STORE_ZM;
N_STORE_TM;
//STORE(chi);
PREFETCH_CHI;
}// else of if (yzt_edge & (x - x_check) == 0)
}// for (x = cbn ^ ((y + z + t) & 1); x < lx; x += 2)
}
}
}
}
}
#ifdef DEBUG_PRINT_DAG1
int dag=1;
int mpisize=NumNodes();
int mpirank=UniqueID();
// MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
// MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
for(int irank=0; irank< mpisize;++irank)
{
int cbn=!cb;
if(mpirank == irank)
{
int idx=0;
for(int t=0; t<lt; t++){
for(int z=0; z<lz; z++){
for(int y=0; y<ly; y++){
for(int x=0; x<lx; x++){
int parity = x+y+z+t;
parity = parity % 2;
if(parity == cbn){
::printf("debug3 %d %d (%d %d %d %d) ",
cb,dag,
GJP.XnodeCoor()*lx+x,
GJP.YnodeCoor()*ly+y,
GJP.ZnodeCoor()*lz+z,
GJP.TnodeCoor()*lt+t);
for(int i=0;i<24;++i)
//::printf(" %4.3e", *(chi_p+idx+i));
::printf(" %e", *(chi_p+idx+i));
::printf("\n");
idx +=24;
}
}
}
}
}
}
//MPI_Barrier( MPI_COMM_WORLD);
cps::sync();
}
//exit(1);
#endif
}
|
GB_unop__identity_fc32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc32_uint8
// op(A') function: GB_unop_tran__identity_fc32_uint8
// C type: GxB_FC32_t
// A type: uint8_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (GxB_FC32_t) Ax [p] for all p in 0..anz-1: the IDENTITY unary op
// combined with a uint8_t -> single-precision-complex typecast (imaginary
// part set to 0).  Cx and Ax may be aliased, since each entry is fully read
// before its destination is written.  Runs the loop with nthreads OpenMP
// threads, statically scheduled.
GrB_Info GB_unop_apply__identity_fc32_uint8
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time; the
    // caller falls back to the generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // widen the uint8_t entry to float and box it as a complex value
        Cx [k] = GxB_CMPLXF ((float) (Ax [k]), 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A, casting each uint8_t entry of A to
// a GxB_FC32_t entry of C.  The actual loop lives in GB_unop_transpose.c,
// which expands using the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined at the
// top of this generated file.
GrB_Info GB_unop_tran__identity_fc32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time; use the generic case
return (GrB_NO_VALUE) ;
#else
// only the numerical phase is done here; phase 1 (counting) runs elsewhere
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__second_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint32)
// A*D function (colscale): GB (_AxD__second_uint32)
// D*A function (rowscale): GB (_DxB__second_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = bij
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_UINT32 || GxB_NO_SECOND_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  With the SECOND operator
// (GB_BINOP above is "z = y"), the result is simply C = B, computed over
// nthreads threads by the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// template expands with the GB_* macros defined at the top of this file
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// SECOND operator (each present entry bij overwrites cij, since z = y).
// B has been pre-sliced into B_ntasks tasks for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, using the SECOND
// operator (z = y), so every entry of C is overwritten with b.
//
// Fix: the generated code had a second, unreachable "return (GrB_SUCCESS)"
// after the inner block, which already returns on every path; the dead
// statement is removed.  No behavior change.
GrB_Info GB (_Cdense_accumb__second_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,        // the scalar b, passed as untyped bytes
const int nthreads
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with the SECOND
// operator; A has been pre-sliced into A_ntasks tasks for A_nthreads threads.
GrB_Info GB (_AxD__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// the template writes directly into C's value array
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with the SECOND
// operator, using nthreads threads.
GrB_Info GB (_DxB__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// the template writes directly into C's value array
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the SECOND operator.  The C_to_*
// arrays map vectors of C to vectors of M, A, and B; TaskList describes the
// C_ntasks parallel tasks run on C_nthreads threads.
GrB_Info GB (_AaddB__second_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// scratch slicings allocated on demand by the template, freed below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the SECOND operator (general
// method 01; other sparsity combinations use the 02/03/bitmap variants).
GrB_Info GB (_AemultB_01__second_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For SECOND, GB_BINOP_FLIP is 0 (see the macro above), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; M has been pre-sliced into M_ntasks tasks for M_nthreads
// threads.
GrB_Info GB (_AemultB_03__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, with the
// SECOND operator; the mask M (if present) has been pre-sliced for
// M_nthreads threads and the result is computed with C_nthreads threads.
GrB_Info GB (_AemultB_bitmap__second_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// SECOND_UINT32 disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// \brief Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// \brief Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// \brief Identifier for "message".
IdentifierInfo *Ident_message;
/// \brief Identifier for "strict".
IdentifierInfo *Ident_strict;
/// \brief Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// \brief RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
// Reference to the parser's depth counter; restored on destruction.
unsigned &Depth;
// How many levels this RAII object has added, to be subtracted in the dtor.
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
// Undo exactly the levels added through this object, leaving any other
// changes to Depth intact.
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
// Add one template-parameter level.
void operator++() {
++Depth;
++AddedLevels;
}
// Add D levels at once (e.g. when entering multiple parameter lists).
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
unsigned getDepth() const { return Depth; }
};
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
/// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// \brief Identifiers which have been declared within a tentative parse.
///
/// TentativeParsingAction::Commit()/Revert() truncate this list back to
/// the size recorded when the tentative parse began.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Identifier for the SEH "__except" keyword (defined out of line).
/// NOTE(review): semantics inferred from the name — confirm in the .cpp.
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether function bodies should be skipped while parsing; mirrors the
/// constructor parameter of the same name.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
// Trivial forwarding accessors for the parser's collaborators.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
/// Convenience overload that discards the parsed declaration group.
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
// Record where the consumed token started before advancing, so we can both
// return it and answer later getEndOfPreviousToken() queries.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current token if (and only if) it has the expected kind.
///
/// On a match this behaves exactly like ConsumeToken() and returns true;
/// otherwise the token stream is left untouched and false is returned.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.is(Expected)) {
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return true;
  }
  return false;
}
/// Variant of TryConsumeToken that additionally reports the consumed
/// token's location in \p Loc on success. \p Loc is untouched on failure.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (TryConsumeToken(Expected)) {
    Loc = PrevTokLocation;
    return true;
  }
  return false;
}
/// Return the location just past the end of the most recently consumed
/// token (i.e. the end of PrevTokLocation's token).
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
///
/// These are exactly the tokens rejected by ConsumeToken()'s assertion;
/// each kind has a dedicated Consume* method below.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// \brief Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// \brief Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
// Push the current token back behind \p Consumed, then re-lex so that
// \p Consumed becomes the active token and the old current token is next.
Token Next = Tok;
PP.EnterToken(Consumed);
PP.Lex(Tok);
PP.EnterToken(Next);
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
///
/// \param ConsumeCodeCompletionTok when true, a code-completion token is
/// consumed directly instead of being routed through
/// handleUnexpectedCodeCompletionToken().
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
/// Consume an annotation token and return its *start* location.
///
/// Unlike the other consume methods, PrevTokLocation is set to the
/// annotation's end location, since an annotation token covers a source
/// range rather than a single token.
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - Consume a '(' or ')', keeping ParenCount up-to-date.
///
/// \return the location of the consumed token.
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() != tok::l_paren) {
    // Closing paren: clamp at zero so unbalanced ')'s never drive the
    // count negative.
    if (ParenCount)
      --ParenCount;
  } else {
    ++ParenCount;
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBracket - Consume a '[' or ']', keeping BracketCount up-to-date.
///
/// \return the location of the consumed token.
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() != tok::l_square) {
    // Closing bracket: clamp at zero so unbalanced ']'s never drive the
    // count negative.
    if (BracketCount)
      --BracketCount;
  } else {
    ++BracketCount;
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBrace - Consume a '{' or '}', keeping BraceCount up-to-date.
///
/// \return the location of the consumed token.
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() != tok::l_brace) {
    // Closing brace: clamp at zero so unbalanced '}'s never drive the
    // count negative.
    if (BraceCount)
      --BraceCount;
  } else {
    ++BraceCount;
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// \brief Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();
/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
/// \brief Handle the annotation token produced for
/// #pragma pointers_to_members...
void HandlePragmaMSPointersToMembers();
/// \brief Handle the annotation token produced for
/// #pragma vtordisp...
void HandlePragmaMSVtorDisp();
/// \brief Handle an annotation token produced for a Microsoft pragma.
void HandlePragmaMSPragma();
// NOTE(review): the bool return convention (error vs. success) of the three
// handlers below is not visible here — confirm against the implementation.
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// \brief Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// \brief Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// \brief Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// \brief Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// \brief Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// \brief Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
/// Store a parsed type into an annotation token.
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
// NOTE(review): this comment appears stale — no NeedType parameter exists
// in the signature below.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
/// The result of attempting to annotate a name (see TryAnnotateName).
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
// Fast path: bail out unless AltiVec/ZVector is enabled and the current
// identifier is one of the candidate context-sensitive keywords.
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC1);
if (Tok.isAnnotation())
return false;
// Lazily cache the 'instancetype' identifier on first use.
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
// Parser state captured at construction, restored on Revert().
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
// True until Commit() or Revert() is called; the destructor asserts that
// one of them was.
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
// Ask the preprocessor to start caching tokens so we can replay them.
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
// Identifiers tentatively declared during the parse are discarded on
// Commit as well as on Revert.
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
// Replay the cached tokens and restore all captured parser state.
P.PP.Backtrack();
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
// The Objective-C container we temporarily exited, or null if the parser
// was not inside one.
Decl *DC;
// Sets Parser::ParsingInObjCContainer to (DC != nullptr) for the lifetime
// of this object and restores the previous value afterward.
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  /// Construct a new object to manage a scope in the parser \p Self, where
  /// the new Scope is created with the flags \p ScopeFlags — but only when
  /// we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (!EnteredScope || BeforeCompoundStmt) {
      // No scope is entered. Compound statements still bump the MS
      // mangling number; a null Self marks this object as inactive so
      // Exit() and the destructor do nothing.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    } else {
      Self->EnterScope(ScopeFlags);
    }
  }

  /// Exit the scope associated with this object now, rather than waiting
  /// until the object is destroyed. Safe to call more than once.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
// Saved so the destructor can restore the original flags.
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit \p DiagID at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// \brief Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// \brief Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// Combine two skip-until flag sets.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
// Default implementations do nothing; subclasses override the one(s)
// relevant to the kind of late-parsed entity they represent.
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
// The declarations this attribute applies to, accumulated via addDecl().
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  /// \param PSoon whether these attributes are planned to be parsed shortly
  /// after creation rather than at the end of the class.
  // 'explicit' prevents an accidental implicit bool -> list conversion.
  explicit LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
  // const: pure accessor, usable through const references.
  bool parseSoon() const { return ParseSoon; }
private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// \brief Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// \brief Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// \brief The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// \brief Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// \brief Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// \brief Whether this class is an __interface.
bool IsInterface : 1;
/// \brief The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// The innermost class currently being parsed; asserts if the stack is empty.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// \brief RAII object used to manage the parsing of a class definition.
///
/// Pushes a ParsingClass on construction; Pop() (or the destructor, if Pop
/// was never called) pops it again via PopParsingClass.
class ParsingClassDefinition {
Parser &P;
// Guards against popping twice when Pop() is called before destruction.
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// \brief Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
/// Default state: not a template at all.
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
/// A template declaration or explicit specialization (when
/// \p isSpecialization is true) with the given parameter lists.
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
/// An explicit instantiation, optionally preceded by 'extern'.
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// \brief The kind of template we are parsing.
enum {
/// \brief We are not parsing a template at all.
NonTemplate = 0,
/// \brief We are parsing a template declaration.
Template,
/// \brief We are parsing an explicit specialization.
ExplicitSpecialization,
/// \brief We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// \brief The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// \brief The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// \brief The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
/// Cache the tokens of a templated function body for late parsing.
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
// Static trampolines handed to Sema; \p P is the Parser instance.
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
/// The kind of cached initializer being consumed/stored.
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers& VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Consume tokens into \p Toks until \p T1 is found (convenience overload
/// that searches for a single token kind).
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.

/// A ParsedAttributes collection that additionally records the source range
/// covering the attributes.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
    : ParsedAttributes(factory) {}

  /// Clear both the attribute list and the recorded range.
  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  /// Source range spanning the parsed attributes.
  SourceRange Range;
};
// Top-level (external) declaration parsing.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs,
    ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
                              const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
                              LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives();
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken,
    bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);
/// RAII helper tracking parser state while inside an Objective-C
/// @implementation. On construction it registers itself as the parser's
/// current @implementation context (P.CurParsedObjCImpl = this); the
/// destructor (defined out of line) is expected to undo that.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  /// Method bodies whose tokens were stashed for parsing after @end.
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  // Fix: initialize Finished in the member-initializer list instead of
  // assigning it in the constructor body (avoids default-then-assign).
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  /// Set by finish(); queried via isFinished().
  bool Finished;
};
/// Non-null while parsing an Objective-C @implementation; set/managed by
/// ObjCImplParsingDataRAII.
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};
/// Identifier table entries for the context-sensitive qualifiers above,
/// indexed by ObjCTypeQual.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
                          tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
                          bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();

public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.

/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0,
  MaybeTypeCast,
  IsTypeCast
};

ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
    TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                void *Info,
                                bool IsUnevaluated);

private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                      prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand,
                               bool &NotCastExpr,
                               TypeCastState isTypeCast,
                               bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand = false,
                               TypeCastState isTypeCast = NotTypeCast,
                               bool isVectorLiteral = false);

/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);

ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();

ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                              bool &isCastExpr,
                                              ParsedType &CastTy,
                                              SourceRange &CastRange);

typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;

/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                         SmallVectorImpl<SourceLocation> &CommaLocs,
                         std::function<void()> Completer = nullptr);

/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                               SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                bool stopIfCastExpr,
                                bool isTypeCast,
                                ParsedType &CastTy,
                                SourceLocation &RParenLoc);

ExprResult ParseCXXAmbiguousParenExpression(
    ParenParseOption &ExprType, ParsedType &CastTy,
    BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                          SourceLocation LParenLoc,
                                          SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
                                    ParsedType ObjectType,
                                    bool EnteringContext,
                                    bool *MayBePseudoDestructor = nullptr,
                                    bool IsTypename = false,
                                    IdentifierInfo **LastII = nullptr,
                                    bool OnlyNamespace = false);

//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
                                         bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
    LambdaIntroducer &Intro);

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();

//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();

//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                    tok::TokenKind OpKind,
                                    CXXScopeSpec &SS,
                                    ParsedType ObjectType);

//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();

//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();

ExceptionSpecificationType tryParseExceptionSpecification(
    bool Delayed,
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &DynamicExceptions,
    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
    ExprResult &NoexceptExpr,
    CachedTokens *&ExceptionSpecTokens);

// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                    SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                        SourceLocation Loc,
                                        Sema::ConditionKind CK);

//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();

//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
///
/// Dispatches to the brace-initializer parser when the next token is '{',
/// otherwise parses an assignment-expression.
ExprResult ParseInitializer() {
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();

//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression();  // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                          bool AllowOpenMPStandalone = false);

/// What kinds of constructs a statement position may contain.
enum AllowedConstructsKind {
  /// \brief Allow any declarations, statements, OpenMP directives.
  ACK_Any,
  /// \brief Allow only statements and non-standalone OpenMP directives.
  ACK_StatementsOpenMPNonStandalone,
  /// \brief Allow statements and all executable OpenMP directives
  ACK_StatementsOpenMPAnyExecutable
};

StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
                            SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
    StmtVector &Stmts,
    AllowedConstructsKind Allowed,
    SourceLocation *TrailingElseLoc,
    ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
                                  unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc,
                               Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
                               AllowedConstructsKind Allowed,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// \brief Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// \brief Parse the block; this code is always used.
  IEB_Parse,
  /// \brief Skip the block entirely; this code is never used.
  IEB_Skip,
  /// \brief Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};
/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// \brief The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// \brief Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;
  /// \brief Nested-name-specifier preceding the name.
  CXXScopeSpec SS;
  /// \brief The name we're looking for.
  UnqualifiedId Name;
  /// \brief The behavior this __if_exists or __if_not_exists block
  /// should follow: parse, skip, or treat as dependent.
  IfExistsBehavior Behavior;
};
// Microsoft __if_exists / __if_not_exists parsing in the various contexts
// where the construct may appear.
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum DeclSpecContext {
  DSC_normal,             // normal context
  DSC_class,              // class context, enables 'friend'
  DSC_type_specifier,     // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing,           // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration,  // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level,          // top-level/namespace declaration context
  DSC_template_param,     // template parameter context
  DSC_template_type_arg,  // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
  DSC_condition           // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  // Exhaustive switch (no default) so adding a new DeclSpecContext produces
  // a compiler warning here.
  switch (DSC) {
  case DSC_type_specifier:
  case DSC_trailing:
  case DSC_alias_declaration:
  case DSC_template_type_arg:
    return true;

  case DSC_normal:
  case DSC_class:
  case DSC_top_level:
  case DSC_template_param:
  case DSC_objc_method_result:
  case DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  // Exhaustive switch (no default) so adding a new DeclSpecContext produces
  // a compiler warning here.
  switch (DSC) {
  case DSC_objc_method_result:
  case DSC_template_type_arg:
  case DSC_trailing:
  case DSC_alias_declaration:
    return false;

  case DSC_normal:
  case DSC_template_param:
  case DSC_class:
  case DSC_top_level:
  case DSC_condition:
  case DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  /// Location of the ':' separating the declaration from the range.
  SourceLocation ColonLoc;
  /// The range expression after the ':'.
  ExprResult RangeExpr;

  /// True when a for-range-declaration was actually parsed, i.e. the colon
  /// location was recorded.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
                                      SourceLocation &DeclEnd,
                                      ParsedAttributesWithRange &attrs,
                                      bool RequireSemi,
                                      ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(unsigned Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
                                      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
void ParseDeclarationSpecifiers(DeclSpec &DS,
                                const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
                                AccessSpecifier AS = AS_none,
                                DeclSpecContext DSC = DSC_normal,
                                LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
                                           DeclSpecContext DSContext,
                                           LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
                                 DeclSpecContext DSC = DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                Declarator::TheContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
                          Decl *TagDecl);
void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// \brief Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationSpecifier() == TPResult::True
             : isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(true);
}
/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // In C there is no ambiguity: a parenthesized type is a type-id exactly
    // when a type-specifier/qualifier starts here.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// \brief Checks if the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume the type-id is
/// parenthesized.
bool isTypeIdUnambiguously() {
  if (getLangOpts().CPlusPlus) {
    // The flag is required by the interface but not consulted here.
    bool IsAmbiguous;
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  }
  return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;

/// Result of disambiguating what follows 'if (' or 'switch ('.
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  Error          ///< Can't be any of the above!
};

/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Overload that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *HasMissingTypename = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();

/// \brief Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);

// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                   bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
Declarator::TheContext Context
= Declarator::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
// Check for the start of a C++11 attribute-specifier-seq in a context where
// an attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only a '[[' pair in C++11 mode can begin an attribute-specifier here;
  // anything else is not our concern.
  if (getLangOpts().CPlusPlus11 && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!getLangOpts().CPlusPlus11)
    return;
  // A misplaced C++11 attribute introducer is either '[[' or 'alignas'.
  bool IsDoubleSquare =
      Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (IsDoubleSquare || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
  // An invalid range means nothing was parsed; there is nothing to reject.
  if (attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(attrs);
    attrs.clear();
  }
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
// Forbid C++11 attributes that appear in certain syntactic locations which
// the standard permits but we do not support yet, for example, attributes
// that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// \brief Skip C++11 attributes and return the end location of the last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// \brief Diagnose and skip C++11 attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// If the current token begins a GNU __attribute__ list, parse it and
/// attach the result to the declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParsedAttributes GNUAttrs(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(GNUAttrs, &EndLoc, LateAttrs, &D);
  D.takeAttributes(GNUAttrs, EndLoc);
}
/// Parse a GNU __attribute__ list into \p attrs if one starts here.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// If a C++11 attribute-specifier-seq begins here, parse it and attach the
/// attributes to the declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!getLangOpts().CPlusPlus11 || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange CXX11Attrs(AttrFactory);
  SourceLocation EndLoc;
  ParseCXX11Attributes(CXX11Attrs, &EndLoc);
  D.takeAttributes(CXX11Attrs, EndLoc);
}
/// If a C++11 attribute-specifier-seq begins here, parse it and merge the
/// result into \p attrs (the caller does not need the source range).
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().CPlusPlus11 || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange Scratch(AttrFactory);
  ParseCXX11Attributes(Scratch, endLoc);
  attrs.takeAllFrom(Scratch);
}
/// If a C++11 attribute-specifier-seq begins here, parse it into \p attrs.
/// \p OuterMightBeMessageSend is forwarded to the disambiguation routine.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!getLangOpts().CPlusPlus11)
    return;
  if (isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++-style attribute argument list. Returns true if this
/// results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// Parse a Microsoft '[...]' attribute block if one starts here and the
/// Microsoft extensions are enabled.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt)
    return;
  if (Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse a '__declspec(...)' sequence if one starts here and the declspec
/// keyword is enabled in the current language options.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
/// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true; // Nothing to parse outside OpenCL; not an error.
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// \brief Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Return the C++11 virt-specifier kind denoted by the current token,
/// delegating to the Token-taking overload.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;         // Parser whose scope stack is manipulated.
  CXXScopeSpec &SS;  // Scope specifier naming the scope to enter.
  bool EnteredScope; // Set only after Sema accepted the declarator scope.
  bool CreatedScope; // Set once a parser scope has been pushed.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  // Push a parser scope and ask Sema to enter the scope named by SS.
  // EnteredScope is only set when ActOnCXXEnterDeclaratorScope returns
  // zero/false, so the destructor never exits a Sema scope that was not
  // successfully entered; the parser scope is exited regardless.
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  // Unwind in the reverse order of EnterDeclaratorScope: Sema first,
  // then the parser scope.
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
std::vector<IdentifierInfo*>& Ident,
std::vector<SourceLocation>& NamespaceLoc,
unsigned int index, SourceLocation& InlineLoc,
ParsedAttributes& attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
unsigned Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(unsigned Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Parsed pieces of a using-declarator:
/// 'typename'[opt] nested-name-specifier unqualified-id '...'[opt]
struct UsingDeclarator {
  SourceLocation TypenameLoc;   // Location of 'typename', if present.
  CXXScopeSpec SS;              // Qualifier preceding the name.
  SourceLocation TemplateKWLoc; // Location of 'template', if present.
  UnqualifiedId Name;           // The name being introduced.
  SourceLocation EllipsisLoc;   // Location of '...' for a pack expansion.
  // Reset every field so this object can be reused for the next declarator.
  void clear() {
    TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
bool ParseUsingDeclarator(unsigned Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// \brief Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// \brief Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// \brief Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// \brief Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// \brief Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// \brief Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind);
/// \brief Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind);
/// \brief Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind);
/// \brief Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind);
/// \brief Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  Expr *TailExpr = nullptr;           // Trailing expression (e.g. after ':').
  SourceLocation ColonLoc;            // Location of ':' if one was parsed.
  CXXScopeSpec ReductionIdScopeSpec;  // Qualifier of the reduction identifier.
  DeclarationNameInfo ReductionId;    // The reduction identifier itself.
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; // 'depend' modifier.
  OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;     // 'linear' modifier.
  OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; // 'map' modifier.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;       // 'map' type.
  bool IsMapTypeImplicit = false;     // True when no map type was written.
  SourceLocation DepLinMapLoc;        // Location of the depend/linear/map kind.
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
Decl *ParseTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseTypeParameter(unsigned Depth, unsigned Position);
Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(unsigned Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define FOURCC_DX10 0x30315844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#define DDSEXT_DIMENSION_TEX2D 0x00000003
#define DDSEXTFLAGS_CUBEMAP 0x00000004
/*
  DXGI resource formats.  The enumerators carry no explicit values, so
  their order defines the numeric codes; it mirrors the Windows SDK
  DXGI_FORMAT definition (dxgiformat.h) and must not be reordered,
  since these values presumably correspond to the format code stored in
  a DDS DX10 extension header -- NOTE(review): confirm against the
  reader code (not visible in this chunk).
*/
typedef enum DXGI_FORMAT
{
  DXGI_FORMAT_UNKNOWN,
  DXGI_FORMAT_R32G32B32A32_TYPELESS,
  DXGI_FORMAT_R32G32B32A32_FLOAT,
  DXGI_FORMAT_R32G32B32A32_UINT,
  DXGI_FORMAT_R32G32B32A32_SINT,
  DXGI_FORMAT_R32G32B32_TYPELESS,
  DXGI_FORMAT_R32G32B32_FLOAT,
  DXGI_FORMAT_R32G32B32_UINT,
  DXGI_FORMAT_R32G32B32_SINT,
  DXGI_FORMAT_R16G16B16A16_TYPELESS,
  DXGI_FORMAT_R16G16B16A16_FLOAT,
  DXGI_FORMAT_R16G16B16A16_UNORM,
  DXGI_FORMAT_R16G16B16A16_UINT,
  DXGI_FORMAT_R16G16B16A16_SNORM,
  DXGI_FORMAT_R16G16B16A16_SINT,
  DXGI_FORMAT_R32G32_TYPELESS,
  DXGI_FORMAT_R32G32_FLOAT,
  DXGI_FORMAT_R32G32_UINT,
  DXGI_FORMAT_R32G32_SINT,
  DXGI_FORMAT_R32G8X24_TYPELESS,
  DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
  DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
  DXGI_FORMAT_X32_TYPELESS_G8X24_UINT,
  DXGI_FORMAT_R10G10B10A2_TYPELESS,
  DXGI_FORMAT_R10G10B10A2_UNORM,
  DXGI_FORMAT_R10G10B10A2_UINT,
  DXGI_FORMAT_R11G11B10_FLOAT,
  DXGI_FORMAT_R8G8B8A8_TYPELESS,
  DXGI_FORMAT_R8G8B8A8_UNORM,
  DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
  DXGI_FORMAT_R8G8B8A8_UINT,
  DXGI_FORMAT_R8G8B8A8_SNORM,
  DXGI_FORMAT_R8G8B8A8_SINT,
  DXGI_FORMAT_R16G16_TYPELESS,
  DXGI_FORMAT_R16G16_FLOAT,
  DXGI_FORMAT_R16G16_UNORM,
  DXGI_FORMAT_R16G16_UINT,
  DXGI_FORMAT_R16G16_SNORM,
  DXGI_FORMAT_R16G16_SINT,
  DXGI_FORMAT_R32_TYPELESS,
  DXGI_FORMAT_D32_FLOAT,
  DXGI_FORMAT_R32_FLOAT,
  DXGI_FORMAT_R32_UINT,
  DXGI_FORMAT_R32_SINT,
  DXGI_FORMAT_R24G8_TYPELESS,
  DXGI_FORMAT_D24_UNORM_S8_UINT,
  DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
  DXGI_FORMAT_X24_TYPELESS_G8_UINT,
  DXGI_FORMAT_R8G8_TYPELESS,
  DXGI_FORMAT_R8G8_UNORM,
  DXGI_FORMAT_R8G8_UINT,
  DXGI_FORMAT_R8G8_SNORM,
  DXGI_FORMAT_R8G8_SINT,
  DXGI_FORMAT_R16_TYPELESS,
  DXGI_FORMAT_R16_FLOAT,
  DXGI_FORMAT_D16_UNORM,
  DXGI_FORMAT_R16_UNORM,
  DXGI_FORMAT_R16_UINT,
  DXGI_FORMAT_R16_SNORM,
  DXGI_FORMAT_R16_SINT,
  DXGI_FORMAT_R8_TYPELESS,
  DXGI_FORMAT_R8_UNORM,
  DXGI_FORMAT_R8_UINT,
  DXGI_FORMAT_R8_SNORM,
  DXGI_FORMAT_R8_SINT,
  DXGI_FORMAT_A8_UNORM,
  DXGI_FORMAT_R1_UNORM,
  DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
  DXGI_FORMAT_R8G8_B8G8_UNORM,
  DXGI_FORMAT_G8R8_G8B8_UNORM,
  /* Block-compressed (BC1..BC5) formats. */
  DXGI_FORMAT_BC1_TYPELESS,
  DXGI_FORMAT_BC1_UNORM,
  DXGI_FORMAT_BC1_UNORM_SRGB,
  DXGI_FORMAT_BC2_TYPELESS,
  DXGI_FORMAT_BC2_UNORM,
  DXGI_FORMAT_BC2_UNORM_SRGB,
  DXGI_FORMAT_BC3_TYPELESS,
  DXGI_FORMAT_BC3_UNORM,
  DXGI_FORMAT_BC3_UNORM_SRGB,
  DXGI_FORMAT_BC4_TYPELESS,
  DXGI_FORMAT_BC4_UNORM,
  DXGI_FORMAT_BC4_SNORM,
  DXGI_FORMAT_BC5_TYPELESS,
  DXGI_FORMAT_BC5_UNORM,
  DXGI_FORMAT_BC5_SNORM,
  DXGI_FORMAT_B5G6R5_UNORM,
  DXGI_FORMAT_B5G5R5A1_UNORM,
  DXGI_FORMAT_B8G8R8A8_UNORM,
  DXGI_FORMAT_B8G8R8X8_UNORM,
  DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM,
  DXGI_FORMAT_B8G8R8A8_TYPELESS,
  DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
  DXGI_FORMAT_B8G8R8X8_TYPELESS,
  DXGI_FORMAT_B8G8R8X8_UNORM_SRGB,
  /* Block-compressed (BC6H/BC7) formats. */
  DXGI_FORMAT_BC6H_TYPELESS,
  DXGI_FORMAT_BC6H_UF16,
  DXGI_FORMAT_BC6H_SF16,
  DXGI_FORMAT_BC7_TYPELESS,
  DXGI_FORMAT_BC7_UNORM,
  DXGI_FORMAT_BC7_UNORM_SRGB,
  /* Video and palettized formats. */
  DXGI_FORMAT_AYUV,
  DXGI_FORMAT_Y410,
  DXGI_FORMAT_Y416,
  DXGI_FORMAT_NV12,
  DXGI_FORMAT_P010,
  DXGI_FORMAT_P016,
  DXGI_FORMAT_420_OPAQUE,
  DXGI_FORMAT_YUY2,
  DXGI_FORMAT_Y210,
  DXGI_FORMAT_Y216,
  DXGI_FORMAT_NV11,
  DXGI_FORMAT_AI44,
  DXGI_FORMAT_IA44,
  DXGI_FORMAT_P8,
  DXGI_FORMAT_A8P8,
  DXGI_FORMAT_B4G4R4A4_UNORM,
  DXGI_FORMAT_P208,
  DXGI_FORMAT_V208,
  DXGI_FORMAT_V408,
  DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE,
  DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE,
  DXGI_FORMAT_FORCE_UINT
} DXGI_FORMAT;
/* Fallback definition for toolchains whose headers do not provide SIZE_MAX. */
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/*
  In-memory copy of the DDS_PIXELFORMAT header block: capability flags, the
  FourCC compression code, and the bit layout of uncompressed pixels.
*/
typedef struct _DDSPixelFormat
{
  size_t
    flags,          /* DDPF_* pixel-format flags */
    fourcc,         /* FourCC code identifying the compression (e.g. DXTn) */
    rgb_bitcount,   /* bits per uncompressed pixel */
    r_bitmask,      /* bit mask of the red channel */
    g_bitmask,      /* bit mask of the green channel */
    b_bitmask,      /* bit mask of the blue channel */
    alpha_bitmask;  /* bit mask of the alpha channel */
} DDSPixelFormat;
/*
  Parsed DDS file header.  The ext* members presumably mirror the optional
  DX10 extended header (DDS_HEADER_DXT10) — confirm against the header
  reader elsewhere in this file.
*/
typedef struct _DDSInfo
{
  size_t
    flags,              /* DDSD_* header flags */
    height,             /* image height in pixels */
    width,              /* image width in pixels */
    pitchOrLinearSize,  /* row pitch or total size of the top-level image */
    depth,              /* depth of a volume texture */
    mipmapcount,        /* number of mipmap levels */
    ddscaps1,           /* capability flags (first dword) */
    ddscaps2,           /* capability flags (second dword: cubemap/volume) */
    extFormat,          /* extended header: DXGI_FORMAT value */
    extDimension,       /* extended header: resource dimension */
    extFlags,           /* extended header: misc flags */
    extArraySize,       /* extended header: array size */
    extFlags2;          /* extended header: misc flags 2 */
  DDSPixelFormat
    pixelformat;        /* embedded pixel-format description */
} DDSInfo;
/*
  Per-channel values for the (up to) four palette colors of a compressed
  DXT-style block.
*/
typedef struct _DDSColors
{
  unsigned char
    r[4],
    g[4],
    b[4],
    a[4];
} DDSColors;
/*
  Per-channel endpoint values for a BC7 block: up to three subsets with two
  endpoints each, hence six entries per channel.
*/
typedef struct _BC7Colors
{
  unsigned char
    r[6],
    g[6],
    b[6],
    a[6];
} BC7Colors;
/* Four-component single-precision vector (x, y, z, w). */
typedef struct _DDSVector4
{
  float
    x,
    y,
    z,
    w;
} DDSVector4;
/* Three-component single-precision vector (x, y, z). */
typedef struct _DDSVector3
{
  float
    x,
    y,
    z;
} DDSVector3;
/*
  One candidate endpoint pair for encoding a single color value: the start
  and end endpoint levels plus the resulting quantization error.
*/
typedef struct _DDSSourceBlock
{
  unsigned char
    start,
    end,
    error;
} DDSSourceBlock;
/* Two candidate source blocks for a single 8-bit channel value. */
typedef struct _DDSSingleColorLookup
{
  DDSSourceBlock sources[2];
} DDSSingleColorLookup;
/*
  Bit-layout description of one of the eight BC7 block modes: how many bits
  each field of the block occupies.
*/
typedef struct _BC7ModeInfo
{
  unsigned char
    partition_bits,   /* bits used to encode the partition id */
    num_subsets,      /* number of endpoint subsets (1-3) */
    color_precision,  /* bits per color endpoint channel */
    alpha_precision,  /* bits per alpha endpoint (0 = no alpha) */
    num_pbits,        /* number of shared/unique endpoint p-bits */
    index_precision,  /* bits per primary index */
    index2_precision; /* bits per secondary index (modes 4/5) */
} BC7ModeInfo;
/*
  Function types for the format-specific DDS decoders; the concrete decoder
  implementations appear later in this file.
*/
typedef MagickBooleanType
  DDSDecoder(const ImageInfo *,Image *,const DDSInfo *,const MagickBooleanType,
    ExceptionInfo *);
typedef MagickBooleanType
  DDSPixelDecoder(Image *,const DDSInfo *,ExceptionInfo *);
/*
  Single-color encoding lookup, one entry per 8-bit channel value.  The
  _5_4 suffix presumably means 5-bit endpoints with the 4-entry DXT palette
  (used for the red/blue channels of 5:6:5 color) — confirm against the
  writer code that consumes DDS_LOOKUP.
*/
static const DDSSingleColorLookup DDSLookup_5_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 1 } } },
  { { { 0, 0, 2 }, { 0, 1, 0 } } },
  { { { 0, 0, 3 }, { 0, 1, 1 } } },
  { { { 0, 0, 4 }, { 0, 2, 1 } } },
  { { { 1, 0, 3 }, { 0, 2, 0 } } },
  { { { 1, 0, 2 }, { 0, 2, 1 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 1, 2, 1 } } },
  { { { 1, 0, 2 }, { 1, 2, 0 } } },
  { { { 1, 0, 3 }, { 0, 4, 0 } } },
  { { { 1, 0, 4 }, { 0, 5, 1 } } },
  { { { 2, 0, 3 }, { 0, 5, 0 } } },
  { { { 2, 0, 2 }, { 0, 5, 1 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 2, 3, 1 } } },
  { { { 2, 0, 2 }, { 2, 3, 0 } } },
  { { { 2, 0, 3 }, { 0, 7, 0 } } },
  { { { 2, 0, 4 }, { 1, 6, 1 } } },
  { { { 3, 0, 3 }, { 1, 6, 0 } } },
  { { { 3, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 2 }, { 0, 10, 1 } } },
  { { { 3, 0, 3 }, { 0, 10, 0 } } },
  { { { 3, 0, 4 }, { 2, 7, 1 } } },
  { { { 4, 0, 4 }, { 2, 7, 0 } } },
  { { { 4, 0, 3 }, { 0, 11, 0 } } },
  { { { 4, 0, 2 }, { 1, 10, 1 } } },
  { { { 4, 0, 1 }, { 1, 10, 0 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 1 } } },
  { { { 4, 0, 2 }, { 0, 13, 0 } } },
  { { { 4, 0, 3 }, { 0, 13, 1 } } },
  { { { 4, 0, 4 }, { 0, 14, 1 } } },
  { { { 5, 0, 3 }, { 0, 14, 0 } } },
  { { { 5, 0, 2 }, { 2, 11, 1 } } },
  { { { 5, 0, 1 }, { 2, 11, 0 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 1, 14, 1 } } },
  { { { 5, 0, 2 }, { 1, 14, 0 } } },
  { { { 5, 0, 3 }, { 0, 16, 0 } } },
  { { { 5, 0, 4 }, { 0, 17, 1 } } },
  { { { 6, 0, 3 }, { 0, 17, 0 } } },
  { { { 6, 0, 2 }, { 0, 17, 1 } } },
  { { { 6, 0, 1 }, { 0, 18, 1 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 2, 15, 1 } } },
  { { { 6, 0, 2 }, { 2, 15, 0 } } },
  { { { 6, 0, 3 }, { 0, 19, 0 } } },
  { { { 6, 0, 4 }, { 1, 18, 1 } } },
  { { { 7, 0, 3 }, { 1, 18, 0 } } },
  { { { 7, 0, 2 }, { 0, 20, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 2 }, { 0, 22, 1 } } },
  { { { 7, 0, 3 }, { 0, 22, 0 } } },
  { { { 7, 0, 4 }, { 2, 19, 1 } } },
  { { { 8, 0, 4 }, { 2, 19, 0 } } },
  { { { 8, 0, 3 }, { 0, 23, 0 } } },
  { { { 8, 0, 2 }, { 1, 22, 1 } } },
  { { { 8, 0, 1 }, { 1, 22, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 1 } } },
  { { { 8, 0, 2 }, { 0, 25, 0 } } },
  { { { 8, 0, 3 }, { 0, 25, 1 } } },
  { { { 8, 0, 4 }, { 0, 26, 1 } } },
  { { { 9, 0, 3 }, { 0, 26, 0 } } },
  { { { 9, 0, 2 }, { 2, 23, 1 } } },
  { { { 9, 0, 1 }, { 2, 23, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 1, 26, 1 } } },
  { { { 9, 0, 2 }, { 1, 26, 0 } } },
  { { { 9, 0, 3 }, { 0, 28, 0 } } },
  { { { 9, 0, 4 }, { 0, 29, 1 } } },
  { { { 10, 0, 3 }, { 0, 29, 0 } } },
  { { { 10, 0, 2 }, { 0, 29, 1 } } },
  { { { 10, 0, 1 }, { 0, 30, 1 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 2, 27, 1 } } },
  { { { 10, 0, 2 }, { 2, 27, 0 } } },
  { { { 10, 0, 3 }, { 0, 31, 0 } } },
  { { { 10, 0, 4 }, { 1, 30, 1 } } },
  { { { 11, 0, 3 }, { 1, 30, 0 } } },
  { { { 11, 0, 2 }, { 4, 24, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 0 }, { 1, 31, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 2 }, { 2, 30, 1 } } },
  { { { 11, 0, 3 }, { 2, 30, 0 } } },
  { { { 11, 0, 4 }, { 2, 31, 1 } } },
  { { { 12, 0, 4 }, { 2, 31, 0 } } },
  { { { 12, 0, 3 }, { 4, 27, 0 } } },
  { { { 12, 0, 2 }, { 3, 30, 1 } } },
  { { { 12, 0, 1 }, { 3, 30, 0 } } },
  { { { 12, 0, 0 }, { 4, 28, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 1 } } },
  { { { 12, 0, 2 }, { 3, 31, 0 } } },
  { { { 12, 0, 3 }, { 3, 31, 1 } } },
  { { { 12, 0, 4 }, { 4, 30, 1 } } },
  { { { 13, 0, 3 }, { 4, 30, 0 } } },
  { { { 13, 0, 2 }, { 6, 27, 1 } } },
  { { { 13, 0, 1 }, { 6, 27, 0 } } },
  { { { 13, 0, 0 }, { 4, 31, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 1 } } },
  { { { 13, 0, 2 }, { 5, 30, 0 } } },
  { { { 13, 0, 3 }, { 8, 24, 0 } } },
  { { { 13, 0, 4 }, { 5, 31, 1 } } },
  { { { 14, 0, 3 }, { 5, 31, 0 } } },
  { { { 14, 0, 2 }, { 5, 31, 1 } } },
  { { { 14, 0, 1 }, { 6, 30, 1 } } },
  { { { 14, 0, 0 }, { 6, 30, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 1 } } },
  { { { 14, 0, 2 }, { 6, 31, 0 } } },
  { { { 14, 0, 3 }, { 8, 27, 0 } } },
  { { { 14, 0, 4 }, { 7, 30, 1 } } },
  { { { 15, 0, 3 }, { 7, 30, 0 } } },
  { { { 15, 0, 2 }, { 8, 28, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 0 }, { 7, 31, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 2 }, { 8, 30, 1 } } },
  { { { 15, 0, 3 }, { 8, 30, 0 } } },
  { { { 15, 0, 4 }, { 10, 27, 1 } } },
  { { { 16, 0, 4 }, { 10, 27, 0 } } },
  { { { 16, 0, 3 }, { 8, 31, 0 } } },
  { { { 16, 0, 2 }, { 9, 30, 1 } } },
  { { { 16, 0, 1 }, { 9, 30, 0 } } },
  { { { 16, 0, 0 }, { 12, 24, 0 } } },
  { { { 16, 0, 1 }, { 9, 31, 1 } } },
  { { { 16, 0, 2 }, { 9, 31, 0 } } },
  { { { 16, 0, 3 }, { 9, 31, 1 } } },
  { { { 16, 0, 4 }, { 10, 30, 1 } } },
  { { { 17, 0, 3 }, { 10, 30, 0 } } },
  { { { 17, 0, 2 }, { 10, 31, 1 } } },
  { { { 17, 0, 1 }, { 10, 31, 0 } } },
  { { { 17, 0, 0 }, { 12, 27, 0 } } },
  { { { 17, 0, 1 }, { 11, 30, 1 } } },
  { { { 17, 0, 2 }, { 11, 30, 0 } } },
  { { { 17, 0, 3 }, { 12, 28, 0 } } },
  { { { 17, 0, 4 }, { 11, 31, 1 } } },
  { { { 18, 0, 3 }, { 11, 31, 0 } } },
  { { { 18, 0, 2 }, { 11, 31, 1 } } },
  { { { 18, 0, 1 }, { 12, 30, 1 } } },
  { { { 18, 0, 0 }, { 12, 30, 0 } } },
  { { { 18, 0, 1 }, { 14, 27, 1 } } },
  { { { 18, 0, 2 }, { 14, 27, 0 } } },
  { { { 18, 0, 3 }, { 12, 31, 0 } } },
  { { { 18, 0, 4 }, { 13, 30, 1 } } },
  { { { 19, 0, 3 }, { 13, 30, 0 } } },
  { { { 19, 0, 2 }, { 16, 24, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 0 }, { 13, 31, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 2 }, { 14, 30, 1 } } },
  { { { 19, 0, 3 }, { 14, 30, 0 } } },
  { { { 19, 0, 4 }, { 14, 31, 1 } } },
  { { { 20, 0, 4 }, { 14, 31, 0 } } },
  { { { 20, 0, 3 }, { 16, 27, 0 } } },
  { { { 20, 0, 2 }, { 15, 30, 1 } } },
  { { { 20, 0, 1 }, { 15, 30, 0 } } },
  { { { 20, 0, 0 }, { 16, 28, 0 } } },
  { { { 20, 0, 1 }, { 15, 31, 1 } } },
  { { { 20, 0, 2 }, { 15, 31, 0 } } },
  { { { 20, 0, 3 }, { 15, 31, 1 } } },
  { { { 20, 0, 4 }, { 16, 30, 1 } } },
  { { { 21, 0, 3 }, { 16, 30, 0 } } },
  { { { 21, 0, 2 }, { 18, 27, 1 } } },
  { { { 21, 0, 1 }, { 18, 27, 0 } } },
  { { { 21, 0, 0 }, { 16, 31, 0 } } },
  { { { 21, 0, 1 }, { 17, 30, 1 } } },
  { { { 21, 0, 2 }, { 17, 30, 0 } } },
  { { { 21, 0, 3 }, { 20, 24, 0 } } },
  { { { 21, 0, 4 }, { 17, 31, 1 } } },
  { { { 22, 0, 3 }, { 17, 31, 0 } } },
  { { { 22, 0, 2 }, { 17, 31, 1 } } },
  { { { 22, 0, 1 }, { 18, 30, 1 } } },
  { { { 22, 0, 0 }, { 18, 30, 0 } } },
  { { { 22, 0, 1 }, { 18, 31, 1 } } },
  { { { 22, 0, 2 }, { 18, 31, 0 } } },
  { { { 22, 0, 3 }, { 20, 27, 0 } } },
  { { { 22, 0, 4 }, { 19, 30, 1 } } },
  { { { 23, 0, 3 }, { 19, 30, 0 } } },
  { { { 23, 0, 2 }, { 20, 28, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 0 }, { 19, 31, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 2 }, { 20, 30, 1 } } },
  { { { 23, 0, 3 }, { 20, 30, 0 } } },
  { { { 23, 0, 4 }, { 22, 27, 1 } } },
  { { { 24, 0, 4 }, { 22, 27, 0 } } },
  { { { 24, 0, 3 }, { 20, 31, 0 } } },
  { { { 24, 0, 2 }, { 21, 30, 1 } } },
  { { { 24, 0, 1 }, { 21, 30, 0 } } },
  { { { 24, 0, 0 }, { 24, 24, 0 } } },
  { { { 24, 0, 1 }, { 21, 31, 1 } } },
  { { { 24, 0, 2 }, { 21, 31, 0 } } },
  { { { 24, 0, 3 }, { 21, 31, 1 } } },
  { { { 24, 0, 4 }, { 22, 30, 1 } } },
  { { { 25, 0, 3 }, { 22, 30, 0 } } },
  { { { 25, 0, 2 }, { 22, 31, 1 } } },
  { { { 25, 0, 1 }, { 22, 31, 0 } } },
  { { { 25, 0, 0 }, { 24, 27, 0 } } },
  { { { 25, 0, 1 }, { 23, 30, 1 } } },
  { { { 25, 0, 2 }, { 23, 30, 0 } } },
  { { { 25, 0, 3 }, { 24, 28, 0 } } },
  { { { 25, 0, 4 }, { 23, 31, 1 } } },
  { { { 26, 0, 3 }, { 23, 31, 0 } } },
  { { { 26, 0, 2 }, { 23, 31, 1 } } },
  { { { 26, 0, 1 }, { 24, 30, 1 } } },
  { { { 26, 0, 0 }, { 24, 30, 0 } } },
  { { { 26, 0, 1 }, { 26, 27, 1 } } },
  { { { 26, 0, 2 }, { 26, 27, 0 } } },
  { { { 26, 0, 3 }, { 24, 31, 0 } } },
  { { { 26, 0, 4 }, { 25, 30, 1 } } },
  { { { 27, 0, 3 }, { 25, 30, 0 } } },
  { { { 27, 0, 2 }, { 28, 24, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 0 }, { 25, 31, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 2 }, { 26, 30, 1 } } },
  { { { 27, 0, 3 }, { 26, 30, 0 } } },
  { { { 27, 0, 4 }, { 26, 31, 1 } } },
  { { { 28, 0, 4 }, { 26, 31, 0 } } },
  { { { 28, 0, 3 }, { 28, 27, 0 } } },
  { { { 28, 0, 2 }, { 27, 30, 1 } } },
  { { { 28, 0, 1 }, { 27, 30, 0 } } },
  { { { 28, 0, 0 }, { 28, 28, 0 } } },
  { { { 28, 0, 1 }, { 27, 31, 1 } } },
  { { { 28, 0, 2 }, { 27, 31, 0 } } },
  { { { 28, 0, 3 }, { 27, 31, 1 } } },
  { { { 28, 0, 4 }, { 28, 30, 1 } } },
  { { { 29, 0, 3 }, { 28, 30, 0 } } },
  { { { 29, 0, 2 }, { 30, 27, 1 } } },
  { { { 29, 0, 1 }, { 30, 27, 0 } } },
  { { { 29, 0, 0 }, { 28, 31, 0 } } },
  { { { 29, 0, 1 }, { 29, 30, 1 } } },
  { { { 29, 0, 2 }, { 29, 30, 0 } } },
  { { { 29, 0, 3 }, { 29, 30, 1 } } },
  { { { 29, 0, 4 }, { 29, 31, 1 } } },
  { { { 30, 0, 3 }, { 29, 31, 0 } } },
  { { { 30, 0, 2 }, { 29, 31, 1 } } },
  { { { 30, 0, 1 }, { 30, 30, 1 } } },
  { { { 30, 0, 0 }, { 30, 30, 0 } } },
  { { { 30, 0, 1 }, { 30, 31, 1 } } },
  { { { 30, 0, 2 }, { 30, 31, 0 } } },
  { { { 30, 0, 3 }, { 30, 31, 1 } } },
  { { { 30, 0, 4 }, { 31, 30, 1 } } },
  { { { 31, 0, 3 }, { 31, 30, 0 } } },
  { { { 31, 0, 2 }, { 31, 30, 1 } } },
  { { { 31, 0, 1 }, { 31, 31, 1 } } },
  { { { 31, 0, 0 }, { 31, 31, 0 } } }
};
/*
  Single-color encoding lookup for the 6-bit channel (the green channel of
  5:6:5 color), one entry per 8-bit value; same layout as DDSLookup_5_4.
*/
static const DDSSingleColorLookup DDSLookup_6_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 0 } } },
  { { { 0, 0, 2 }, { 0, 2, 0 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 0, 4, 0 } } },
  { { { 1, 0, 2 }, { 0, 5, 0 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 0, 7, 0 } } },
  { { { 2, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 10, 0 } } },
  { { { 3, 0, 2 }, { 0, 11, 0 } } },
  { { { 4, 0, 1 }, { 0, 12, 1 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 0 } } },
  { { { 4, 0, 2 }, { 0, 14, 0 } } },
  { { { 5, 0, 1 }, { 0, 15, 1 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 0, 16, 0 } } },
  { { { 5, 0, 2 }, { 1, 15, 0 } } },
  { { { 6, 0, 1 }, { 0, 17, 0 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 0, 19, 0 } } },
  { { { 6, 0, 2 }, { 3, 14, 0 } } },
  { { { 7, 0, 1 }, { 0, 20, 0 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 22, 0 } } },
  { { { 7, 0, 2 }, { 4, 15, 0 } } },
  { { { 8, 0, 1 }, { 0, 23, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 0 } } },
  { { { 8, 0, 2 }, { 6, 14, 0 } } },
  { { { 9, 0, 1 }, { 0, 26, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 0, 28, 0 } } },
  { { { 9, 0, 2 }, { 7, 15, 0 } } },
  { { { 10, 0, 1 }, { 0, 29, 0 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 0, 31, 0 } } },
  { { { 10, 0, 2 }, { 9, 14, 0 } } },
  { { { 11, 0, 1 }, { 0, 32, 0 } } },
  { { { 11, 0, 0 }, { 0, 33, 0 } } },
  { { { 11, 0, 1 }, { 2, 30, 0 } } },
  { { { 11, 0, 2 }, { 0, 34, 0 } } },
  { { { 12, 0, 1 }, { 0, 35, 0 } } },
  { { { 12, 0, 0 }, { 0, 36, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 0 } } },
  { { { 12, 0, 2 }, { 0, 37, 0 } } },
  { { { 13, 0, 1 }, { 0, 38, 0 } } },
  { { { 13, 0, 0 }, { 0, 39, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 0 } } },
  { { { 13, 0, 2 }, { 0, 40, 0 } } },
  { { { 14, 0, 1 }, { 0, 41, 0 } } },
  { { { 14, 0, 0 }, { 0, 42, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 0 } } },
  { { { 14, 0, 2 }, { 0, 43, 0 } } },
  { { { 15, 0, 1 }, { 0, 44, 0 } } },
  { { { 15, 0, 0 }, { 0, 45, 0 } } },
  { { { 15, 0, 1 }, { 8, 30, 0 } } },
  { { { 15, 0, 2 }, { 0, 46, 0 } } },
  { { { 16, 0, 2 }, { 0, 47, 0 } } },
  { { { 16, 0, 1 }, { 1, 46, 0 } } },
  { { { 16, 0, 0 }, { 0, 48, 0 } } },
  { { { 16, 0, 1 }, { 0, 49, 0 } } },
  { { { 16, 0, 2 }, { 0, 50, 0 } } },
  { { { 17, 0, 1 }, { 2, 47, 0 } } },
  { { { 17, 0, 0 }, { 0, 51, 0 } } },
  { { { 17, 0, 1 }, { 0, 52, 0 } } },
  { { { 17, 0, 2 }, { 0, 53, 0 } } },
  { { { 18, 0, 1 }, { 4, 46, 0 } } },
  { { { 18, 0, 0 }, { 0, 54, 0 } } },
  { { { 18, 0, 1 }, { 0, 55, 0 } } },
  { { { 18, 0, 2 }, { 0, 56, 0 } } },
  { { { 19, 0, 1 }, { 5, 47, 0 } } },
  { { { 19, 0, 0 }, { 0, 57, 0 } } },
  { { { 19, 0, 1 }, { 0, 58, 0 } } },
  { { { 19, 0, 2 }, { 0, 59, 0 } } },
  { { { 20, 0, 1 }, { 7, 46, 0 } } },
  { { { 20, 0, 0 }, { 0, 60, 0 } } },
  { { { 20, 0, 1 }, { 0, 61, 0 } } },
  { { { 20, 0, 2 }, { 0, 62, 0 } } },
  { { { 21, 0, 1 }, { 8, 47, 0 } } },
  { { { 21, 0, 0 }, { 0, 63, 0 } } },
  { { { 21, 0, 1 }, { 1, 62, 0 } } },
  { { { 21, 0, 2 }, { 1, 63, 0 } } },
  { { { 22, 0, 1 }, { 10, 46, 0 } } },
  { { { 22, 0, 0 }, { 2, 62, 0 } } },
  { { { 22, 0, 1 }, { 2, 63, 0 } } },
  { { { 22, 0, 2 }, { 3, 62, 0 } } },
  { { { 23, 0, 1 }, { 11, 47, 0 } } },
  { { { 23, 0, 0 }, { 3, 63, 0 } } },
  { { { 23, 0, 1 }, { 4, 62, 0 } } },
  { { { 23, 0, 2 }, { 4, 63, 0 } } },
  { { { 24, 0, 1 }, { 13, 46, 0 } } },
  { { { 24, 0, 0 }, { 5, 62, 0 } } },
  { { { 24, 0, 1 }, { 5, 63, 0 } } },
  { { { 24, 0, 2 }, { 6, 62, 0 } } },
  { { { 25, 0, 1 }, { 14, 47, 0 } } },
  { { { 25, 0, 0 }, { 6, 63, 0 } } },
  { { { 25, 0, 1 }, { 7, 62, 0 } } },
  { { { 25, 0, 2 }, { 7, 63, 0 } } },
  { { { 26, 0, 1 }, { 16, 45, 0 } } },
  { { { 26, 0, 0 }, { 8, 62, 0 } } },
  { { { 26, 0, 1 }, { 8, 63, 0 } } },
  { { { 26, 0, 2 }, { 9, 62, 0 } } },
  { { { 27, 0, 1 }, { 16, 48, 0 } } },
  { { { 27, 0, 0 }, { 9, 63, 0 } } },
  { { { 27, 0, 1 }, { 10, 62, 0 } } },
  { { { 27, 0, 2 }, { 10, 63, 0 } } },
  { { { 28, 0, 1 }, { 16, 51, 0 } } },
  { { { 28, 0, 0 }, { 11, 62, 0 } } },
  { { { 28, 0, 1 }, { 11, 63, 0 } } },
  { { { 28, 0, 2 }, { 12, 62, 0 } } },
  { { { 29, 0, 1 }, { 16, 54, 0 } } },
  { { { 29, 0, 0 }, { 12, 63, 0 } } },
  { { { 29, 0, 1 }, { 13, 62, 0 } } },
  { { { 29, 0, 2 }, { 13, 63, 0 } } },
  { { { 30, 0, 1 }, { 16, 57, 0 } } },
  { { { 30, 0, 0 }, { 14, 62, 0 } } },
  { { { 30, 0, 1 }, { 14, 63, 0 } } },
  { { { 30, 0, 2 }, { 15, 62, 0 } } },
  { { { 31, 0, 1 }, { 16, 60, 0 } } },
  { { { 31, 0, 0 }, { 15, 63, 0 } } },
  { { { 31, 0, 1 }, { 24, 46, 0 } } },
  { { { 31, 0, 2 }, { 16, 62, 0 } } },
  { { { 32, 0, 2 }, { 16, 63, 0 } } },
  { { { 32, 0, 1 }, { 17, 62, 0 } } },
  { { { 32, 0, 0 }, { 25, 47, 0 } } },
  { { { 32, 0, 1 }, { 17, 63, 0 } } },
  { { { 32, 0, 2 }, { 18, 62, 0 } } },
  { { { 33, 0, 1 }, { 18, 63, 0 } } },
  { { { 33, 0, 0 }, { 27, 46, 0 } } },
  { { { 33, 0, 1 }, { 19, 62, 0 } } },
  { { { 33, 0, 2 }, { 19, 63, 0 } } },
  { { { 34, 0, 1 }, { 20, 62, 0 } } },
  { { { 34, 0, 0 }, { 28, 47, 0 } } },
  { { { 34, 0, 1 }, { 20, 63, 0 } } },
  { { { 34, 0, 2 }, { 21, 62, 0 } } },
  { { { 35, 0, 1 }, { 21, 63, 0 } } },
  { { { 35, 0, 0 }, { 30, 46, 0 } } },
  { { { 35, 0, 1 }, { 22, 62, 0 } } },
  { { { 35, 0, 2 }, { 22, 63, 0 } } },
  { { { 36, 0, 1 }, { 23, 62, 0 } } },
  { { { 36, 0, 0 }, { 31, 47, 0 } } },
  { { { 36, 0, 1 }, { 23, 63, 0 } } },
  { { { 36, 0, 2 }, { 24, 62, 0 } } },
  { { { 37, 0, 1 }, { 24, 63, 0 } } },
  { { { 37, 0, 0 }, { 32, 47, 0 } } },
  { { { 37, 0, 1 }, { 25, 62, 0 } } },
  { { { 37, 0, 2 }, { 25, 63, 0 } } },
  { { { 38, 0, 1 }, { 26, 62, 0 } } },
  { { { 38, 0, 0 }, { 32, 50, 0 } } },
  { { { 38, 0, 1 }, { 26, 63, 0 } } },
  { { { 38, 0, 2 }, { 27, 62, 0 } } },
  { { { 39, 0, 1 }, { 27, 63, 0 } } },
  { { { 39, 0, 0 }, { 32, 53, 0 } } },
  { { { 39, 0, 1 }, { 28, 62, 0 } } },
  { { { 39, 0, 2 }, { 28, 63, 0 } } },
  { { { 40, 0, 1 }, { 29, 62, 0 } } },
  { { { 40, 0, 0 }, { 32, 56, 0 } } },
  { { { 40, 0, 1 }, { 29, 63, 0 } } },
  { { { 40, 0, 2 }, { 30, 62, 0 } } },
  { { { 41, 0, 1 }, { 30, 63, 0 } } },
  { { { 41, 0, 0 }, { 32, 59, 0 } } },
  { { { 41, 0, 1 }, { 31, 62, 0 } } },
  { { { 41, 0, 2 }, { 31, 63, 0 } } },
  { { { 42, 0, 1 }, { 32, 61, 0 } } },
  { { { 42, 0, 0 }, { 32, 62, 0 } } },
  { { { 42, 0, 1 }, { 32, 63, 0 } } },
  { { { 42, 0, 2 }, { 41, 46, 0 } } },
  { { { 43, 0, 1 }, { 33, 62, 0 } } },
  { { { 43, 0, 0 }, { 33, 63, 0 } } },
  { { { 43, 0, 1 }, { 34, 62, 0 } } },
  { { { 43, 0, 2 }, { 42, 47, 0 } } },
  { { { 44, 0, 1 }, { 34, 63, 0 } } },
  { { { 44, 0, 0 }, { 35, 62, 0 } } },
  { { { 44, 0, 1 }, { 35, 63, 0 } } },
  { { { 44, 0, 2 }, { 44, 46, 0 } } },
  { { { 45, 0, 1 }, { 36, 62, 0 } } },
  { { { 45, 0, 0 }, { 36, 63, 0 } } },
  { { { 45, 0, 1 }, { 37, 62, 0 } } },
  { { { 45, 0, 2 }, { 45, 47, 0 } } },
  { { { 46, 0, 1 }, { 37, 63, 0 } } },
  { { { 46, 0, 0 }, { 38, 62, 0 } } },
  { { { 46, 0, 1 }, { 38, 63, 0 } } },
  { { { 46, 0, 2 }, { 47, 46, 0 } } },
  { { { 47, 0, 1 }, { 39, 62, 0 } } },
  { { { 47, 0, 0 }, { 39, 63, 0 } } },
  { { { 47, 0, 1 }, { 40, 62, 0 } } },
  { { { 47, 0, 2 }, { 48, 46, 0 } } },
  { { { 48, 0, 2 }, { 40, 63, 0 } } },
  { { { 48, 0, 1 }, { 41, 62, 0 } } },
  { { { 48, 0, 0 }, { 41, 63, 0 } } },
  { { { 48, 0, 1 }, { 48, 49, 0 } } },
  { { { 48, 0, 2 }, { 42, 62, 0 } } },
  { { { 49, 0, 1 }, { 42, 63, 0 } } },
  { { { 49, 0, 0 }, { 43, 62, 0 } } },
  { { { 49, 0, 1 }, { 48, 52, 0 } } },
  { { { 49, 0, 2 }, { 43, 63, 0 } } },
  { { { 50, 0, 1 }, { 44, 62, 0 } } },
  { { { 50, 0, 0 }, { 44, 63, 0 } } },
  { { { 50, 0, 1 }, { 48, 55, 0 } } },
  { { { 50, 0, 2 }, { 45, 62, 0 } } },
  { { { 51, 0, 1 }, { 45, 63, 0 } } },
  { { { 51, 0, 0 }, { 46, 62, 0 } } },
  { { { 51, 0, 1 }, { 48, 58, 0 } } },
  { { { 51, 0, 2 }, { 46, 63, 0 } } },
  { { { 52, 0, 1 }, { 47, 62, 0 } } },
  { { { 52, 0, 0 }, { 47, 63, 0 } } },
  { { { 52, 0, 1 }, { 48, 61, 0 } } },
  { { { 52, 0, 2 }, { 48, 62, 0 } } },
  { { { 53, 0, 1 }, { 56, 47, 0 } } },
  { { { 53, 0, 0 }, { 48, 63, 0 } } },
  { { { 53, 0, 1 }, { 49, 62, 0 } } },
  { { { 53, 0, 2 }, { 49, 63, 0 } } },
  { { { 54, 0, 1 }, { 58, 46, 0 } } },
  { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } },
  { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } },
  { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } },
  { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } },
  { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } },
  { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } },
  { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } },
  { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } },
  { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } },
  { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } },
  { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } },
  { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } },
  { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } },
  { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } },
  { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } },
  { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } },
  { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } },
  { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } },
  { { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel lookup selector.  The three entries presumably correspond to
  the red (5-bit), green (6-bit) and blue (5-bit) channels of 5:6:5 color,
  which is why the 5-bit table appears twice — confirm against the writer.
*/
static const DDSSingleColorLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
  BC7 interpolation weights (0..64 fixed point) for 2-, 3- and 4-bit
  palette indices respectively.
*/
static const unsigned char BC7_weight2[] = { 0, 21, 43, 64 };
static const unsigned char BC7_weight3[] = { 0, 9, 18, 27, 37, 46, 55, 64 };
static const unsigned char BC7_weight4[] = { 0, 4, 9, 13, 17, 21, 26, 30, 34,
  38, 43, 47, 51, 55, 60, 64 };
/*
  Stores info for each of the eight BC7 block modes; field order matches
  BC7ModeInfo: partition_bits, num_subsets, color_precision,
  alpha_precision, num_pbits, index_precision, index2_precision.
*/
static const BC7ModeInfo BC7_mode_info[8] =
{
  { 4, 3, 4, 0, 6, 3, 0 }, /* mode 0 */
  { 6, 2, 6, 0, 2, 3, 0 }, /* mode 1 */
  { 6, 3, 5, 0, 0, 2, 0 }, /* mode 2 */
  { 6, 2, 7, 0, 4, 2, 0 }, /* mode 3 */
  { 0, 1, 5, 6, 0, 2, 3 }, /* mode 4 */
  { 0, 1, 7, 8, 0, 2, 2 }, /* mode 5 */
  { 0, 1, 7, 7, 2, 4, 0 }, /* mode 6 */
  { 6, 2, 5, 5, 4, 2, 0 }, /* mode 7 */
};
/*
  BC7 partition tables: for each partition id (0-63) the subset number of
  each of the 16 pixels in a 4x4 block.  Index [0] holds the two-subset
  partitions, index [1] the three-subset partitions.
*/
static const unsigned char BC7_partition_table[2][64][16] =
{
  { /* BC7 Partition Set for 2 Subsets */
    { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
    { 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1 },
    { 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1 },
    { 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0 },
    { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1 },
    { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0 },
    { 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0 },
    { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
    { 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0 },
    { 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1 },
    { 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0 },
    { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0 },
    { 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1 },
    { 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0 },
    { 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0 },
    { 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0 },
    { 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 },
    { 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1 },
    { 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0 },
    { 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0 },
    { 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0 },
    { 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1 },
    { 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0 },
    { 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0 },
    { 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1 },
    { 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1 },
    { 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1 },
    { 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
    { 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0 },
    { 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1 }
  },
  { /* BC7 Partition Set for 3 Subsets */
    { 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 1, 2, 2, 2, 2 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1 },
    { 0, 0, 0, 0, 2, 0, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1 },
    { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2 },
    { 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2 },
    { 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2 },
    { 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2 },
    { 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0, 2, 2, 2, 0 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0 },
    { 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1 },
    { 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2, 0, 2, 2, 2 },
    { 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 2, 2, 1 },
    { 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2 },
    { 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 1, 0, 2, 2, 1, 0 },
    { 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0 },
    { 0, 0, 1, 2, 0, 0, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2 },
    { 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 0, 1, 1, 0 },
    { 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1 },
    { 0, 0, 2, 2, 1, 1, 0, 2, 1, 1, 0, 2, 0, 0, 2, 2 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 2, 2, 2, 2 },
    { 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1 },
    { 0, 0, 0, 0, 2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 2, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 2, 2, 2 },
    { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 2, 2, 0, 2, 2, 2 },
    { 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0 },
    { 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 },
    { 0, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 1, 0, 1, 2, 0 },
    { 0, 0, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1 },
    { 0, 0, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 1, 1, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 1, 1 },
    { 0, 2, 2, 0, 1, 2, 2, 1, 0, 2, 2, 0, 1, 2, 2, 1 },
    { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 0, 1 },
    { 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2 },
    { 0, 2, 2, 2, 0, 1, 1, 1, 0, 2, 2, 2, 0, 1, 1, 1 },
    { 0, 0, 0, 2, 1, 1, 1, 2, 0, 0, 0, 2, 1, 1, 1, 2 },
    { 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2 },
    { 0, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2 },
    { 0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 2 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2 },
    { 0, 0, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2 },
    { 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1 },
    { 0, 2, 2, 2, 1, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 2 },
    { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 1, 1, 1, 2, 0, 1, 1, 2, 2, 0, 1, 2, 2, 2, 0 }
  }
};
/*
  BC7 anchor pixel indices per partition id.  The anchor pixel of each
  subset has one fewer index bit (its high bit is implicitly zero).
*/
static const unsigned char BC7_anchor_index_table[4][64] =
{
  /* Anchor index values for the first subset */
  {
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0
  },
  /* Anchor index values for the second subset of two-subset partitioning */
  {
    15,15,15,15,15,15,15,15,
    15,15,15,15,15,15,15,15,
    15, 2, 8, 2, 2, 8, 8,15,
    2, 8, 2, 2, 8, 8, 2, 2,
    15,15, 6, 8, 2, 8,15,15,
    2, 8, 2, 2, 2,15,15, 6,
    6, 2, 6, 8,15,15, 2, 2,
    15,15,15,15,15, 2, 2,15
  },
  /* Anchor index values for the second subset of three-subset partitioning */
  {
    3, 3,15,15, 8, 3,15,15,
    8, 8, 6, 6, 6, 5, 3, 3,
    3, 3, 8,15, 3, 3, 6,10,
    5, 8, 8, 6, 8, 5,15,15,
    8,15, 3, 5, 6,10, 8,15,
    15, 3,15, 5,15,15,15,15,
    3,15, 5, 5, 5, 8, 5,10,
    5,10, 8,13,15,12, 3, 3
  },
  /* Anchor index values for the third subset of three-subset partitioning */
  {
    15, 8, 8, 3,15,15, 3, 8,
    15,15,15,15,15,15,15, 8,
    15, 8,15, 3,15, 8,15, 8,
    3,15, 6,10,15,15,10, 8,
    15, 3,15,10,10, 8, 9,10,
    6,15, 8,15, 3, 6, 6, 8,
    15, 3,15,15,15,15,15,15,
    15,15,15,15, 3,15,15, 8
  }
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
if (min > max) \
min = max; \
if ((ssize_t) max - min < steps) \
max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
min = MagickMax(0, (ssize_t) max - steps)
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
/* Forward declaration: the DDS encoder is defined later in this file. */
static MagickBooleanType
  WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *);
/* Component-wise 4-vector sum: *destination = left + right. */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  DDSVector4
    sum;

  sum.x = left.x + right.x;
  sum.y = left.y + right.y;
  sum.z = left.z + right.z;
  sum.w = left.w + right.w;
  *destination = sum;
}
/* Clamp every component of a 4-vector into the unit interval [0, 1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMax(0.0f,MagickMin(1.0f,value->x));
  value->y = MagickMax(0.0f,MagickMin(1.0f,value->y));
  value->z = MagickMax(0.0f,MagickMin(1.0f,value->z));
  value->w = MagickMax(0.0f,MagickMin(1.0f,value->w));
}
/* Clamp every component of a 3-vector into the unit interval [0, 1]. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMax(0.0f,MagickMin(1.0f,value->x));
  value->y = MagickMax(0.0f,MagickMin(1.0f,value->y));
  value->z = MagickMax(0.0f,MagickMin(1.0f,value->z));
}
/* Narrow a 4-vector to a 3-vector, discarding the w component. */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  DDSVector3
    copy;

  copy.x = source.x;
  copy.y = source.y;
  copy.z = source.z;
  *destination = copy;
}
/* Copy a 4-vector; plain struct assignment copies all four components. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  *destination = source;
}
/* Component-wise *destination = c - a*b (negative multiply-subtract). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  DDSVector4
    result;

  result.x = c.x - (a.x * b.x);
  result.y = c.y - (a.y * b.y);
  result.z = c.z - (a.z * b.z);
  result.w = c.w - (a.w * b.w);
  *destination = result;
}
/* Component-wise 4-vector product: *destination = left * right. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  DDSVector4
    product;

  product.x = left.x * right.x;
  product.y = left.y * right.y;
  product.z = left.z * right.z;
  product.w = left.w * right.w;
  *destination = product;
}
/* Component-wise 3-vector product: *destination = left * right. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  DDSVector3
    product;

  product.x = left.x * right.x;
  product.y = left.y * right.y;
  product.z = left.z * right.z;
  *destination = product;
}
/* Component-wise fused form *destination = a*b + c for 4-vectors. */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  DDSVector4
    result;

  result.x = (a.x * b.x) + c.x;
  result.y = (a.y * b.y) + c.y;
  result.z = (a.z * b.z) + c.z;
  result.w = (a.w * b.w) + c.w;
  *destination = result;
}
/* Component-wise *destination = a*b + c for 3-vectors. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  DDSVector3
    result;

  result.x = (a.x * b.x) + c.x;
  result.y = (a.y * b.y) + c.y;
  result.z = (a.z * b.z) + c.z;
  *destination = result;
}
/*
  Component-wise reciprocal: *dest = 1 / value.  A zero component yields
  an IEEE infinity; callers are expected to avoid or tolerate that.
*/
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *dest)
{
  dest->x = 1.0f / value.x;
  dest->y = 1.0f / value.y;
  dest->z = 1.0f / value.z;
  dest->w = 1.0f / value.w;
}
/* Component-wise difference of two 4D vectors: *dest = lhs - rhs. */
static inline void VectorSubtract(const DDSVector4 lhs,
  const DDSVector4 rhs, DDSVector4 *dest)
{
  dest->x = lhs.x - rhs.x;
  dest->y = lhs.y - rhs.y;
  dest->z = lhs.z - rhs.z;
  dest->w = lhs.w - rhs.w;
}
/* Component-wise difference of two 3D vectors: *dest = lhs - rhs. */
static inline void VectorSubtract3(const DDSVector3 lhs,
  const DDSVector3 rhs, DDSVector3 *dest)
{
  dest->x = lhs.x - rhs.x;
  dest->y = lhs.y - rhs.y;
  dest->z = lhs.z - rhs.z;
}
/* Truncate each component of a 4D vector toward zero, in place. */
static inline void VectorTruncate(DDSVector4 *v)
{
  if (v->x > 0.0f) v->x = floor(v->x); else v->x = ceil(v->x);
  if (v->y > 0.0f) v->y = floor(v->y); else v->y = ceil(v->y);
  if (v->z > 0.0f) v->z = floor(v->z); else v->z = ceil(v->z);
  if (v->w > 0.0f) v->w = floor(v->w); else v->w = ceil(v->w);
}
/* Truncate each component of a 3D vector toward zero, in place. */
static inline void VectorTruncate3(DDSVector3 *v)
{
  if (v->x > 0.0f) v->x = floor(v->x); else v->x = ceil(v->x);
  if (v->y > 0.0f) v->y = floor(v->y); else v->y = ceil(v->y);
  if (v->z > 0.0f) v->z = floor(v->z); else v->z = ceil(v->z);
}
/*
  Round `value` to the nearest integer and clamp the result into
  [0, limit].

  The rounded value must be held in a signed intermediate: the previous
  revision stored it directly in a size_t, so a negative input wrapped
  to a huge unsigned value and was clamped to `limit` instead of 0 (the
  check `result < 0.0f` can never be true for an unsigned integer).
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  ssize_t
    result = (ssize_t) (value + 0.5f);

  if (result < 0)
    return(0);
  if ((size_t) result > limit)
    return(limit);
  return((size_t) result);
}
/*
  Pack a normalized RGB color (components in [0,1]) into a 16-bit
  5:6:5 value (red in the top 5 bits, green in the middle 6, blue low 5).
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  const size_t
    red = ClampToLimit(31.0f*point.x,31),
    green = ClampToLimit(63.0f*point.y,63),
    blue = ClampToLimit(31.0f*point.z,31);

  return((red << 11) | (green << 5) | blue);
}
/*
  Return the BC7 subset index of pixel `pixelIndex` for the given
  partition; single-subset modes always map to subset 0.
*/
static inline unsigned char GetSubsetIndex(unsigned char numSubsets,
  unsigned char partition_id,size_t pixelIndex)
{
  switch (numSubsets)
  {
    case 2:
      return(BC7_partition_table[0][partition_id][pixelIndex]);
    case 3:
      return(BC7_partition_table[1][partition_id][pixelIndex]);
    default:
      return(0);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /* A DDS file begins with the 4-byte magic "DDS " (0x20534444). */
  if ((length >= 4) && (LocaleNCompare((char *) magick,"DDS ", 4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Parse the DDS file header (and the optional DX10 extension header)
  into *dds_info.  The 4-byte "DDS " magic is assumed to have been
  checked already; this routine seeks past it, validates the fixed
  structure sizes (124-byte header, 32-byte pixel format) and the
  mandatory header flags, and reads every field little-endian in file
  order.  Returns MagickFalse on any structural mismatch.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;
  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field: dwSize must be 124 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags: caps, width, height and pixel format must be set */
  required=(size_t) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /* Read pixel format structure; dwSize must be 32 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  /* Read optional DX10 header if available (fourcc "DX10") */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      /* No DX10 header: zero the extension fields so later checks
         against DXGI_FORMAT_* values fail cleanly. */
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }
  return(MagickTrue);
}
/*
  Write one decoded DXT1 4x4 block into the pixel patch `q` starting at
  image coordinate (x,y).  `bits` holds the 32 two-bit color codes of
  the block.  Returns MagickFalse when a texel with a non-zero decoded
  `a` value is encountered while the image has no alpha channel enabled;
  the caller uses this to enable alpha and re-run the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  ssize_t
    i;
  ssize_t
    j;
  unsigned char
    code;
  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* Texels outside the image (right/bottom edge of a partial block)
         are skipped; note q only advances for in-bounds texels, matching
         the patch dimensions requested from QueueAuthenticPixels. */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* Two bits per texel select one of the four palette entries. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  Decode the mipmap chain that follows the top-level image, appending
  each level as a new image in the list.  `decoder` is the same
  per-level pixel decoder used for the main image.  Returns MagickFalse
  on EOF, allocation failure, or a decoder error.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  /*
    Mipmaps are only present for plain textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;
      size_t
        h,
        w;
      /* Each level is half the previous size (integer halving). */
      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        /* Propagate the alpha trait the main image decode established. */
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        /* Stop once the smallest (1x1) level has been decoded. */
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  Expand the two 5:6:5 DXT endpoint colors c0/c1 into the four-entry
  block palette.  When alpha is honored and c0 <= c1 the block is in
  three-color mode: the third entry is the midpoint and the fourth is
  transparent black.  Otherwise the third and fourth entries are the
  1/3 and 2/3 interpolants.  The `a` array holds opacity (0 = opaque).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if ((ignoreAlpha == MagickFalse) && (c0 <= c1))
    {
      /* Three-color mode: midpoint plus transparent black. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
  else
    {
      /* Four-color mode: 1/3 and 2/3 interpolants between endpoints. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
}
/*
  Decode a DXT1 (BC1) compressed image: one 8-byte block per 4x4 texel
  tile, consisting of two 5:6:5 endpoint colors followed by 32 bits of
  two-bit palette indices.  If a block turns out to need transparency
  while the image has no alpha channel, alpha is enabled for the whole
  image and the block is re-written.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;
  Quantum
    *q;
  ssize_t
    x;
  size_t
    bits;
  ssize_t
    y;
  unsigned short
    c0,
    c1;
  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on (clipped at image edges) */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Block needs transparency: enable alpha and redo the block */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  Seek past the mipmap chain of a block-compressed (DXTn/BC7) image
  without decoding it.  `texel_size` is the byte size of one 4x4 block
  (8 for DXT1, 16 for DXT3/DXT5/BC7).  Returns MagickFalse only when
  the blob is already at EOF.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,const DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;
      ssize_t
        i;
      size_t
        h,
        w;
      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level stores ceil(w/4) x ceil(h/4) blocks. */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Decode a DXT1 image: read the top level, then either decode the
  mipmap chain as extra images or skip it (DXT1 blocks are 8 bytes).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  Decode a DXT3 (BC2) compressed image: each 16-byte block holds 64 bits
  of explicit 4-bit alpha (read here as two 32-bit words, a0 for rows
  0-1 and a1 for rows 2-3) followed by a DXT1-style color block decoded
  in four-color mode (alpha in the color block is ignored).
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;
  Quantum
    *q;
  ssize_t
    i,
    x;
  unsigned char
    alpha;
  size_t
    a0,
    a1,
    bits,
    code;
  ssize_t
    j,
    y;
  unsigned short
    c0,
    c1;
  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes) */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      /* ignoreAlpha=MagickTrue: DXT3 color blocks are always 4-color */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Skip texels clipped at the right/bottom image edge. */
          if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode a DXT3 image: read the top level, then either decode the
  mipmap chain as extra images or skip it (DXT3 blocks are 16 bytes).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  Decode a DXT5 (BC3) compressed image.  Each 16-byte block holds two
  8-bit alpha endpoints, 48 bits of 3-bit alpha indices (read as a
  32-bit word plus a 16-bit word into a 64-bit accumulator), and a
  DXT1-style color block decoded in four-color mode.  Alpha codes 2-7
  are interpolated: 8-value mode when a0 > a1, otherwise 6-value mode
  with codes 6 and 7 mapped to 0 and 255.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;
  MagickSizeType
    alpha_bits;
  Quantum
    *q;
  ssize_t
    i,
    x;
  unsigned char
    a0,
    a1;
  size_t
    alpha,
    bits,
    code,
    alpha_code;
  ssize_t
    j,
    y;
  unsigned short
    c0,
    c1;
  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): 2 endpoints + 48 index bits */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      /* ignoreAlpha=MagickTrue: DXT5 color blocks are always 4-color */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Skip texels clipped at the right/bottom image edge. */
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /* Extract alpha value: 3-bit code selects/interpolates */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode a DXT5 image: read the top level, then either decode the
  mipmap chain as extra images or skip it (DXT5 blocks are 16 bytes).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  Return the single bit at *start_bit (LSB-first within each byte) of a
  16-byte block and advance the cursor.  Reads past the 128-bit block
  return 0, but the cursor still advances.
*/
static unsigned char GetBit(const unsigned char *block,size_t *start_bit)
{
  const size_t
    bit = *start_bit;

  (*start_bit)=bit+1;
  if ((bit >> 3) > 15)
    return(0);
  return((unsigned char) ((block[bit >> 3] >> (bit & 7)) & 0x01));
}
/*
  Return `num_bits` bits (num_bits <= 8, LSB-first) starting at
  *start_bit of a 16-byte block and advance the cursor.  Requests whose
  starting byte lies past the block return 0.

  When the field straddles a byte boundary the low part comes from the
  current byte and the high part from the next one.  The read of
  block[index + 1] is now guarded: the previous revision read block[16]
  (out of bounds) for a straddling request beginning in the last byte;
  the missing byte is treated as 0 instead.
*/
static unsigned char GetBits(const unsigned char *block,size_t *start_bit,
  unsigned char num_bits)
{
  size_t
    base,
    first_bits,
    index,
    next_bits;

  unsigned char
    ret;

  index=(*start_bit) >> 3;
  base=(*start_bit)-(index << 3);
  if (index > 15)
    return(0);
  if (base + num_bits > 8)
    {
      first_bits=8-base;
      next_bits=num_bits-first_bits;
      ret=(unsigned char) (block[index] >> base);
      if (index < 15)
        ret=(unsigned char) (ret | (((block[index + 1]) &
          ((1u << next_bits) - 1)) << first_bits));
    }
  else
    {
      ret=(unsigned char) ((block[index] >> base) & ((1 << num_bits) - 1));
    }
  (*start_bit)+=num_bits;
  return(ret);
}
/*
  Return MagickTrue when pixel `pixelIndex` is the anchor pixel of its
  subset (the pixel whose palette index is stored with one less bit).
  The anchor table row depends on which subset of how many: row 0 for
  the first subset, 1 for the second of two, 2 for the second of three,
  3 for the third of three.
*/
static MagickBooleanType IsPixelAnchorIndex(unsigned char subset_index,
  unsigned char num_subsets,size_t pixelIndex,unsigned char partition_id)
{
  size_t
    table_index;

  table_index=3;
  if (subset_index == 0)
    table_index=0;
  else if (subset_index == 1)
    table_index=(num_subsets == 2) ? 1 : (num_subsets == 3) ? 2 : 3;
  return(BC7_anchor_index_table[table_index][partition_id] == pixelIndex ?
    MagickTrue : MagickFalse);
}
/*
  Decode the endpoint colors of a BC7 block.  Endpoints are stored as
  per-channel groups (all reds, then greens, blues, and - for modes
  4-7 - alphas), optionally followed by shared or per-endpoint "p" bits
  that contribute one extra low bit to every channel.  The quantized
  values are then expanded to 8 bits by shifting so the MSB lands in
  bit 7 and replicating the top bits into the vacated low bits.

  Fix: the expansion of the alpha channel now uses the mode's alpha
  precision.  The previous revision shifted alpha by (8 - color_bits),
  but per the BC7 specification alpha precision differs from color
  precision in mode 4 (6 vs 5 bits) and mode 5 (8 vs 7 bits), so the
  alpha endpoints of those modes were decoded incorrectly.
*/
static void ReadEndpoints(BC7Colors *endpoints,const unsigned char *block,
  size_t mode,size_t *start_bit)
{
  MagickBooleanType
    has_alpha,
    has_pbits;
  unsigned char
    alpha_bits,
    color_bits,
    pbit,
    pbit0,
    pbit1;
  size_t
    num_subsets,
    i;
  num_subsets=(size_t) BC7_mode_info[mode].num_subsets;
  color_bits=BC7_mode_info[mode].color_precision;
  /* red */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->r[i]=GetBits(block,start_bit,color_bits);
  /* green */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->g[i]=GetBits(block,start_bit,color_bits);
  /* blue */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->b[i]=GetBits(block,start_bit,color_bits);
  /* alpha (only modes 4-7 store an alpha channel) */
  alpha_bits=BC7_mode_info[mode].alpha_precision;
  has_alpha=mode >= 4;
  if (has_alpha != MagickFalse)
    {
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=GetBits(block,start_bit,alpha_bits);
    }
  /* handle modes that have p bits */
  has_pbits=(mode == 0) || (mode == 1) || (mode == 3) || (mode == 6) || (mode == 7);
  if (has_pbits != MagickFalse)
    {
      /* Make room for the extra shared low bit in every channel. */
      for (i=0; i < num_subsets * 2; i++)
      {
        endpoints->r[i] <<= 1;
        endpoints->g[i] <<= 1;
        endpoints->b[i] <<= 1;
        endpoints->a[i] <<= 1;
      }
      /* mode 1 shares a p-bit for both endpoints of each subset */
      if (mode == 1)
        {
          pbit0=GetBit(block,start_bit);
          pbit1=GetBit(block,start_bit);
          endpoints->r[0] |= pbit0;
          endpoints->g[0] |= pbit0;
          endpoints->b[0] |= pbit0;
          endpoints->r[1] |= pbit0;
          endpoints->g[1] |= pbit0;
          endpoints->b[1] |= pbit0;
          endpoints->r[2] |= pbit1;
          endpoints->g[2] |= pbit1;
          endpoints->b[2] |= pbit1;
          endpoints->r[3] |= pbit1;
          endpoints->g[3] |= pbit1;
          endpoints->b[3] |= pbit1;
        }
      else
        {
          /* one p-bit per endpoint, applied to every channel */
          for (i=0; i < num_subsets * 2; i++)
          {
            pbit=GetBit(block,start_bit);
            endpoints->r[i] |= pbit;
            endpoints->g[i] |= pbit;
            endpoints->b[i] |= pbit;
            endpoints->a[i] |= pbit;
          }
        }
    }
  /* 1 bit of effective precision gained from the pbit */
  if (has_pbits != MagickFalse)
    {
      color_bits++;
      alpha_bits++;
    }
  /* shift so the MSB lies in bit 7, then replicate the top bits into
     the vacated low bits (standard BC7 endpoint unquantization) */
  for (i=0; i < num_subsets * 2; i++)
  {
    endpoints->r[i] <<= (8 - color_bits);
    endpoints->g[i] <<= (8 - color_bits);
    endpoints->b[i] <<= (8 - color_bits);
    endpoints->r[i]=endpoints->r[i] | (endpoints->r[i] >> color_bits);
    endpoints->g[i]=endpoints->g[i] | (endpoints->g[i] >> color_bits);
    endpoints->b[i]=endpoints->b[i] | (endpoints->b[i] >> color_bits);
    if (has_alpha != MagickFalse)
      {
        endpoints->a[i] <<= (8 - alpha_bits);
        endpoints->a[i]=endpoints->a[i] | (endpoints->a[i] >> alpha_bits);
      }
  }
  /* modes without stored alpha are fully opaque */
  if (has_alpha == MagickFalse)
    {
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=255;
    }
}
/*
  Decode a BC7 (BPTC) compressed image.  Each 16-byte block starts with
  a unary mode prefix (the mode is the index of the first set bit),
  followed by mode-dependent fields: partition id, rotation, index
  selector, endpoints, and the color/alpha index streams.  Decoded
  texels are interpolated between the endpoints of the subset each
  pixel belongs to, using 64-based interpolation weights.
*/
static MagickBooleanType ReadBC7Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  BC7Colors
    colors;
  Quantum
    *q;
  size_t
    mode,
    start_bit;
  ssize_t
    count,
    i,
    x,
    y;
  unsigned char
    a,
    alpha_indices[16],
    b,
    block[16],
    c0,
    c1,
    color_indices[16],
    g,
    index_prec,
    index2_prec,
    num_bits,
    num_subsets,
    partition_id,
    r,
    rotation,
    selector_bit,
    subset_indices[16],
    weight;
  magick_unreferenced(dds_info);
  memset(alpha_indices,0,sizeof(alpha_indices));
  memset(block,0,sizeof(block));
  memset(color_indices,0,sizeof(color_indices));
  memset(subset_indices,0,sizeof(subset_indices));
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 16 bytes of data from the image */
      count=ReadBlob(image,16,block);
      if (count != 16)
        return(MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Get the mode of the block: count leading zero bits (unary) */
      start_bit=0;
      while (start_bit <= 8 && !GetBit(block, &start_bit)) {}
      mode=start_bit-1;
      /* mode 8 (all-zero prefix) is reserved/invalid */
      if (mode > 7)
        return(MagickFalse);
      num_subsets=BC7_mode_info[mode].num_subsets;
      partition_id=0;
      /* only these modes have more than 1 subset */
      if ((mode == 0) || (mode == 1) || (mode == 2) || (mode == 3) || (mode == 7))
        {
          partition_id=GetBits(block,&start_bit,BC7_mode_info[mode].partition_bits);
          if (partition_id > 63)
            return(MagickFalse);
        }
      /* modes 4 and 5 can rotate alpha into one of the color channels */
      rotation=0;
      if ((mode == 4) || (mode == 5))
        rotation=GetBits(block,&start_bit,2);
      /* mode 4's selector swaps the 2-bit/3-bit index stream roles */
      selector_bit=0;
      if (mode == 4)
        selector_bit=GetBit(block, &start_bit);
      ReadEndpoints(&colors,block,mode,&start_bit);
      index_prec=BC7_mode_info[mode].index_precision;
      index2_prec=BC7_mode_info[mode].index2_precision;
      if ((mode == 4) && (selector_bit == 1))
        {
          /* selector set: alpha uses the 2-bit stream (read first, with
             the anchor entry stored in one less bit), color the 3-bit */
          index_prec=3;
          alpha_indices[0]=GetBit(block,&start_bit);
          for (i = 1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,2);
        }
      /* get color and subset indices */
      for (i=0; i < 16; i++)
      {
        subset_indices[i]=GetSubsetIndex(num_subsets,partition_id,i);
        num_bits=index_prec;
        /* anchor pixels store one less index bit (MSB implied 0) */
        if (IsPixelAnchorIndex(subset_indices[i],num_subsets,i,partition_id))
          num_bits--;
        color_indices[i]=GetBits(block,&start_bit,num_bits);
      }
      /* get alpha indices if the block has it */
      if ((mode == 5) || ((mode == 4) && (selector_bit == 0)))
        {
          alpha_indices[0]=GetBits(block,&start_bit,index2_prec - 1);
          for (i=1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,index2_prec);
        }
      /* Write the pixels */
      for (i=0; i < 16; i++)
      {
        /* c0/c1 index the endpoint pair of this pixel's subset */
        c0=2 * subset_indices[i];
        c1=(2 * subset_indices[i]) + 1;
        /* Color Interpolation: 64-based weights per index precision */
        switch(index_prec)
        {
          case 2: weight=BC7_weight2[color_indices[i]]; break;
          case 3: weight=BC7_weight3[color_indices[i]]; break;
          default: weight=BC7_weight4[color_indices[i]];
        }
        r=((64 - weight) * colors.r[c0] + weight * colors.r[c1] + 32) >> 6;
        g=((64 - weight) * colors.g[c0] + weight * colors.g[c1] + 32) >> 6;
        b=((64 - weight) * colors.b[c0] + weight * colors.b[c1] + 32) >> 6;
        a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
        /* Interpolate alpha for mode 4 and 5 blocks */
        if (mode == 4 || mode == 5)
          {
            weight=BC7_weight2[alpha_indices[i]];
            if (mode == 4 && selector_bit == 0)
              weight=BC7_weight3[alpha_indices[i]];
            a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
          }
        /* undo the channel rotation applied by the encoder */
        switch (rotation)
        {
          case 1:
            Swap(a,r);
            break;
          case 2:
            Swap(a,g);
            break;
          case 3:
            Swap(a,b);
            break;
        }
        SetPixelRed(image,ScaleCharToQuantum((unsigned char)r),q);
        SetPixelGreen(image,ScaleCharToQuantum((unsigned char)g),q);
        SetPixelBlue(image,ScaleCharToQuantum((unsigned char)b),q);
        SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)a),q);
        q+=GetPixelChannels(image);
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode a BC7 image: read the top level, then either decode the
  mipmap chain as extra images or skip it (BC7 blocks are 16 bytes).
*/
static MagickBooleanType ReadBC7(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadBC7Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadBC7Pixels,exception));
}
/*
  Decode uncompressed RGB pixel data row by row.  The layout depends on
  the declared bit count / DXGI format: 8-bit gray, 16-bit 5:6:5, or
  byte-ordered BGR with an optional padding byte for 32-bit BGRX.
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;
  ssize_t
    x,
    y;
  unsigned short
    color;
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        /* single luminance byte */
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* 5:6:5 packed color; each field is rescaled to 0..255 */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* byte-ordered BGR (24-bit) or BGRX (32-bit, pad skipped) */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  Seek past the mipmap chain of an uncompressed image without decoding
  it.  `pixel_size` is the byte size of one pixel.  Returns MagickFalse
  only when the blob is already at EOF.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,const DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;
      ssize_t
        i;
      size_t
        h,
        w;
      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* each level stores w*h pixels of pixel_size bytes */
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Decode an uncompressed RGB image: validate/configure the pixel layout,
  read the top level, then either decode the mipmap chain as extra
  images or skip it.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8 ||
      dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
             dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    /* only the 5:6:5 layout is supported for 16-bit data */
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    /* NOTE(review): pixel size 3 matches the 24-bit layout only; for the
       8/16/32-bit layouts accepted above the skip offset looks wrong --
       confirm against upstream before relying on mipmap positions. */
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  Decode uncompressed RGBA pixel data row by row.  16-bit layouts are
  distinguished by their channel bit masks (1-bit alpha 5:5:5:1,
  luminance+alpha 8:8, or 4:4:4:4); 32-bit data is either byte-ordered
  RGBA (when the red mask is the low byte) or BGRA.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;
  ssize_t
    alphaBits,
    x,
    y;
  unsigned short
    color;
  /* alphaBits selects the 16-bit sub-layout: 1 = 5:5:5:1,
     2 = luminance+alpha, 4 = 4:4:4:4; 0 means 32-bit data */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 5:5:5:1 -- top bit is a binary alpha flag */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* 8-bit luminance in the low byte, 8-bit alpha in the high */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4:4:4:4 -- each nibble rescaled to 0..255 */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM ||
          IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,0xff000000))
        {
          /* byte-ordered RGBA */
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      else
        {
          /* byte-ordered BGRA */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode an uncompressed RGBA image: read the top level, then either
  decode the mipmap chain as extra images or skip it (4 bytes/pixel).
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;
  CompressionType
    compression;
  DDSInfo
    dds_info;
  DDSDecoder
    *decoder;
  Image
    *image;
  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;
  PixelTrait
    alpha_trait;
  size_t
    n,
    num_images;
  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse,
  volume=MagickFalse,
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.  ThrowReaderException() destroys the image
    and returns from this function, so every failure path below cleans up.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /*
    Determine pixel format: pick the decoder callback, the reported
    compression type, and whether the result carries an alpha channel.
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          /* Luminance-only data is routed through the RGB decoder. */
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          /* DX10 extension header: only 2D textures are supported. */
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            case DXGI_FORMAT_BC7_UNORM:
            case DXGI_FORMAT_BC7_UNORM_SRGB:
            {
              alpha_trait = BlendPixelTrait;
              compression = BC7Compression;
              decoder = ReadBC7;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  /* NOTE(review): this unconditionally resets num_images, discarding the
     extArraySize assigned in the FOURCC_DX10 path above — confirm whether
     DX10 texture arrays are meant to decode more than one image here. */
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* Sanity-check the frame count against the actual blob size before
     committing to the list-length resource. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    /* In ping mode only the metadata is wanted; skip pixel decoding. */
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    /* Decode this frame (and possibly its mipmaps) with the callback
       selected above. */
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        /* Keep whatever frames decoded successfully before the failure. */
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  ssize_t
    i;

  /*
    Register each DDS format alias with identical read/write/identify
    handlers; the decoder requires a seekable stream.
  */
  for (i=0; i < (ssize_t) (sizeof(aliases)/sizeof(aliases[0])); i++)
  {
    entry=AcquireMagickInfo("DDS",aliases[i],"Microsoft DirectDraw Surface");
    entry->decoder=(DecodeImageHandler *) ReadDDSImage;
    entry->encoder=(EncodeImageHandler *) WriteDDSImage;
    entry->magick=(IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    i;

  /*
    Scatter the packed palette indices back onto the 4x4 block layout.  A
    map entry of -1 marks a texel with no corresponding source entry; it is
    assigned index 3 (the transparent code in DXT1 three-color mode).
  */
  for (i=0; i < 16; i++)
    target[i]=(map[i] == -1) ? 3 : source[map[i]];
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  ssize_t
    i;

  /*
    Remove every alias this module registered.
  */
  for (i=0; i < (ssize_t) (sizeof(aliases)/sizeof(aliases[0])); i++)
    (void) UnregisterMagickInfo(aliases[i]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  ssize_t
    i;

  size_t
    total;

  unsigned char
    palette[8];

  /*
    Quantize 16 alpha samples against the DXT5 alpha palette spanned by
    min/max with `steps' interpolants.  Slots 6 and 7 default to the fixed
    0/255 codes of five-step mode; seven-step mode overwrites them in the
    interpolation loop below.  Returns the accumulated squared error.
  */
  palette[0]=(unsigned char) min;
  palette[1]=(unsigned char) max;
  palette[6]=0;
  palette[7]=255;
  for (i=1; i < (ssize_t) steps; i++)
    palette[i+1]=(unsigned char) (((steps-i)*min+i*max)/steps);
  total=0;
  for (i=0; i < 16; i++)
  {
    size_t
      best,
      bestIndex,
      j,
      sample;

    /* A -1 sample marks an unused texel: emit index 0 at zero cost. */
    if (alphas[i] == -1)
      {
        indices[i]=0;
        continue;
      }
    sample=(size_t) alphas[i];
    best=SIZE_MAX;
    bestIndex=0;
    for (j=0; j < 8; j++)
    {
      size_t
        d;

      /* Unsigned wrap-around cancels when squared, so this equals
         (sample-code)^2 regardless of sign. */
      d=sample-(size_t) palette[j];
      d*=d;
      if (d < best)
        {
          best=d;
          bestIndex=j;
        }
    }
    indices[i]=(unsigned char) bestIndex;
    total+=best;
  }
  return total;
}
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;
  ssize_t
    i;
  size_t
    j;
  unsigned char
    c,
    *o,
    *p;
  /*
    Sort the block's points by their projection onto `axis' and record the
    permutation in slot `iteration' of the order table (16 entries per
    iteration).  Returns MagickFalse when the permutation duplicates one
    from an earlier iteration, signalling the cluster-fit search to stop.
  */
  o = order + (16*iteration);
  /* Project each point onto the axis; start with the identity order. */
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }
  /* Insertion sort of the projections, permuting the order in lockstep. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;
      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }
  /* Bail out if any previous iteration produced the same ordering. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;
    p = order + (16*i);
    same = MagickTrue;
    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
      {
        same = MagickFalse;
        break;
      }
    }
    if (same != MagickFalse)
      return MagickFalse;
  }
  /*
    Rebuild the weighted point list in sorted order and accumulate the
    weighted sum (x*w in xyz, w in the w component) for the solver.
  */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;
    j = (size_t) o[i];
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;
    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }
  return MagickTrue;
}
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;
  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;
  float
    bestError = 1e+37f;
  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;
  ssize_t
    i;
  unsigned char
    *o,
    order[128],
    unordered[16];
  /*
    High-quality DXT endpoint search (cluster fit, squish-style): order the
    points along the principal axis, then exhaustively try every split of
    that ordering into four runs (weights 1, 2/3, 1/3, 0) and solve the
    least-squares endpoints for each split.  The axis is re-derived from the
    best endpoints and the search repeated, up to 8 iterations or until an
    ordering repeats.
  */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  /* Constants carrying (alpha, alpha^2) in the (xyz, w) lanes. */
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* RGB565 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /* i/j/k are the boundaries of the four runs in the sorted order. */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;
      size_t
        ii,
        j,
        k,
        kmin;
      /* part0 accumulates the weight-1 run [0, i). */
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      /* part1 accumulates the 2/3 run [i, j); part2 the 1/3 run [j, k). */
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
        {
          VectorCopy44(pointsWeights[0],&part2);
          kmin = 1;
        }
        else
        {
          VectorInit(part2,0.0f);
          kmin = j;
        }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;
          float
            error;
          /* part3 is the remaining weight-0 run [k, count). */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          /* Solve the 2x2 normal equations for endpoints a and b. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          /* Clamp and snap both endpoints to the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,b,&e1);
          VectorMultiply(b,gridrcp,&b);
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          /* Metric-weighted squared error of this split. */
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          if (error < bestError)
          {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
            {
              /* Re-test under the lock: another thread may have won. */
              if (error < bestError)
              {
                VectorCopy43(a,start);
                VectorCopy43(b,end);
                bestError = error;
                besti = i;
                bestj = j;
                bestk = k;
                bestIteration = iterationIndex;
              }
            }
          }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* Stop when this iteration produced no improvement. */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    /* Refine: re-sort along the axis through the current best endpoints. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
      break;
  }
  /* Translate the winning split back into per-texel palette indices. */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;
  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;
  ssize_t
    i;
  size_t
    bestj,
    j;
  unsigned char
    closest[16];
  /*
    Fast DXT endpoint selection (range fit): take the two points with the
    extreme projections onto the principal axis as endpoints, snap them to
    the RGB565 grid, and assign every point to the nearest of the four
    interpolated palette colors under the supplied channel metric.
  */
  VectorInit3(half,0.5f);
  /* RGB565 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  if (count > 0)
  {
    VectorCopy43(points[0],start);
    VectorCopy43(points[0],end);
    min = max = Dot(points[0],principle);
    /* Track the min/max projections onto the principal axis. */
    for (i=1; i < (ssize_t) count; i++)
    {
      val = Dot(points[i],principle);
      if (val < min)
      {
        VectorCopy43(points[i],start);
        min = val;
      }
      else if (val > max)
      {
        VectorCopy43(points[i],end);
        max = val;
      }
    }
  }
  /* Snap both endpoints to the RGB565 grid (round, truncate, rescale). */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* Four-color DXT palette: endpoints plus two thirds-interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  /* Assign each point to its nearest palette entry (metric-weighted). */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
      {
        bestDist = d;
        bestj = j;
      }
    }
    closest[i] = (unsigned char) bestj;
  }
  RemapIndices(map, closest, indices);
}
static void ComputeEndPoints(const DDSSingleColorLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  ssize_t
    i;
  size_t
    c,
    maxError = SIZE_MAX;
  /*
    Pick precomputed DXT endpoints for a single-color block: for each of
    the two candidate source entries per lookup slot, sum the squared
    per-channel errors and keep the pair with the lowest total.  Despite
    its name, maxError tracks the best (smallest) error found so far.
  */
  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];
    size_t
      error = 0;
    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }
    if (error > maxError)
      continue;
    /* Rescale the table's 5/6/5-bit endpoints back to [0,1] floats. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;
    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;
    /* index 0 for the first candidate, 2 for the second — presumably the
       palette interpolant the table was built for; confirm against the
       lookup-table generator. */
    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;
  ssize_t
    i;
  /*
    Estimate the dominant eigenvector (principal axis) of the symmetric
    3x3 covariance matrix by fixed-count power iteration.  `covariance'
    is packed as [xx, xy, xz, yy, yz, zz] (see ComputeWeightedCovariance).
  */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;
  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;
  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;
  VectorInit(v,1.0f);
  /* Eight iterations of w = M*v followed by max-component scaling. */
  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;
    float
      a;
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;
    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;
    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;
    /* Normalize by the largest component; PerceptibleReciprocal guards
       against division by (near) zero. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));
    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }
  VectorCopy43(v,principle);
}
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;
  float
    total;
  size_t
    i;
  /*
    Compute the weighted 3x3 covariance of the points (weight in the w
    component), packed into covariance[6] as the upper triangle
    [xx, xy, xz, yy, yz, zz].
  */
  total = 0.0f;
  VectorInit3(centroid,0.0f);
  /* Weighted centroid of the point cloud. */
  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }
  /* Skip the divide when the total weight is vanishingly small. */
  if( total > 1.192092896e-07F)
  {
    centroid.x /= total;
    centroid.y /= total;
    centroid.z /= total;
  }
  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;
  /* Accumulate weighted outer products of the centered points. */
  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;
    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;
    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;
    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  ssize_t
    i;
  size_t
    err5,
    err7,
    j;
  unsigned char
    indices5[16],
    indices7[16];
  /*
    Emit the 8-byte DXT5 alpha block: try both the 5-interpolant mode
    (with implicit 0/255 codes) and the 7-interpolant mode, keep whichever
    quantizes the 16 samples with less total error, then pack the two
    endpoint bytes and sixteen 3-bit indices.
    NOTE(review): min5/max5/min7/max7 are passed by value, so FixRange()
    as called here can only matter if it is a macro — confirm its
    definition.
  */
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
  {
    /*
      The 7-mode won; rewrite its indices into the on-disk encoding used
      when alpha0 <= alpha1 (endpoints swapped, interpolants reversed).
    */
    for (i=0; i < 16; i++)
    {
      unsigned char
        index;
      index = indices7[i];
      if( index == 0 )
        indices5[i] = 1;
      else if (index == 1)
        indices5[i] = 0;
      else
        indices5[i] = 9 - index;
    }
    min5 = max7;
    max5 = min7;
  }
  /* Endpoint bytes followed by two 24-bit groups of 3-bit indices. */
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;
    /* Pack eight 3-bit indices into a 24-bit little-endian group. */
    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  ssize_t
    i;
  size_t
    a,
    b;
  unsigned char
    remapped[16];
  const unsigned char
    *ind;
  /*
    Emit the 8-byte DXT color block: two RGB565 endpoints (larger first,
    selecting four-color mode) followed by sixteen 2-bit palette indices.
  */
  a = ColorTo565(start);
  b = ColorTo565(end);
  for (i=0; i<16; i++)
  {
    /* If the endpoints must be swapped to keep a >= b, flipping the low
       index bit exchanges the roles of the two endpoints; equal endpoints
       make every index resolve to the same color, so use 0. */
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }
  if( a < b )
    Swap(a,b);
  /* Both endpoints little-endian, then four bytes of packed indices. */
  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  DDSVector3
    axis,
    high,
    low;

  DDSVector4
    weights;

  float
    cov[16];

  unsigned char
    block_indices[16];

  /*
    Choose DXT color endpoints along the principal axis of the block's
    colors, then emit the packed 8-byte color block.  Cluster fit is the
    slower, higher-quality search; range fit is the fast fallback and the
    only option for an empty point set.
  */
  VectorInit(weights,1.0f);
  VectorInit3(low,0.0f);
  VectorInit3(high,0.0f);
  ComputeWeightedCovariance(count,points,cov);
  ComputePrincipleComponent(cov,&axis);
  if ((clusterFit != MagickFalse) && (count != 0))
    CompressClusterFit(count,points,map,axis,weights,&low,&high,
      block_indices);
  else
    CompressRangeFit(count,points,map,axis,weights,&low,&high,block_indices);
  WriteIndices(image,low,high,block_indices);
}
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;
  ssize_t
    i;
  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];
  /*
    Encode a block that contains a single distinct color: convert it to
    8-bit RGB, look up the precomputed optimal endpoints and palette index,
    broadcast that index to all 16 texels, and write the color block.
  */
  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  ssize_t
    x;
  ssize_t
    i,
    y,
    bx,
    by;
  const Quantum
    *p;
  /*
    Block-compress the image 4x4 texels at a time.  For DXT5 an 8-byte
    alpha block precedes each 8-byte color block; DXT1 emits color blocks
    only, with sub-128 alpha texels treated as transparent.
  */
  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;
      DDSVector4
        point,
        points[16];
      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;
      ssize_t
        alphas[16],
        map[16];
      unsigned char
        alpha;
      /* Clip the block at the right and bottom image edges. */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;
      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* -1 marks texels outside the clipped block / unused entries. */
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;
          if (compression == FOURCC_DXT5)
            {
              /* Track endpoint candidates for both alpha modes: the
                 7-interpolant range and the 5-interpolant range whose
                 codebook already contains the exact 0 and 255 codes. */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }
          alphas[4*by + bx] = (size_t)alpha;
          /* Normalize the color to [0,1]; the w component is the fit
             weight (alpha-derived when weightByAlpha is set). */
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);
          /* Deduplicate identical colors by accumulating their weights.
             In DXT1 mode, mostly-transparent texels (alpha < 128) never
             match and keep map == -1, which later becomes the transparent
             index. */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }
          if (match != MagickFalse)
            continue;
          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }
      /* Accumulated weights are squared sums; take the root before use. */
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);
      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);
      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *q;

  MagickBooleanType
    has_alpha;

  ssize_t
    x,
    y;

  /*
    Emit the raw pixel payload in BGR byte order, appending an alpha byte
    per pixel when the image carries an alpha channel.
  */
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    q=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,q)));
      if (has_alpha != MagickFalse)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,q)));
      q+=GetPixelChannels(image);
    }
  }
}
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  /*
    Dispatch on the requested pixel format: FourCC formats are block
    compressed, everything else is written as raw BGR(A).
  */
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;
  Image
    *mipmap_image,
    *resize_image;
  MagickBooleanType
    fast_mipmaps,
    status;
  ssize_t
    i;
  size_t
    columns,
    rows;
  /*
    Append `mipmaps' successively halved levels after the base image.  When
    `fromlist' is set the levels come from the caller-supplied image list
    and must already have the expected dimensions; otherwise each level is
    resized from the base image, or (dds:fast-mipmaps) from the previously
    generated level.  Returns MagickFalse when a resize fails or a listed
    level has the wrong size.
  */
  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* Redirect the level's blob so its pixels append to the main stream. */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    /* Pass clusterFit/weightByAlpha in declaration order; the previous
       call swapped them, so mipmap levels were compressed with the two
       options exchanged relative to the base image. */
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Keep this level as the source for the next halving. */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];
  ssize_t
    i;
  unsigned int
    format,
    caps,
    flags;
  /*
    Write the 128-byte DDS file header: the "DDS " magic followed by the
    124-byte DDS_HEADER (which embeds the 32-byte DDS_PIXELFORMAT), all
    fields little-endian.
  */
  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;
  /* Compressed files report a linear size, uncompressed ones a pitch. */
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;
  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }
  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;
  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  /* dwSize: the header structure is always 124 bytes. */
  (void) WriteBlobLSBLong(image,124);
  (void) WriteBlobLSBLong(image,flags);
  /* dwHeight then dwWidth. */
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);
  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }
  /* dwDepth (unused) and dwMipMapCount (levels including the base). */
  (void) WriteBlobLSBLong(image,0x00);
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
  /* dwReserved1[11]: 44 bytes, used here to carry a software tag. */
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);
  /* DDS_PIXELFORMAT: dwSize is always 32. */
  (void) WriteBlobLSBLong(image,32);
  (void) WriteBlobLSBLong(image,format);
  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      /* No FourCC; emit the RGB(A) bit count and channel masks (BGRA
         byte order: blue in the low byte). */
      (void) WriteBlobLSBLong(image,0x00);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }
  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;
  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;
  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Select the output format: DXT5 by default, DXT1 for opaque images or
    when requested via magick name/compression/option, uncompressed RGB
    when compression is disabled.
  */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else if (image_info->compression == NoCompression)
    pixelFormat=DDPF_RGB;
  /* The dds:compression option overrides the settings above. */
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  /*
    Cluster fit (slow, higher quality) is opt-in; alpha weighting only
    applies to cluster fit with an alpha-capable compression.
  */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  /*
    Determine the mipmap count: "fromlist" takes levels from the image
    list; otherwise a numeric dds:mipmaps caps the chain, which is only
    generated for power-of-two dimensions.
  */
  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;
          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          /* Count halvings until both dimensions reach 1 (or the cap). */
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  /* dds:raw suppresses the header (and therefore any mipmaps). */
  option=GetImageOption(image_info,"dds:raw");
  if (IsStringTrue(option) == MagickFalse)
    WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  else
    mipmaps=0;
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
      mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}
|
GB_binop__isge_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint32)
// A*D function (colscale): GB (_AxD__isge_uint32)
// D*A function (rowscale): GB (_DxB__isge_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint32)
// C=scalar+B GB (_bind1st__isge_uint32)
// C=scalar+B' GB (_bind1st_tran__isge_uint32)
// C=A+scalar GB (_bind2nd__isge_uint32)
// C=A'+scalar GB (_bind2nd_tran__isge_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
// ---- type and operator configuration consumed by the included kernel
// templates below: C = A op B with op(x,y) = (x >= y), all types uint32_t ----
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags come from GB_control.h and allow compile-time pruning)
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT32 || GxB_NO_ISGE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// This variant is compiled out for ISGE: the generator emits the stub with
// the placeholder name "(none)" because ISGE is not an accumulable op here.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// Returns GrB_NO_VALUE when this specialization is compiled out (GB_DISABLE),
// which tells the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__isge_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable -- the inner block already returned; harmless artifact
// of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes results through this typed view of C->x
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes results through this typed view of C->x
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B. When is_eWiseUnion is true,
// alpha/beta scalars substitute for entries missing from A or B respectively.
GrB_Info GB (_AaddB__isge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scratch slicings allocated on demand by the template, freed below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional mask, C sparse/hypersparse.
GrB_Info GB (_AemultB_08__isge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP is 0 for ISGE, so only the non-flipped branch is compiled.
GrB_Info GB (_AemultB_02__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B with optional mask, where C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__isge_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present in B's bitmap/full pattern.
// Cx and Bx may alias, so no restrict qualifiers are added to the typed views.
GrB_Info GB (_bind1st__isge_uint32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Bx = (uint32_t *) Bx_input ;
const uint32_t xval = (*((uint32_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
    // skip positions with no entry in the bitmap
    if (!GBB (Bb, k)) continue ;
    const uint32_t bval = GBX (Bx, k, false) ;
    Cx [k] = (xval >= bval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present in A's bitmap/full pattern.
// Cx and Ax may alias, so no restrict qualifiers are added to the typed views.
GrB_Info GB (_bind2nd__isge_uint32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
const uint32_t yval = (*((uint32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // skip positions with no entry in the bitmap
    if (!GBB (Ab, k)) continue ;
    const uint32_t aval = GBX (Ax, k, false) ;
    Cx [k] = (aval >= yval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the bind-1st operator.
GrB_Info GB (_bind1st_tran__isge_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code compiled after this function (generator
// emits the redefinition here even though it is textually inside the body)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the bind-2nd operator.
GrB_Info GB (_bind2nd_tran__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
permute.c | /**
* @file permute.c
* @brief Functions for re-ordering a matrix
* @author Dominique LaSalle <lasalle@cs.umn.edu>
* Copyright 2014
* @version 1
* @date 2014-06-05
*/
#ifndef GOOSEBERRY_PERMUTE_C
#define GOOSEBERRY_PERMUTE_C
#include "permute.h"
/******************************************************************************
* DOMLIB IMPORTS **************************************************************
******************************************************************************/
#define DLPQ_PREFIX dd
#define DLPQ_KEY_T dim_t
#define DLPQ_VAL_T dim_t
#define DLPQ_MIN 1
#define DLPQ_STATIC
#include "dlpq_headers.h"
#undef DLPQ_STATIC
#undef DLPQ_MIN
#undef DLPQ_KEY_T
#undef DLPQ_VAL_T
#undef DLPQ_PREFIX
/******************************************************************************
* PRIVATE FUNCTIONS ***********************************************************
******************************************************************************/
/**
 * @brief Apply a Cuthill-McKee ordering to the diagonal block [start,end) of
 *     a CSR matrix, writing the new vertex order into perm[start..end).
 *     Only edges with both endpoints inside [start,end) are considered.
 *
 * @param start First row/vertex of the block (inclusive).
 * @param end One past the last row/vertex of the block.
 * @param rowptr CSR row pointer array.
 * @param rowind CSR column index array.
 * @param rowval CSR value array -- NOTE(review): accepted but never read;
 *     the ordering is purely structural.
 * @param perm Output permutation (old vertex id stored at new position).
 */
static void __cuthillmckee_block(
const dim_t start,
const dim_t end,
const ind_t * const rowptr,
const dim_t * const rowind,
const real_t * const rowval,
dim_t * const perm)
{
dim_t i,k,d,nordered,sr;
ind_t j;
dim_t * deg;
/* q orders frontier neighbors by degree; rem holds all unvisited vertices */
dd_pq_t * q, * rem;
q = dd_pq_create(start,end);
rem = dd_pq_create(start,end);
/* offset pointer */
/* deg is shifted by -start so it can be indexed directly with global vertex
 * ids in [start,end); the shift is undone before freeing below */
deg = dim_alloc(end-start)-start;
/* find my lowest degree vertex */
for (i=start;i<end;++i) {
d = 0;
for (j=rowptr[i];j<rowptr[i+1];++j) {
k = rowind[j];
/* count only neighbors inside this block */
if (k < end && k >= start) {
++d;
}
}
deg[i] = d;
dd_pq_push(d,i,rem);
}
sr = nordered = start;
/* loop through connected components */
while (rem->size > 0) {
/* seed the next component with the remaining minimum-degree vertex */
i = dd_pq_pop(rem);
perm[nordered++] = i;
/* perform bfs */
/* perm[sr..nordered) doubles as the BFS queue: sr is the read cursor,
 * nordered the write cursor */
while (sr < nordered) {
i = perm[sr++];
for (j=rowptr[i];j<rowptr[i+1];++j) {
k = rowind[j];
if (k < end && k >= start && dd_pq_contains(k,rem)) {
/* local non-zero */
dd_pq_remove(k,rem);
dd_pq_push(deg[k],k,q);
}
}
/* add rows/vertices in ascending order of local degree */
while (q->size > 0) {
k = dd_pq_pop(q);
perm[nordered++] = k;
}
}
}
dd_pq_free(q);
dd_pq_free(rem);
/* un-offset */
dl_free((deg+start));
}
/******************************************************************************
* PUBLIC FUNCTIONS ************************************************************
******************************************************************************/
/**
 * @brief Permute the rows and/or columns of a CSR matrix in place.
 *     A NULL rowperm or colperm means "identity" for that dimension.
 *     perm[i] = v means old row/column v moves to position i.
 *
 * @param nrows Number of rows.
 * @param ncols Number of columns.
 * @param rowptr CSR row pointer (modified in place when rowperm is given).
 * @param rowind CSR column indices (modified in place).
 * @param rowval CSR values (modified in place).
 * @param rowperm Row permutation, or NULL.
 * @param colperm Column permutation, or NULL.
 *
 * @return GOOSEBERRY_SUCCESS.
 */
int permute_sparse(
const dim_t nrows,
const dim_t ncols,
ind_t * const rowptr,
dim_t * const rowind,
real_t * const rowval,
const dim_t * const rowperm,
const dim_t * const colperm)
{
dim_t i, k, v;
ind_t j;
dim_t * rename;
ind_t * prowptr;
dim_t * prowind;
real_t * prowval;
if (rowperm) {
/* perform a row and possibly a column permutation */
/* build the permuted matrix in scratch arrays, then copy back, because
 * rowptr/rowind/rowval are both source and destination */
prowptr = ind_alloc(nrows+1);
prowind = dim_alloc(rowptr[nrows]);
prowval = real_alloc(rowptr[nrows]);
prowptr[0] = 0;
for (i=0;i<nrows;++i) {
v = rowperm[i];
prowptr[i+1] = prowptr[i] + (rowptr[v+1] - rowptr[v]);
}
k = 0;
if (colperm) {
rename = dim_alloc(ncols);
/* reverse the colperm */
for (i=0;i<ncols;++i) {
v = colperm[i];
rename[v] = i;
}
/* apply the colperm while filling rowindex and rowvalue arrays */
for (i=0;i<nrows;++i) {
v = rowperm[i];
for (j=rowptr[v];j<rowptr[v+1];++j) {
prowind[k] = rename[rowind[j]];
prowval[k] = rowval[j];
++k;
}
}
dl_free(rename);
} else {
/* fill rowindex and rowvalue arrays */
for (i=0;i<nrows;++i) {
v = rowperm[i];
for (j=rowptr[v];j<rowptr[v+1];++j) {
prowind[k] = rowind[j];
prowval[k] = rowval[j];
++k;
}
}
}
/* copying nrows entries of rowptr is sufficient: the permutation
 * preserves the total nonzero count, so rowptr[nrows] is unchanged */
ind_copy(rowptr,prowptr,nrows);
dim_copy(rowind,prowind,rowptr[nrows]);
real_copy(rowval,prowval,rowptr[nrows]);
dl_free(prowind);
dl_free(prowptr);
dl_free(prowval);
} else if (colperm) {
/* perform only a column permutation */
/* row structure is untouched: only the column ids are renamed in place */
prowind = dim_alloc(rowptr[nrows]);
rename = dim_alloc(ncols);
/* reverse the colperm */
for (i=0;i<ncols;++i) {
v = colperm[i];
rename[v] = i;
}
/* apply the colperm while filling rowindex and rowvalue arrays */
for (i=0;i<nrows;++i) {
for (j=rowptr[i];j<rowptr[i+1];++j) {
prowind[j] = rename[rowind[j]];
}
}
dim_copy(rowind,prowind,rowptr[nrows]);
dl_free(rename);
dl_free(prowind);
}
return GOOSEBERRY_SUCCESS;
}
/**
 * @brief Apply a forward permutation to a dense row-major matrix in place:
 *     out[i][j] = in[rowperm[i]][colperm[j]], with a NULL perm meaning
 *     the identity for that dimension.
 *
 * @param nrows Number of rows.
 * @param ncols Number of columns.
 * @param rowval Dense row-major values, permuted in place.
 * @param rowperm Row permutation, or NULL.
 * @param colperm Column permutation, or NULL.
 *
 * @return GOOSEBERRY_SUCCESS.
 */
int permute_dense(
const dim_t nrows,
const dim_t ncols,
real_t * const rowval,
const dim_t * const rowperm,
const dim_t * const colperm)
{
dim_t row, col, srow, scol;
real_t * scratch;
if (rowperm == NULL && colperm == NULL) {
/* both identities: nothing to do */
return GOOSEBERRY_SUCCESS;
}
/* gather into a scratch buffer, then copy back over the input */
scratch = real_alloc(nrows*ncols);
for (row=0;row<nrows;++row) {
srow = rowperm ? rowperm[row] : row;
for (col=0;col<ncols;++col) {
scol = colperm ? colperm[col] : col;
scratch[(row*ncols)+col] = rowval[(srow*ncols)+scol];
}
}
real_copy(rowval,scratch,nrows*ncols);
dl_free(scratch);
return GOOSEBERRY_SUCCESS;
}
/**
 * @brief Apply the reverse of a permutation to a dense row-major matrix in
 *     place: out[rowperm[i]][colperm[j]] = in[i][j], with a NULL perm
 *     meaning the identity for that dimension. Undoes permute_dense().
 *
 * @param nrows Number of rows.
 * @param ncols Number of columns.
 * @param rowval Dense row-major values, permuted in place.
 * @param rowperm Row permutation, or NULL.
 * @param colperm Column permutation, or NULL.
 *
 * @return GOOSEBERRY_SUCCESS.
 */
int permute_dense_rev(
const dim_t nrows,
const dim_t ncols,
real_t * const rowval,
const dim_t * const rowperm,
const dim_t * const colperm)
{
dim_t row, col, drow, dcol;
real_t * scratch;
if (rowperm == NULL && colperm == NULL) {
/* both identities: nothing to do */
return GOOSEBERRY_SUCCESS;
}
/* scatter into a scratch buffer, then copy back over the input */
scratch = real_alloc(nrows*ncols);
for (row=0;row<nrows;++row) {
drow = rowperm ? rowperm[row] : row;
for (col=0;col<ncols;++col) {
dcol = colperm ? colperm[col] : col;
scratch[(drow*ncols)+dcol] = rowval[(row*ncols)+col];
}
}
real_copy(rowval,scratch,nrows*ncols);
dl_free(scratch);
return GOOSEBERRY_SUCCESS;
}
/**
 * @brief Compute a Cuthill-McKee bandwidth-reducing ordering of a
 *     structurally symmetric matrix. When a blocks array is supplied, each
 *     block [blocks[c],blocks[c+1]) is ordered independently (in parallel).
 *
 * @param nrows Number of rows (must equal ncols).
 * @param ncols Number of columns.
 * @param rowptr CSR row pointer.
 * @param rowind CSR column indices.
 * @param rowval CSR values (unused by the ordering; see __cuthillmckee_block).
 * @param blocks Block boundaries (nblocks+1 entries), or NULL for one block.
 * @param nblocks Number of blocks.
 * @param perm Output permutation.
 *
 * @return GOOSEBERRY_SUCCESS, or GOOSEBERRY_ERROR_INVALIDINPUT if the
 *     matrix is not square.
 */
int permute_cuthillmckee(
const dim_t nrows,
const dim_t ncols,
const ind_t * const rowptr,
const dim_t * const rowind,
const real_t * const rowval,
const dim_t * const blocks,
const dim_t nblocks,
dim_t * const perm)
{
int err;
if (nrows != ncols) {
eprintf("Cuthill-McKee requires a structurally symmetric matrix: Given "
PF_DIM_T"x"PF_DIM_T"\n",nrows,ncols);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
}
if (blocks) {
/* if block is supplied, bandwidth reduction happens per block */
/* NOTE(review): default(none) without explicit sharing clauses relies on
 * const-qualified parameters being predetermined shared -- compiler
 * dependent; verify against the supported compilers */
#pragma omp parallel default(none)
{
dim_t c, mystart, myend;
const dim_t myid = omp_get_thread_num();
const dim_t nthreads = omp_get_num_threads();
/* blocks are dealt to threads round-robin */
for (c=myid;c<nblocks;c+=nthreads) {
mystart = blocks[c];
myend = blocks[c+1];
__cuthillmckee_block(mystart,myend,rowptr,rowind,rowval,perm);
}
}
} else {
__cuthillmckee_block(0,nrows,rowptr,rowind,rowval,perm);
}
err = GOOSEBERRY_SUCCESS;
END:
return err;
}
#endif
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
public:
/*! \brief configure the updater from key-value argument pairs */
void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
public:
/*! \brief find type of each feature, use column format */
inline void InitByCol(DMatrix* p_fmat,
const RegTree& tree) {
// fminmax_ stores, per feature fid: slot 2*fid = -(min value) and
// slot 2*fid+1 = max value. Negating the min lets SyncInfo() use a
// single Max allreduce for both bounds.
fminmax_.resize(tree.param.num_feature * 2);
std::fill(fminmax_.begin(), fminmax_.end(),
-std::numeric_limits<bst_float>::max());
// start accumulating statistics
for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
auto c = batch[fid];
if (c.size() != 0) {
// columns are sorted by value: c[0] is the min, c[size-1] the max
fminmax_[fid * 2 + 0] =
std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
fminmax_[fid * 2 + 1] =
std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
}
}
}
}
/*! \brief synchronize the information */
inline void SyncInfo() {
rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
}
// get feature type, 0:empty 1:binary 2:real
inline int Type(bst_uint fid) const {
CHECK_LT(fid * 2 + 1, fminmax_.size())
<< "FeatHelper fid exceed query bound ";
bst_float a = fminmax_[fid * 2];
bst_float b = fminmax_[fid * 2 + 1];
// a still at its fill value means the feature was never seen
if (a == -std::numeric_limits<bst_float>::max()) return 0;
// -a is the true min; min == max means a single distinct value (binary)
if (-a == b) {
return 1;
} else {
return 2;
}
}
inline bst_float MaxValue(bst_uint fid) const {
return fminmax_[fid *2 + 1];
}
/*! \brief sample a fraction p of the non-empty features; the sampled set
 *   is broadcast from rank 0 so all workers agree on it */
inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
std::vector<bst_uint> &findex = *p_findex;
findex.clear();
for (size_t i = 0; i < fminmax_.size(); i += 2) {
const auto fid = static_cast<bst_uint>(i / 2);
if (this->Type(fid) != 0) findex.push_back(fid);
}
auto n = static_cast<unsigned>(p * findex.size());
std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
findex.resize(n);
// sync the findex if it is subsample
std::string s_cache;
common::MemoryBufferStream fc(&s_cache);
dmlc::Stream& fs = fc;
if (rabit::GetRank() == 0) {
fs.Write(findex);
}
rabit::Broadcast(&s_cache, 0);
fs.Read(&findex);
}
private:
// per-feature bounds: [2*fid] = -(min), [2*fid+1] = max; see InitByCol
std::vector<bst_float> fminmax_;
};
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief this is helper function for row based data*/
/*! \brief route a row (sparse instance) from node nid to the child it falls
 *   into; missing split features take the node's default direction */
inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
  const RegTree::Node &node = tree[nid];
  const bst_uint split_index = node.SplitIndex();
  for (const auto& entry : inst) {
    if (entry.index != split_index) continue;
    // feature is present: compare against the split condition
    return entry.fvalue < node.SplitCond() ? node.LeftChild()
                                           : node.RightChild();
  }
  // split feature missing from this row: follow the default branch
  return node.DefaultChild();
}
// ------class member helpers---------
/*! \brief initialize temp data structure */
/*! \brief initialize temp data structure
 *
 * Sets up position_ (one entry per row; a negative value ~nid marks a row
 * excluded from statistics, see Decode/EncodePosition) and the qexpand_
 * queue of nodes to grow. Requires a freshly-created tree (roots only).
 */
inline void InitData(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree) {
CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
<< "TreeMaker: can only grow new tree";
const std::vector<unsigned> &root_index = fmat.Info().root_index_;
{
// setup position
position_.resize(gpair.size());
if (root_index.size() == 0) {
// no explicit root assignment: every row starts at root 0
std::fill(position_.begin(), position_.end(), 0);
} else {
for (size_t i = 0; i < position_.size(); ++i) {
position_[i] = root_index[i];
CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
<< "root index exceed setting";
}
}
// mark delete for the deleted datas
// (negative hessian is the caller's convention for "drop this row")
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
}
// mark subsample
if (param_.subsample < 1.0f) {
std::bernoulli_distribution coin_flip(param_.subsample);
auto& rnd = common::GlobalRandom();
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) continue;
if (!coin_flip(rnd)) position_[i] = ~position_[i];
}
}
}
{
// expand query
// initially every root node is queued for expansion
qexpand_.reserve(256); qexpand_.clear();
for (int i = 0; i < tree.param.num_roots; ++i) {
qexpand_.push_back(i);
}
this->UpdateNode2WorkIndex(tree);
}
}
/*! \brief update queue expand add in new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (int nid : qexpand_) {
if (!tree[nid].IsLeaf()) {
newnodes.push_back(tree[nid].LeftChild());
newnodes.push_back(tree[nid].RightChild());
}
}
// use new nodes for qexpand
qexpand_ = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
/*! \brief return the node id for row ridx, stripping the "inactive" encoding
 *   (a negative stored value ~nid decodes back to nid) */
inline int DecodePosition(bst_uint ridx) const {
  const int encoded = position_[ridx];
  if (encoded < 0) {
    return ~encoded;
  }
  return encoded;
}
// encode the encoded position value for ridx
/*! \brief move row ridx to node nid while preserving its active/inactive
 *   flag: rows already marked inactive (negative) stay encoded as ~nid */
inline void SetEncodePosition(bst_uint ridx, int nid) {
  position_[ridx] = (position_[ridx] < 0) ? ~nid : nid;
}
/*!
 * \brief this is helper function uses column based data structure,
 * reset the positions to the lastest one
 * \param nodes the set of nodes that contains the split to be used
 * \param p_fmat feature matrix needed for tree construction
 * \param tree the regression tree structure
 */
inline void ResetPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// set the positions in the nondefault
// order matters: rows with an explicit feature value are routed first,
// then SetDefaultPostion handles the remaining (missing-value) rows
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
this->SetDefaultPostion(p_fmat, tree);
}
/*!
* \brief helper function to set the non-leaf positions to default direction.
* This function can be applied multiple times and will get the same result.
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
// (name keeps the historical "Postion" spelling; renaming would break callers)
inline void SetDefaultPostion(DMatrix *p_fmat,
const RegTree &tree) {
// set default direct nodes to default
// for leaf nodes that are not fresh, mark then to ~nid,
// so that they are ignored in future statistics collection
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
const int nid = this->DecodePosition(ridx);
if (tree[nid].IsLeaf()) {
// mark finish when it is not a fresh leaf
// (a fresh leaf still has RightChild() == -1 unset by later expansion)
if (tree[nid].RightChild() == -1) {
position_[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].DefaultLeft()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
}
}
/*!
* \brief this is helper function uses column based data structure,
* to CORRECT the positions of non-default directions that WAS set to default
* before calling this function.
* \param batch The column batch
* \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure
*/
inline void CorrectNonDefaultPositionByBatch(
const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
const RegTree &tree) {
// only columns that appear in the (sorted) split set need correction
for (size_t fid = 0; fid < batch.Size(); ++fid) {
auto col = batch[fid];
auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
if (it != sorted_split_set.end() && *it == fid) {
const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
// rows were previously pushed to the default child; nid is that leaf
const int nid = this->DecodePosition(ridx);
CHECK(tree[nid].IsLeaf());
int pid = tree[nid].Parent();
// go back to parent, correct those who are not default
if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
if (fvalue < tree[pid].SplitCond()) {
this->SetEncodePosition(ridx, tree[pid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[pid].RightChild());
}
}
}
}
}
}
/*!
* \brief this is helper function uses column based data structure,
* \param nodes the set of nodes that contains the split to be used
* \param tree the regression tree structure
* \param out_split_set The split index set
*/
inline void GetSplitSet(const std::vector<int> &nodes,
const RegTree &tree,
std::vector<unsigned>* out_split_set) {
std::vector<unsigned>& fsplits = *out_split_set;
fsplits.clear();
// step 1, classify the non-default data into right places
for (int nid : nodes) {
if (!tree[nid].IsLeaf()) {
fsplits.push_back(tree[nid].SplitIndex());
}
}
std::sort(fsplits.begin(), fsplits.end());
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
}
/*!
* \brief this is helper function uses column based data structure,
* update all positions into nondefault branch, if any, ignore the default branch
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// rows that have an explicit value for a split feature are routed to the
// left/right child by that value; rows without it are left for
// SetDefaultPostion to send down the default branch
std::vector<unsigned> fsplits;
this->GetSplitSet(nodes, tree, &fsplits);
for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
for (auto fid : fsplits) {
auto col = batch[fid];
const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
if (fvalue < tree[nid].SplitCond()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
}
}
}
}
/*! \brief helper function to get statistics from a tree */
template<typename TStats>
// Accumulate per-node gradient statistics for the nodes in qexpand_:
// each thread sums rows into its own thread_temp slot, then the per-thread
// partials are reduced into *p_node_stats. Rows with negative position_
// (deleted/subsampled, see InitData) are skipped.
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree,
std::vector< std::vector<TStats> > *p_thread_temp,
std::vector<TStats> *p_node_stats) {
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
const MetaInfo &info = fmat.Info();
thread_temp.resize(omp_get_max_threads());
p_node_stats->resize(tree.param.num_nodes);
#pragma omp parallel
{
const int tid = omp_get_thread_num();
thread_temp[tid].resize(tree.param.num_nodes, TStats(param_));
for (unsigned int nid : qexpand_) {
thread_temp[tid][nid].Clear();
}
}
// setup position
const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
const int nid = position_[ridx];
const int tid = omp_get_thread_num();
if (nid >= 0) {
thread_temp[tid][nid].Add(gpair, info, ridx);
}
}
// sum the per thread statistics together
for (int nid : qexpand_) {
TStats &s = (*p_node_stats)[nid];
s.Clear();
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
s.Add(thread_temp[tid][nid]);
}
}
}
/*! \brief common helper data structure to build sketch */
struct SketchEntry {
/*! \brief total sum of amount to be met */
double sum_total;
/*! \brief statistics used in the sketch */
// rmin: rank (weight) accumulated strictly below last_fvalue;
// wmin: weight accumulated at exactly last_fvalue
double rmin, wmin;
/*! \brief last seen feature value */
bst_float last_fvalue;
/*! \brief current size of sketch */
// next rank threshold at which an entry should be emitted into the sketch
double next_goal;
// pointer to the sketch to put things in
common::WXQuantileSketch<bst_float, bst_float> *sketch;
// initialize the space
// NOTE(review): sum_total is not set here -- presumably the caller fills
// it before Push(); confirm at the call sites.
inline void Init(unsigned max_size) {
// -1 is a sentinel meaning "no value pushed yet"; see Push()
next_goal = -1.0f;
rmin = wmin = 0.0f;
sketch->temp.Reserve(max_size + 1);
sketch->temp.size = 0;
}
/*!
 * \brief push a new element to sketch
 * \param fvalue feature value, comes in sorted ascending order
 * \param w weight
 * \param max_size maximum number of entries the sketch may hold
 */
inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
if (next_goal == -1.0f) {
// first value: just start accumulating its weight
next_goal = 0.0f;
last_fvalue = fvalue;
wmin = w;
return;
}
if (last_fvalue != fvalue) {
// the run of equal values at last_fvalue has ended; decide whether to
// emit it as a sketch entry
double rmax = rmin + wmin;
if (rmax >= next_goal && sketch->temp.size != max_size) {
if (sketch->temp.size == 0 ||
last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
CHECK_LT(sketch->temp.size, max_size)
<< "invalid maximum size max_size=" << max_size
<< ", stemp.size" << sketch->temp.size;
++sketch->temp.size;
}
if (sketch->temp.size == max_size) {
// sketch is full: set an unreachable goal so nothing more is pushed
next_goal = sum_total * 2.0f + 1e-5f;
} else {
next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
}
} else {
if (rmax >= next_goal) {
// goal passed but the sketch is full ("naxt_goal" typo is in the
// emitted log text; left untouched here)
LOG(TRACKER) << "INFO: rmax=" << rmax
<< ", sum_total=" << sum_total
<< ", naxt_goal=" << next_goal
<< ", size=" << sketch->temp.size;
}
}
// advance to the new value: its rank starts where the old run ended
rmin = rmax;
wmin = w;
last_fvalue = fvalue;
} else {
// same value as before: merge the weights
wmin += w;
}
}
/*! \brief push final unfinished value to the sketch */
inline void Finalize(unsigned max_size) {
double rmax = rmin + wmin;
if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
CHECK_LE(sketch->temp.size, max_size)
<< "Finalize: invalid maximum size, max_size=" << max_size
<< ", stemp.size=" << sketch->temp.size;
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
++sketch->temp.size;
}
sketch->PushTemp();
}
};
/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
 * \brief map from an active node id to its working index offset in qexpand_;
 * can be -1, which means the node is not actively expanding
 */
std::vector<int> node2workindex_;
/*!
 * \brief position of each instance in the tree
 * can be negative, which means this position is no longer expanding
 * see also Decode/EncodePosition
 */
std::vector<int> position_;
private:
/*!
 * \brief Rebuild the node-id -> qexpand_ offset map after the tree changed.
 *
 * Fix: resize BEFORE filling.  The previous order (fill, then resize) left
 * any newly created slots value-initialized to 0, silently mapping nodes
 * that are not in qexpand_ to work index 0 instead of the inactive marker -1.
 */
inline void UpdateNode2WorkIndex(const RegTree &tree) {
  // update the node2workindex: every node defaults to -1 (inactive)
  node2workindex_.resize(tree.param.num_nodes);
  std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
  for (size_t i = 0; i < qexpand_.size(); ++i) {
    node2workindex_[qexpand_[i]] = static_cast<int>(i);
  }
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
GB_unaryop__ainv_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_int8
// op(A') function: GB_tran__ainv_uint32_int8
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
// GB_ATYPE: the type of the entries of the input matrix A
#define GB_ATYPE \
int8_t
// GB_CTYPE: the type of the entries of the output matrix C
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// GB_CX(p): the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator: additive inverse (well-defined modular negation, since z is uint32_t)
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__ainv_uint32_int8: Cx [p] = -(uint32_t) Ax [p] for all anz entries,
// split statically across nthreads OpenMP threads.  The loop body below is
// the literal expansion of GB_CAST_OP (p, p).
GrB_Info GB_unop__ainv_uint32_int8
(
    uint32_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // aij = Ax [p]
        int8_t aij = Ax [p] ;
        // cast to the output type, then apply the additive inverse
        // (unsigned negation: well-defined modular arithmetic)
        uint32_t z = (uint32_t) aij ;
        Cx [p] = -z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__ainv_uint32_int8: C = op (cast (A')).  All of the transpose logic
// lives in the shared template GB_unaryop_transpose.c, which is specialized
// via the GB_* macros defined earlier in this file (GB_CAST_OP in particular).
GrB_Info GB_tran__ainv_uint32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gemm_symm_int8.h | // chgemm is pleased to support the open source community by supporting ncnn available.
//
// author:tpoisonooo (https://github.com/tpoisonooo/chgemm) implement symmetric int8 GEMM on aarch64.
//
// Copyright (C) 2019 tpoisonooo. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __aarch64__
// DECOMPOSE_K: split the reduction depth k into the chunk counts used by the
// kernels: k8 = number of 8-deep chunks, then at most one 4-deep (k4), one
// 2-deep (k2) and one 1-deep (k1) chunk.  k itself is restored via ktmp.
// NOTE: despite its name, k8_even is 1 when k8 is ODD -- the kernels use it
// to peel one 8-chunk before entering their unrolled-by-2 main loop.
#define DECOMPOSE_K\
int ktmp = k;\
int k8 = k >> 3;\
int k8_even = (k8 % 2 == 0) ? 0: 1;\
k -= (k8 << 3);\
int k4 = k >> 2;\
k -= (k4 << 2);\
int k2 = k >> 1;\
k -= (k2 << 1);\
int k1 = k;\
k = ktmp;
// DECOMPOSE_N: split the column count n into n4 4-wide column groups, then
// at most one 2-wide (n2) and one 1-wide (n1) group; n is restored via ntmp.
#define DECOMPOSE_N\
int ntmp = n;\
int n4 = n >> 2;\
n -= (n4 << 2);\
int n2 = n >> 1;\
n -= (n2 << 1);\
int n1 = n;\
n = ntmp;
/*
 * Pack matrix B (k rows x n columns, row stride ldx) into sb for the int8
 * GEMM kernels.  Columns are grouped 4 / 2 / 1 wide (matching the n4/n2/n1
 * kernel tiles); within a group the depth k is consumed in chunks of
 * 8 / 4 / 2 / 1 rows (matching k8/k4/k2/k1 of DECOMPOSE_K).  Inside one
 * chunk of height h and width w the layout is column-major within the
 * chunk: sb[c*h + r] = B[row r][col c].
 */
static void reorder_b(const int8_t* b, int8_t* sb, const int k, const int n, const int ldx) {
    static const int kWidths[3] = {4, 2, 1};
    int col = 0;
    for (int wi = 0; wi < 3; ++wi) {
        const int w = kWidths[wi];
        // the 4-wide pass may run many times; 2-wide and 1-wide at most once
        while (n - col >= w) {
            // row cursors into the current column group (at most 8 live at once)
            const int8_t* row_ptr[8];
            for (int r = 0; r < 8; ++r) {
                row_ptr[r] = b + r * ldx + col;
            }
            int consumed = 0;
            while (consumed < k) {
                const int remaining = k - consumed;
                // chunk height: 8 while possible, then one 4, one 2, one 1
                int h;
                if (remaining >= 8) {
                    h = 8;
                } else if (remaining >= 4) {
                    h = 4;
                } else if (remaining >= 2) {
                    h = 2;
                } else {
                    h = 1;
                }
                // interleave: column-major layout inside the h x w chunk
                for (int c = 0; c < w; ++c) {
                    for (int r = 0; r < h; ++r) {
                        sb[c * h + r] = row_ptr[r][c];
                    }
                }
                sb += w * h;
                for (int r = 0; r < h; ++r) {
                    row_ptr[r] += h * ldx;
                }
                consumed += h;
            }
            col += w;
        }
    }
}
// Pack rows of A (m x k, row stride ldx) into sa for the int8 GEMM kernels:
// rows are grouped 4 / 2 / 1 at a time (matching the m4/m2/m1 kernels), and
// within a group the depth k is consumed in chunks of 8 / 4 / 2 / 1,
// interleaving the rows.  Wide chunks use aarch64 NEON inline assembly
// (ld1 loads 8 bytes per row, trn1 interleaves, st1 stores the panel).
static void reorder_a(int8_t* a, int8_t* sa, int m, const int k, const int ldx) {
int i = 0;
// ---- 4-row groups ----
for (; i + 3 < m; i += 4) {
int8_t *p0 = a;
int8_t *p1 = a + ldx;
int8_t *p2 = a + 2 * ldx;
int8_t *p3 = a + 3 * ldx;
int j = 0;
// k-chunks of 8: store 8 bytes of each of the 4 rows back to back (32B panel)
for (; j + 7 < k; j += 8) {
asm volatile (
"ld1 {v0.8b}, [%0], #8 \n"
"ld1 {v1.8b}, [%1], #8 \n"
"ld1 {v2.8b}, [%2], #8 \n"
"ld1 {v3.8b}, [%3], #8 \n"
"st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32\n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
// at most one k-chunk of 4; trn1 v0.2s pairs the low 4 bytes of two rows.
// NOTE(review): ld1 {v.8b} reads 8 bytes per row while only 4 are consumed;
// presumably the source buffer is padded or over-read is tolerated -- confirm.
if (j + 3 < k) {
j += 4;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #4 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #4 \n"
"ld1 {v2.8b}, [%2] \n"
"add %2, %2, #4 \n"
"ld1 {v3.8b}, [%3] \n"
"add %3, %3, #4 \n"
"trn1 v0.2s, v0.2s, v1.2s \n"
"st1 {v0.8b}, [%4], #8 \n"
"trn1 v2.2s, v2.2s, v3.2s \n"
"st1 {v2.8b}, [%4], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
// at most one k-chunk of 2: interleave 2 bytes of each row via trn1 on
// halfwords then words (same 8-byte over-read note applies)
if (j + 1 < k) {
j += 2;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #2 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #2 \n"
"ld1 {v2.8b}, [%2] \n"
"add %2, %2, #2 \n"
"ld1 {v3.8b}, [%3] \n"
"add %3, %3, #2 \n"
"trn1 v0.4h, v0.4h, v1.4h \n"
"trn1 v2.4h, v2.4h, v3.4h \n"
"trn1 v0.2s, v0.2s, v2.2s \n"
"st1 {v0.8b}, [%4], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
// final single depth element of the 4-row group
if (j < k) {
*sa++ = *p0;
*sa++ = *p1;
*sa++ = *p2;
*sa++ = *p3;
}
a += 4 * ldx;
}
// ---- at most one 2-row group (the m remainder is < 4) ----
if (i + 1 < m) {
i += 2;
int8_t *p0 = a;
int8_t *p1 = a + ldx;
int j = 0;
// k-chunks of 8
for (; j + 7 < k; j += 8) {
asm volatile (
"ld1 {v0.8b}, [%0], #8 \n"
"ld1 {v1.8b}, [%1], #8 \n"
"st1 {v0.8b, v1.8b}, [%2], #16\n"
: "=r"(p0),
"=r"(p1),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(sa)
: "cc", "memory", "v0", "v1"
);
}
// at most one k-chunk of 4 (8-byte over-read note applies here too)
if (j + 3 < k) {
j += 4;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #4 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #4 \n"
"trn1 v0.2s, v0.2s, v1.2s \n"
"st1 {v0.8b}, [%2], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(sa)
: "cc", "memory", "v0", "v1"
);
}
// at most one k-chunk of 2 (plain C)
if (j + 1 < k) {
j += 2;
sa[0] = p0[0];
sa[1] = p0[1];
sa[2] = p1[0];
sa[3] = p1[1];
sa += 4;
p0 += 2;
p1 += 2;
}
// final single depth element
if (j < k) {
sa[0] = p0[0];
sa[1] = p1[0];
sa += 2;
}
a += 2 * ldx;
}
// ---- final single row ----
if (i < m) {
// NOTE(review): copies ldx bytes, but the row only holds k valid values;
// when ldx > k this writes ldx - k extra bytes into sa.  Presumably callers
// always pass ldx == k here -- confirm, otherwise this should copy
// sizeof(int8_t) * k.
memcpy(sa, a, sizeof(int8_t) * ldx);
}
}
// int8kernel_m1: compute one output row C[0, 0..n) = A(1 x k) * B(k x n)
// from the packed panels sa / sb (see reorder_a / reorder_b).  Columns are
// processed 4 / 2 / 1 at a time; within each column group the depth k is
// consumed as k8 8-deep chunks (unrolled by two, with k8_even peeling one)
// then at most one 4 / 2 / 1 chunk (see DECOMPOSE_K / DECOMPOSE_N).
// When `scales` is non-null the int32 accumulators are requantized
// (scale, optional bias, round-to-nearest fcvtas) and stored as int8;
// otherwise raw int32 sums are stored.  The two unused int parameters keep
// the signature uniform with the other int8kernel_m* variants.
void int8kernel_m1(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int, float* scales, float* bias) {
void *pc = dst;
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
// int8_t* pTmp = (int8_t*)fastMalloc(16);
// ---- 1x4 output tiles: loop n4 times, accumulating in v8..v11 ----
if (n4 > 0) {
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f \n"
" mov w19, %w4 \n"
" cmp %w3, #0 \n"
" beq 2f// loop number is even \n"
" // start loopm1_kd8_nd4\n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" saddlp v10.4s, v0.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" saddlp v11.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v12.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v13.8b \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v2.8b, v6.8b \n"
" smlal v0.8h, v3.8b, v14.8b \n"
" sadalp v10.4s, v0.8h \n"
" smull v1.8h, v2.8b, v7.8b \n"
" smlal v1.8h, v3.8b, v15.8b \n"
" sadalp v11.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, #0 \n"
" beq 4f \n"
" // start subkernel_m1n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" 4: \n"
" cmp %w6, #0 \n"
" beq 5f \n"
" // start subkernel_m1n4k2\n"
" ld1 {v4.8b}, [%0] // load A1x2 \n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" mov v4.h[1], v4.h[0] \n"
" mov v4.s[1], v4.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" sadalp v8.4s, v0.8h \n"
" 5: \n"
" cmp %w7, #0 \n"
" beq 6f \n"
" // start subkernel_m1n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #1 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" ldr w24, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" mov v12.s[0], w24 \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" dup v15.4s, w24 \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2]\n"
" add %2, %2, #4 \n"
" b m1_loopnd4_finish\n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" m1_loopnd4_finish: \n"
" subs %w8, %w8, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
// print_fp32_vec(scales, bias, m);
// ---- at most one 1x2 output tile ----
if (n2 > 0) {
asm volatile(
"m1_nd2_start: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7\n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd1_kd8_nd2 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v6.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v7.8b \n"
" sadalp v9.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n2k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" mov v4.h[1], v4.h[0] \n"
" smull v0.8h, v4.8b, v0.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // v12: s0 s1 \n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8:\n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2]\n"
" add %2, %2, #2 \n"
" b m1_loopnd2_finish\n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" m1_loopnd2_finish: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
// ---- at most one final 1x1 output element ----
if (n1 > 0) {
asm volatile (
"m1_nd1_start: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7 \n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load B line \n"
" ld1 {v2.8b}, [%0], #8 // load A line \n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v25.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n1k4 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n1k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1 \n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A1x1 \n"
" add %0, %0, #1 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0] \n"
" add v8.4s, v8.4s, v0.4s \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm\n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2]\n"
" b m1_finish \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" m1_finish: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
void int8kernel_m2(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias) {
void *pc0, *pc1;
if (scales == nullptr) {
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
} else {
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
}
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
if (n4 > 0) {
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b \n"
" eor v11.16b, v11.16b, v11.16b \n"
" eor v12.16b, v12.16b, v12.16b \n"
" eor v13.16b, v13.16b, v13.16b \n"
" eor v14.16b, v14.16b, v14.16b \n"
" eor v15.16b, v15.16b, v15.16b \n"
" eor v16.16b, v16.16b, v16.16b \n"
" eor v17.16b, v17.16b, v17.16b \n"
" eor v18.16b, v18.16b, v18.16b \n"
" eor v19.16b, v19.16b, v19.16b \n"
" eor v20.16b, v20.16b, v20.16b \n"
" eor v21.16b, v21.16b, v21.16b \n"
" eor v22.16b, v22.16b, v22.16b \n"
" eor v23.16b, v23.16b, v23.16b \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopm2_kd8_nd4\n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" saddlp v15.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" add x12, %1, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
" // start v10v11, v14v15, v18v19, v22v23, error here!\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" add %1, %1, #32 \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v16.4s, v3.4h, v4.4h \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" 4: \n"
" cmp %w7, #0 \n"
" beq 5f \n"
" // start subkernel_m2n4k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2 \n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" smull v14.8h, v4.8b, v2.8b \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v12.4s, v12.8h \n"
" saddlp v13.4s, v13.8h \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v9.4s, v4.4h, v2.h[1]\n"
" 6: \n"
" cmp %10, #0 \n"
" beq 7f \n"
" ld1 {v12.2s}, [%10] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" fmul v9.4s, v9.4s, v12.s[1]\n"
" cmp %11, #0 \n"
" beq 8f \n"
" // fp32 += scales_tm \n"
" ld1 {v14.2s}, [%11] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" fcvtas v9.4s, v9.4s\n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s\n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %2, %2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %3, %3, #4 \n"
" b m2_loopnd4_finish \n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" st1 {v9.4s}, [%3], #16 \n"
" m2_loopnd4_finish: \n"
" subs %w9, %w9, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(n4), // %9
"=r"(scales), // %10
"=r"(bias) // %11
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(n4),
"10"(scales),
"11"(bias)
: "cc", "memory", "x8", "w17", "x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n2 > 0) {
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"m2_nd2_start: \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd2_kd8_nd2 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [%1], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n2k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" // 00 11\n"
" rev32 v1.4h, v0.4h // 11 00\n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" // v12: 0 1 \n"
" ld1 {v12.2s}, [%9] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" // v12: 0 0 1 1 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" b m2_loopnd2_finish \n"
" 7:"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" m2_loopnd2_finish: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "x12", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n1 > 0) {
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"m2_nd1_start: \n"
" cmp %w5, #0 \n"
" beq 1f // k <=7\n"
" mov w17, %w5\n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v26.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v27.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0], #8 // load A2x4 \n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" mov v0.h[1], v0.h[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, 0 \n"
" beq 6f \n"
" // start subkernel_m2n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0]\n"
" mov v1.s[0], v0.s[1] \n"
" add v8.4s, v8.4s, v0.4s \n"
" add v12.4s, v12.4s, v1.4s \n"
" 6: \n"
" cmp %w9, #0 \n"
" beq 7f \n"
" mov v8.s[1], v12.s[0] \n"
" // v12: s0 s1 \n"
" ld1 {v12.2s}, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" b m2_finish \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" m2_finish: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
// Computes a 4-row micro-kernel of an int8 GEMM: C[4][n] += A[4][k] * B[k][n],
// where `sa` is a repacked 4-row panel of A and `sb` is the packed B.
// Two output modes, selected by `scales`:
//   - scales == nullptr: accumulate raw int32 results into `dst` (row stride
//     `ldc` int32 elements).
//   - scales != nullptr: requantize — int32 -> fp32, multiply by per-row
//     scale, optionally add per-row `bias`, round (fcvtas), saturate back to
//     int8 (`dst` row stride is `ldc` bytes).
// The unnamed 4th parameter (m) is unused here.
// K is decomposed into k8/k4/k2/k1 sub-kernels and N into n4/n2/n1 tiles by
// the DECOMPOSE_K / DECOMPOSE_N macros defined earlier in the file (not
// visible in this chunk) — assumed to define k8_even, k8, k4, k2, k1 and
// n4, n2, n1; TODO confirm against the macro definitions.
void int8kernel_m4(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias) {
void *pc0, *pc1, *pc2, *pc3;
// Set up the four output row pointers; element size (int32 vs int8) depends
// on whether requantization is requested.
if (scales == nullptr) {
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
pc2 = ((int32_t*)pc1) + ldc;
pc3 = ((int32_t*)pc2) + ldc;
} else {
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
pc2 = ((int8_t*)pc1) + ldc;
pc3 = ((int8_t*)pc2) + ldc;
}
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
// ---- n4 tile: 4 rows x 4 cols per iteration; label 8 loops over n4 ----
if (n4 > 0) {
// fprintf(stdout, "start m4n4 \n");
asm volatile(
"8: \n"
// Zero the 16 int32 accumulators v8-v23.
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
// Save the A panel pointer; it is rewound at m4_loopnd4_finish so every
// n4 column tile re-reads the same A panel.
" mov x8, %0 \n"
" cmp %w7, #0 \n"
" beq 1f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f \n"
// Odd k8 count: peel one kd8 iteration (smull+saddlp initializes the
// accumulators instead of accumulating into zeros).
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" saddlp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v18.4s, v0.8h \n"
" saddlp v22.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v19.4s, v0.8h \n"
" saddlp v23.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
// Main kd8 loop, unrolled x2 (processes 16 k at a time); x14/x15 shadow
// the A/B pointers 32 bytes ahead so loads interleave with the MACs.
" 2: \n"
" add x15, %x1, #32 \n"
" add x14, %x0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16\n"
" ld1 {v2.8b, v3.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v24.8b\n"
" smlal v1.8h, v7.8b, v24.8b\n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v16.4s, v0.8h \n"
" sadalp v17.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v20.4s, v0.8h \n"
" sadalp v21.4s, v1.8h \n"
" // start v10v11, v14v15, v18v19, v22v23\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v18.4s, v0.8h \n"
" sadalp v19.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v22.4s, v0.8h \n"
" sadalp v23.4s, v1.8h \n"
// Skip over the second half already consumed through x14/x15.
" add %0, %0, #32 \n"
" add %1, %1, #32 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
// start nd2
// Horizontal reduction: fold the per-lane partial sums so v8..v11 hold
// the 4x4 int32 result tile (one vector per output row).
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v22.4s, v22.4s, v23.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" addp v10.4s, v16.4s, v18.4s\n"
" addp v11.4s, v20.4s, v22.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, #0 \n"
" beq 4f \n"
" // start subkernel_m4n4k4\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" sxtl v5.8h, v5.8b \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v10.4s, v10.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v11.4s, v11.4s, v16.4s \n"
" 4: \n"
" cmp %w9, #0 \n"
" beq 5f \n"
" // start subkernel_m4n4k2 \n"
// k2 case: products are formed against 4 rotations of the B pair so each
// output row's pair lands in a known lane, then lanes are gathered by mov.
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" saddlp v12.4s, v12.8h \n"
" smull v14.8h, v4.8b, v2.8b \n"
" saddlp v13.4s, v13.8h \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" mov v18.s[0], v14.s[2] \n"
" mov v18.s[1], v15.s[2] \n"
" mov v18.s[2], v12.s[2] \n"
" mov v18.s[3], v13.s[2] \n"
" mov v19.s[0], v15.s[3] \n"
" mov v19.s[1], v14.s[3] \n"
" mov v19.s[2], v13.s[3] \n"
" mov v19.s[3], v12.s[3] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" add v10.4s, v10.4s, v18.4s \n"
" add v11.4s, v11.4s, v19.4s \n"
" 5: \n"
" cmp %w10, #0 \n"
" beq 6f \n"
" // start subkernel_m4n4k1\n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v9.4s, v4.4h, v2.h[1] \n"
" smlal v10.4s, v4.4h, v2.h[2] \n"
" smlal v11.4s, v4.4h, v2.h[3] \n"
" 6: \n"
// Requantization path: int32 -> fp32, per-row scale, optional per-row
// bias, round-to-nearest (fcvtas), saturating narrow to int8.
" cmp %12, #0 \n"
" beq 9f \n"
" ld1 {v12.4s}, [%12] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" scvtf v10.4s, v10.4s \n"
" scvtf v11.4s, v11.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0] \n"
" fmul v9.4s, v9.4s, v12.s[1] \n"
" fmul v10.4s, v10.4s, v12.s[2] \n"
" fmul v11.4s, v11.4s, v12.s[3] \n"
" cmp %13, #0 \n"
" beq 7f \n"
" ld1 {v14.4s}, [%13] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" dup v15.4s, v14.s[2] \n"
" fadd v10.4s, v10.4s, v15.4s\n"
" dup v15.4s, v14.s[3] \n"
" fadd v11.4s, v11.4s, v15.4s\n"
" 7: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v9.4s, v9.4s \n"
" fcvtas v10.4s, v10.4s \n"
" fcvtas v11.4s, v11.4s \n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s \n"
" sqxtn v7.4h, v10.4s \n"
" sqxtn2 v7.8h, v11.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" sqxtn v9.8b, v7.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %x2, %x2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %x3, %x3, #4 \n"
" st1 {v9.s}[0], [%4] \n"
" add %x4, %x4, #4 \n"
" st1 {v9.s}[1], [%5] \n"
" add %x5, %x5, #4 \n"
" b m4_loopnd4_finish \n"
// Raw int32 output path (no requantization).
" 9: \n"
" st1 {v8.4s}, [%x2], #16 \n"
" st1 {v9.4s}, [%x3], #16 \n"
" st1 {v10.4s}, [%x4], #16 \n"
" st1 {v11.4s}, [%x5], #16 \n"
" m4_loopnd4_finish: \n"
// Rewind pa for the next column tile and iterate over n4.
" subs %x11, %x11, #1 \n"
" mov %x0, x8 \n"
" bne 8b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(n4), // %11
"=r"(scales), // %12
"=r"(bias) // %13
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(n4),
"12"(scales),
"13"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
// ---- n2 tile: 4 rows x 2 cols (runs at most once) ----
if (n2 > 0) {
// fprintf(stdout, "start m4n2 \n");
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"m4_nd2_start: \n"
" mov x8, %x0 // PanelA \n"
" cmp %w7, #0 \n"
" beq 1f // k <= 7 \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f// loop number is even \n"
" // start loopkd8_nd2 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
" 2: \n"
" add x15, %1, #16 \n"
" add x14, %0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v28.8b\n"
" smull v1.8h, v5.8b, v28.8b\n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b\n"
" smlal v1.8h, v7.8b, v26.8b\n"
" sadalp v16.4s, v0.8h\n"
" sadalp v17.4s, v1.8h\n"
" smull v0.8h, v4.8b, v29.8b\n"
" smull v1.8h, v5.8b, v29.8b\n"
" smlal v0.8h, v6.8b, v27.8b\n"
" smlal v1.8h, v7.8b, v27.8b\n"
" sadalp v20.4s, v0.8h\n"
" sadalp v21.4s, v1.8h\n"
" add %0, %0, #32 \n"
" add %1, %1, #16 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, 0 \n"
" beq 4f \n"
" // start subkernel_m4n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v17.4s, v2.4h, v4.4h \n"
" smull v18.4s, v2.4h, v6.4h \n"
" addp v17.4s, v17.4s, v18.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v3.4h, v4.4h \n"
" smull v22.4s, v3.4h, v6.4h \n"
" addp v21.4s, v21.4s, v22.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 4: \n"
" cmp %w9, 0 \n"
" beq 5f \n"
" // start subkernel_m4n2k2 \n"
" ld1 {v4.8b}, [%0], #8 //load A4x2\n"
" ld1 {v0.8b}, [%1] // load B2x2 \n"
" add %1, %1, #4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" smull v23.8h, v4.8b, v2.8b \n"
" smull v24.8h, v4.8b, v3.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" saddlp v23.4s, v23.8h \n"
" saddlp v24.4s, v24.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s\n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v23.s[2] \n"
" mov v17.s[1], v24.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v24.s[3] \n"
" mov v21.s[1], v23.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 5: \n"
" cmp %w10, 0 \n"
" beq 6f \n"
" // start subkernel_m4n2k1\n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" smlal v16.4s, v4.4h, v2.h[2] \n"
" smlal v20.4s, v4.4h, v2.h[3] \n"
" 6: \n"
" cmp %11, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" mov v16.d[1], v20.d[0] \n"
" // v12: 0 1 2 3 \n"
" ld1 {v12.4s}, [%11] \n"
// NOTE(review): zip2 must be computed before zip1 overwrites v12 — order
// here is correct; keep it if this block is ever touched.
" zip2 v13.4s, v12.4s, v12.4s \n"
" zip1 v12.4s, v12.4s, v12.4s \n"
" // v12: 0 0 1 1 \n"
" // v13: 2 2 3 3 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v16.4s, v16.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" fmul v16.4s, v16.4s, v13.4s\n"
" cmp %12, #0 \n"
" beq 8f // skip add scales \n"
" // fp32 += scales_tm \n"
" ld1 {v12.4s}, [%12] \n"
" zip2 v13.4s, v12.4s, v12.4s\n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" fadd v16.4s, v16.4s, v13.4s\n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v16.4s, v16.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" sqxtn v16.4h, v16.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" sqxtn v16.8b, v16.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" st1 {v16.h}[0], [%4] \n"
" add %4, %4, #2 \n"
" st1 {v16.h}[1], [%5] \n"
" add %5, %5, #2 \n"
" b m4_loopnd2_finish \n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" st1 {v16.2s}, [%4], #8 \n"
" st1 {v20.2s}, [%5], #8 \n"
" m4_loopnd2_finish: \n"
// Rewind pa so the n1 tile (if any) re-reads the A panel.
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
// ---- n1 tile: 4 rows x 1 col (runs at most once) ----
if (n1 > 0) {
// fprintf(stdout, "start m4n1 \n");
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"m4_n1_start: \n"
" cmp %w7, #0 \n"
" beq 10f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 11f// loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 12f \n"
" 11: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" ld1 {v28.8b, v29.8b, v30.8b, v31.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v28.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v29.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" smull v0.8h, v26.8b, v4.8b \n"
" smlal v0.8h, v30.8b, v5.8b \n"
" sadalp v16.4s, v0.8h \n"
" smull v1.8h, v27.8b, v4.8b \n"
" smlal v1.8h, v31.8b, v5.8b \n"
" sadalp v20.4s, v1.8h \n"
" subs w20, w20, #2 \n"
" bne 11b \n"
" 12: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 10: \n"
" cmp %w8, #0 \n"
" beq 13f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %x1, %x1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load A4x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" sxtl v3.8h, v3.8b \n"
" mov v6.d[0], v3.d[1] // extend A4x4 to v2,v5,v3,v6\n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" smull v17.4s, v3.4h, v4.4h \n"
" addp v17.4s, v17.4s, v17.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v6.4h, v4.4h \n"
" addp v21.4s, v21.4s, v21.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 13: \n"
" cmp %w9, #0 \n"
" beq 14f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" ld1 {v0.8b}, [%1] // load B2x1 \n"
" add %1, %1, #2 \n"
// Broadcast the 2-element B pair across all four halfword pairs so one
// smull covers all four A rows.
" mov v0.h[1], v0.h[0] \n"
" mov v0.s[1], v0.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 14: \n"
" cmp %w10, #0 \n"
" beq 15f \n"
" // start subkernel_m4n1k1 \n"
" ld1 {v4.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smull v0.4s, v2.4h, v4.h[0]\n"
// NOTE(review): v0 here holds all four row products; adding the full
// v0.4s into v8 then lanes of v0 into v12/v16/v20 relies on only lane 0
// of each accumulator being stored below — confirm if output width grows.
" add v8.4s, v8.4s, v0.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 15: \n"
// REQUANT
" cmp %11, #0 \n"
" beq 16f \n"
" mov v8.s[1], v12.s[0] \n"
" mov v8.s[2], v16.s[0] \n"
" mov v8.s[3], v20.s[0] \n"
" // v12: s0 s1 s2 s3 \n"
" ld1 {v12.4s}, [%11] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %12, #0 \n"
" beq 17f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.4s}, [%12] \n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 17: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" st1 {v8.b}[2], [%4] \n"
" st1 {v8.b}[3], [%5] \n"
" b m4_finish \n"
" // no need to add the last output pointer\n"
" 16: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" st1 {v16.s}[0], [%4] \n"
" st1 {v20.s}[0], [%5] \n"
" m4_finish: \n"
// NOTE(review): this clobbers x0 with 0 (x0 is in the clobber list, so
// it is declared); presumably a placeholder to keep the label non-empty.
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x0", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
#undef DECOMPOSE_K
#undef DECOMPOSE_N
// Top-level int8 GEMM driver: computes C[m][n] = A[m][k] * B[k][n] using the
// packed panels `sa` (A) and `sb` (B). Full 4-row blocks are dispatched to
// int8kernel_m4 in parallel; the 0-3 remainder rows go to int8kernel_m2 /
// int8kernel_m1. Two modes:
//   - scales == nullptr: raw int32 output (row stride `ldc` int32 elements).
//   - scales != nullptr: requantized int8 output with per-row scale and
//     optional per-row bias.
void int8kernel(void* dst, const int8_t* sa, const int8_t* sb, int m, int k, int n, int ldc, float* scales, float* bias, const Option& opt) {
int8_t* pa = (int8_t*)sa;
int8_t* pb = (int8_t*)sb;
// nn = number of rows covered by full 4-row blocks (m rounded down to x4).
const int nn = (m >> 2) << 2;
if (scales == nullptr) {
int32_t* pc = (int32_t*)dst;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < nn; i += 4) {
// NOTE(review): output offset uses `i * n`, not `i * ldc` — correct only
// when ldc == n; confirm against the callers' packing.
int8kernel_m4((void*)(pc + i * n), pa + i * k, pb, m, k, n, ldc, nullptr, nullptr);
}
pa += nn * k;
// NOTE(review): pb is advanced by nn * n here even though every m4 block
// above was given the same pb — this presumes a B layout interleaved with
// the row blocks; verify against the pack_B routine.
pb += nn * n;
// NOTE(review): remainder-row outputs are written at `pc` (row 0), not at
// row nn — presumably the packing/callers account for this; TODO confirm.
switch(m-nn)
{
case 3:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
pc += 2 * n;
pa += 2 * k;
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
break;
case 2:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
break;
case 1:
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
break;
case 0:
default:
break;
}
} else {
// Requantized path: int8 destination, per-row scales/bias advance with
// the row index.
int8_t* pc = (int8_t*)dst;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < nn; i += 4) {
int8kernel_m4((void*)(pc + i * n), pa + i * k, pb, m, k, n, ldc, scales + i, (bias==nullptr)? nullptr: bias+i);
}
pa += nn * k;
pb += nn * n;
scales += nn;
bias = (bias == nullptr)? nullptr: bias + nn;
switch(m-nn)
{
case 3:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
pc += 2 * n;
pa += 2 * k;
scales += 2;
bias = (bias == nullptr)? nullptr: bias + 2;
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 2:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 1:
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 0:
default:
break;
}
}
return;
}
#endif
|
LayerNormOps.h | #pragma once
#include <string.h>
#include <cmath>
#include <algorithm>
#include "General.h"
#include "TensorRef.h"
#include "Vector-inl.h"
// C-exported entry point: fused residual-add + layer normalization of
// (in1 + in2) over `cols` per row, scaled by gamma and (optionally) shifted
// by beta. Implemented by the AddLayerNorm<T> template below.
OPS_API int TS_AddLayerNorm(
TensorRef* out_,
TensorRef* in1_,
TensorRef* in2_,
TensorRef* gamma_,
TensorRef* beta_,
float eps,
int rows,
int cols);
// C-exported entry point: backward pass of the fused add + layer norm.
// Accumulates gradients for both residual inputs (gradX1/gradX2) and the
// affine parameters (gradGamma, and gradBeta when beta is present).
OPS_API int TS_AddLayerNormGrad(
TensorRef * gradX1_,
TensorRef * gradX2_,
TensorRef * gradGamma_,
TensorRef * gradBeta_,
TensorRef * adj_,
TensorRef * y_,
TensorRef * x1_,
TensorRef * x2_,
TensorRef * gamma_,
TensorRef * beta_,
int rows,
int cols,
float eps);
// Fused residual-add + layer normalization.
// For each of `rows` rows: x = in1 + in2 (element-wise over `cols`), then
// out = gamma * (x - mean(x)) / sqrt(var(x) + eps) [+ beta if provided].
//
// Parameters:
//   out_   - output tensor buffer, rows x cols.
//   in1_, in2_ - the two input tensors to be summed, rows x cols.
//   gamma_ - per-column scale, length cols.
//   beta_  - optional per-column shift, length cols (may be null).
//   eps    - variance stabilizer added under the sqrt.
//   rows, cols - logical 2-D extent of the buffers.
// Rows are processed in parallel; each inner loop is SIMD-vectorized.
template<typename T>
void AddLayerNorm(TensorRef* out_,
TensorRef* in1_,
TensorRef* in2_,
TensorRef* gamma_,
TensorRef* beta_,
float eps,
int rows,
int cols) {
T * out = (T*)out_->buffer;
T * in1 = (T*)in1_->buffer;
T * in2 = (T*)in2_->buffer;
T * alpha = (T*)gamma_->buffer;
T * beta = beta_ ? (T*)beta_->buffer : nullptr;
#pragma omp parallel for
for (int j = 0; j < rows; ++j) {
T * so = out + j * cols;
const T * sp1 = in1 + j * cols;
const T * sp2 = in2 + j * cols;
// Mean of the element-wise sum.
T sum = 0.f;
#pragma omp simd reduction(+ : sum)
for (int i = 0; i < cols; ++i) {
sum += (sp1[i] + sp2[i]);
}
T mean = sum / cols;
// Biased variance (division by cols, not cols-1).
T sqSum = 0.f;
#pragma omp simd reduction(+ : sqSum)
for (int i = 0; i < cols; ++i) {
T ex = (sp1[i] + sp2[i]) - mean;
sqSum += ex * ex;
}
T sigma = std::sqrt(eps + sqSum / cols);
// Normalize + affine. The beta check is loop-invariant, so hoist it out
// of the simd loop instead of branching per element (same arithmetic,
// same operation order — results are bit-identical to the branched form).
if (beta != nullptr) {
#pragma omp simd
for (int i = 0; i < cols; ++i) {
so[i] = alpha[i] * (((sp1[i] + sp2[i]) - mean) / sigma) + beta[i];
}
} else {
#pragma omp simd
for (int i = 0; i < cols; ++i) {
so[i] = alpha[i] * (((sp1[i] + sp2[i]) - mean) / sigma);
}
}
}
}
// Backward pass for AddLayerNorm above.
// Instead of re-normalizing x1 + x2, the normalized value is reconstructed
// from the saved forward output: x_hat = (y - beta) / gamma.
// Standard layer-norm gradient per element:
//   dx = gamma * (N*dy - sum(dy) - x_hat * sum(dy * x_hat)) / (N * sigma)
// All gradients are ACCUMULATED (+=) into gradX1/gradX2/gradGamma/gradBeta.
// `beta_`/`gradBeta_` may be null; the two branches below are identical
// except for the beta terms.
template<typename T>
void AddLayerNormGrad(
    TensorRef * gradX1_,
    TensorRef * gradX2_,
    TensorRef * gradGamma_,
    TensorRef * gradBeta_,
    TensorRef * adj_,   // incoming adjoint dL/dy
    TensorRef * y_,     // saved forward output
    TensorRef * x1_,
    TensorRef * x2_,
    TensorRef * gamma_,
    TensorRef * beta_,
    int rows,
    int cols,
    float eps) {
  T * gradX1 = (T*)gradX1_->buffer;
  T * gradX2 = (T*)gradX2_->buffer;
  T * gradGamma = (T*)gradGamma_->buffer;
  T * gradBeta = gradBeta_ ? (T*)gradBeta_->buffer : nullptr;
  T * adj = (T*)adj_->buffer;
  T * y = (T*)y_->buffer;
  T * x1 = (T*)x1_->buffer;
  T * x2 = (T*)x2_->buffer;
  T * gamma = (T*)gamma_->buffer;
  T * beta = beta_ ? (T*)beta_->buffer : nullptr;
  if (beta) {
    // gradGamma/gradBeta are shared across rows: OpenMP array-section
    // reductions (requires OpenMP 4.5+) give each thread a private copy.
#pragma omp parallel for reduction(+ : gradGamma[:cols], gradBeta[:cols])
    // NOTE(review): size_t loop var compared against int `rows` mixes
    // signedness; the forward pass uses int — confirm intended.
    for (size_t j = 0; j < rows; ++j) {
      T * x1Row = x1 + j * cols;
      T * x2Row = x2 + j * cols;
      T * yRow = y + j * cols;
      T * adjRow = adj + j * cols;
      T * gradX1Row = gradX1 + j * cols;
      T * gradX2Row = gradX2 + j * cols;
      T sum_x = 0.f;
      T sum_adj = 0.f;
      T sum_adj_x = 0.f;
      T sum_sqr = 0.f;
      // Pass 1: accumulate sum of inputs, sum of adjoints, and
      // sum(adj * x_hat) where x_hat is recovered from y.
#pragma omp simd reduction(+ : sum_x, sum_adj_x, sum_adj)
      for (size_t i = 0; i < cols; ++i) {
        sum_x += (x1Row[i] + x2Row[i]);
        sum_adj_x += adjRow[i] * (yRow[i] - (beta ? beta[i] : 0.f)) / gamma[i];
        sum_adj += adjRow[i];
      }
      T mean = sum_x / cols;
      // Pass 2: variance of x1 + x2 around the mean (to recompute sigma).
#pragma omp simd reduction(+ : sum_sqr)
      for (size_t i = 0; i < cols; ++i) {
        T ex = (x1Row[i] + x2Row[i]) - mean;
        sum_sqr += ex * ex;
      }
      T sigma = std::sqrt(eps + sum_sqr / cols);
      // Pass 3: per-element gradient; the same grad_x flows to both inputs
      // because the forward pass added them element-wise.
#pragma omp simd
      for (size_t i = 0; i < cols; ++i) {
        T grad_x = 0.f;
        T x_hat = (yRow[i] - beta[i]) / gamma[i];
        grad_x += cols * adjRow[i];
        grad_x -= sum_adj;
        grad_x -= sum_adj_x * x_hat;
        grad_x /= cols * sigma;
        gradX1Row[i] += gamma[i] * grad_x;
        gradX2Row[i] += gamma[i] * grad_x;
        gradGamma[i] += adjRow[i] * x_hat;
        gradBeta[i] += adjRow[i];
      }
    }
  }
  else {
    // No-beta variant: x_hat = y / gamma, and gradBeta is not accumulated.
#pragma omp parallel for reduction(+ : gradGamma[:cols])
    for (size_t j = 0; j < rows; ++j) {
      T * x1Row = x1 + j * cols;
      T * x2Row = x2 + j * cols;
      T * yRow = y + j * cols;
      T * adjRow = adj + j * cols;
      T *gradX1Row = gradX1 + j * cols;
      T *gradX2Row = gradX2 + j * cols;
      T sum_x = 0.f;
      T sum_adj = 0.f;
      T sum_adj_x = 0.f;
      T sum_sqr = 0.f;
#pragma omp simd reduction(+ : sum_x, sum_adj_x, sum_adj)
      for (size_t i = 0; i < cols; ++i) {
        sum_x += (x1Row[i] + x2Row[i]);
        // beta is null in this branch, so the ternary always yields 0.f.
        sum_adj_x += adjRow[i] * (yRow[i] - (beta ? beta[i] : 0.f)) / gamma[i];
        sum_adj += adjRow[i];
      }
      T mean = sum_x / cols;
#pragma omp simd reduction(+ : sum_sqr)
      for (size_t i = 0; i < cols; ++i) {
        T ex = (x1Row[i] + x2Row[i]) - mean;
        sum_sqr += ex * ex;
      }
      T sigma = std::sqrt(eps + sum_sqr / cols);
#pragma omp simd
      for (size_t i = 0; i < cols; ++i) {
        T grad_x = 0.f;
        T x_hat = yRow[i] / gamma[i];
        grad_x += cols * adjRow[i];
        grad_x -= sum_adj;
        grad_x -= sum_adj_x * x_hat;
        grad_x /= cols * sigma;
        gradX1Row[i] += gamma[i] * grad_x;
        gradX2Row[i] += gamma[i] * grad_x;
        gradGamma[i] += adjRow[i] * x_hat;
      }
    }
  }
}
data.h | /*!
* Copyright (c) 2015-2021 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <xgboost/base.h>
#include <xgboost/host_device_vector.h>
#include <xgboost/linalg.h>
#include <xgboost/span.h>
#include <xgboost/string_view.h>
#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
  kFloat32 = 1,
  kDouble = 2,
  kUInt32 = 3,
  kUInt64 = 4,
  kStr = 5
  // NOTE(review): explicit values suggest an external/serialized contract —
  // do not renumber without checking the callers of SetInfo/GetInfo.
};
/*! \brief Type of a feature: plain numeric or categorical. */
enum class FeatureType : uint8_t {
  kNumerical,
  kCategorical
};
/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of data fields in MetaInfo */
  static constexpr uint64_t kNumField = 12;
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};  // NOLINT
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};  // NOLINT
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};  // NOLINT
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;  // NOLINT
  /*!
   * \brief the index of begin and end of a group
   *  needed when the learning task is ranking.
   */
  std::vector<bst_group_t> group_ptr_;  // NOLINT
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;  // NOLINT
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  linalg::Tensor<float, 3> base_margin_;  // NOLINT
  /*!
   * \brief lower bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_lower_bound_;  // NOLINT
  /*!
   * \brief upper bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_upper_bound_;  // NOLINT
  /*!
   * \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q"
   */
  std::vector<std::string> feature_type_names;
  /*!
   * \brief Name for each feature.
   */
  std::vector<std::string> feature_names;
  /*!
   * \brief Type of each feature. Automatically set when feature_type_names is specified.
   */
  HostDeviceVector<FeatureType> feature_types;
  /*!
   * \brief Weight of each feature, used to define the probability of each feature being
   * selected when using column sampling.
   */
  HostDeviceVector<float> feature_weights;
  /*! \brief default constructor */
  MetaInfo() = default;
  // Move-only: copying is disabled because MetaInfo owns large buffers.
  MetaInfo(MetaInfo&& that) = default;
  MetaInfo& operator=(MetaInfo&& that) = default;
  MetaInfo& operator=(MetaInfo const& that) = delete;
  /*!
   * \brief Validate all metainfo.
   * \param device The device ordinal the data is expected to reside on.
   */
  void Validate(int32_t device) const;
  /*! \brief Extract the meta info for the rows selected by \p ridxs. */
  MetaInfo Slice(common::Span<int32_t const> ridxs) const;
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight.
   */
  inline bst_float GetWeight(size_t i) const {
    // Unweighted datasets keep weights_ empty; every row then has weight 1.
    return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
  inline const std::vector<size_t>& LabelAbsSort() const {
    // Cached: recomputed only when the number of labels has changed.
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
              [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param key The key of the information.
   * \param interface_str String representation of json format array interface.
   */
  void SetInfo(StringView key, StringView interface_str);
  /*! \brief Get information previously set via SetInfo. */
  void GetInfo(char const* key, bst_ulong* out_len, DataType dtype,
               const void** out_dptr) const;
  /*! \brief Set feature info (names/types) from an array of C strings. */
  void SetFeatureInfo(const char *key, const char **info, const bst_ulong size);
  void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const;
  /*!
   * \brief Extend with other MetaInfo.
   *
   * \param that The other MetaInfo object.
   *
   * \param accumulate_rows Whether rows need to be accumulated in this function.  If
   *                        client code knows number of rows in advance, set this
   *                        parameter to false.
   * \param check_column    Whether the extend method should check the consistency of
   *                        columns.
   */
  void Extend(MetaInfo const& that, bool accumulate_rows, bool check_column);

 private:
  void SetInfoFromHost(StringView key, Json arr);
  void SetInfoFromCUDA(StringView key, Json arr);
  /*! \brief argsort of labels */
  mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id{-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Hessian, used for sketching with future approx implementation. */
  common::Span<float> hess;
  /*! \brief Whether should DMatrix regenerate the batch.  Only used for GHistIndex. */
  bool regen{false};

  BatchParam() = default;
  BatchParam(int32_t device, int32_t max_bin) : gpu_id{device}, max_bin{max_bin} {}
  /**
   * \brief Get batch with sketch weighted by hessian.  The batch will be regenerated if
   *        the span is changed, so caller should keep the span for each iteration.
   */
  BatchParam(int32_t device, int32_t max_bin, common::Span<float> hessian,
             bool regenerate = false)
      : gpu_id{device}, max_bin{max_bin}, hess{hessian}, regen{regenerate} {}

  /*! \brief Parameters differ when device or bin count differ; the hessian
   *         pointer is compared only when at least one side carries one. */
  bool operator!=(const BatchParam& other) const {
    bool changed = gpu_id != other.gpu_id || max_bin != other.max_bin;
    if (hess.empty() && other.hess.empty()) {
      return changed;
    }
    return changed || hess.data() != other.hess.data();
  }
};
/*! \brief Non-owning host-side view of a SparsePage (CSR offsets + entries). */
struct HostSparsePageView {
  using Inst = common::Span<Entry const>;

  common::Span<bst_row_t const> offset;
  common::Span<Entry const> data;

  /*! \brief View of the i-th row: entries in [offset[i], offset[i+1]). */
  Inst operator[](size_t i) const {
    auto begin = offset[i];
    auto length = offset[i + 1] - begin;
    return {data.data() + begin, static_cast<Inst::index_type>(length)};
  }
  /*! \brief Number of rows (offsets hold one extra leading element). */
  size_t Size() const { return offset.empty() ? 0 : offset.size() - 1; }
};
/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  // Row id of the first row of this page (non-zero for later pages of a
  // multi-page / external-memory matrix).
  size_t base_rowid {0};
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;
  /*! \brief Get a non-owning host-side view over this page. */
  HostSparsePageView GetView() const {
    return {offset.ConstHostSpan(), data.ConstHostSpan()};
  }
  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }
  /*! \return Number of instances in the page. */
  inline size_t Size() const {
    return offset.Size() == 0 ? 0 : offset.Size() - 1;
  }
  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }
  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    // CSR offsets always keep a leading 0, so Size() == offset.Size() - 1.
    offset_vec.push_back(0);
    data.HostVector().clear();
  }
  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) {
    base_rowid = row_id;
  }
  /*! \brief Get a transposed (column-major) copy with \p num_columns columns. */
  SparsePage GetTranspose(int num_columns) const;
  /*! \brief Sort the entries inside every segment by feature value,
   *         one segment per dynamically-scheduled OMP iteration. */
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
    dmlc::OMPException exc;
#pragma omp parallel for schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      exc.Run([&]() {
        // Only sort non-empty segments.
        if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
          std::sort(
              this->data.HostVector().begin() + this->offset.HostVector()[i],
              this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
              Entry::CmpValue);
        }
      });
    }
    // Re-throw any exception captured inside the parallel region.
    exc.Rethrow();
  }
  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam  AdapterBatchT
   * \param   batch
   * \param   missing
   * \param   nthread
   *
   * \return  The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};
/*! \brief A SparsePage holding data in CSC (column-major) layout. */
class CSCPage : public SparsePage {
 public:
  CSCPage() = default;
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief A CSC-layout SparsePage whose column entries are sorted by value. */
class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() = default;
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
 * \brief A page stored in ELLPACK format.
 *
 * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
 * including CUDA-specific implementation details in the header.
 */
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case. An empty ELLPACK page is constructed with its content
   * set later by the reader.
   */
  EllpackPage();
  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
  /*! \brief Destructor. */
  ~EllpackPage();
  /*! \brief Move constructor; the page uniquely owns its implementation. */
  EllpackPage(EllpackPage&& that);
  /*! \return Number of instances in the page. */
  size_t Size() const;
  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);
  /*! \brief Access the implementation object (defined in a CUDA TU). */
  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }

 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};
class GHistIndexMatrix;
/*! \brief Interface implemented by each concrete batch iterator. */
template<typename T>
class BatchIteratorImpl {
 public:
  using iterator_category = std::forward_iterator_tag;  // NOLINT
  virtual ~BatchIteratorImpl() = default;
  /*! \brief Access the current page. */
  virtual const T& operator*() const = 0;
  /*! \brief Advance to the next page. */
  virtual BatchIteratorImpl& operator++() = 0;
  /*! \brief Whether iteration has reached the end. */
  virtual bool AtEnd() const = 0;
  /*! \brief Shared handle to the current page. */
  virtual std::shared_ptr<T const> Page() const = 0;
};
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag; // NOLINT
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
explicit BatchIterator(std::shared_ptr<BatchIteratorImpl<T>> impl) { impl_ = impl; }
BatchIterator &operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
return *this;
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
bool operator!=(const BatchIterator&) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
std::shared_ptr<T const> Page() const {
return impl_->Page();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
BatchIterator<T> begin() { return begin_iter_; } // NOLINT
BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT
private:
BatchIterator<T> begin_iter_;
};
struct XGBAPIThreadLocalEntry;
/*!
 * \brief Internal data structured used by XGBoost during training.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief Set meta information from a raw array; forwards to MetaInfo::SetInfo. */
  virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
                       size_t num) {
    this->Info().SetInfo(key, dptr, dtype, num);
  }
  /*! \brief Set meta information from a JSON array-interface string. */
  virtual void SetInfo(const char* key, std::string const& interface_str) {
    this->Info().SetInfo(key, StringView{interface_str});
  }
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /*! \brief Get thread local memory for returning data from DMatrix. */
  XGBAPIThreadLocalEntry& GetThreadLocal() const;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  /*! \brief Whether a page of type T has already been materialized. */
  template <typename T>
  bool PageExists() const;
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix();
  /*! \brief Whether the matrix is dense (no missing entries at all). */
  bool IsDense() const {
    return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
  }
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *          By default "auto" will be able to load in both local binary files.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto");
  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam  AdapterT  Type of the adapter.
   * \param [in,out]  adapter       View onto an external data.
   * \param           missing       Values to count as missing.
   * \param           nthread       Number of threads for construction.
   * \param           cache_prefix  (Optional) The cache prefix for external memory.
   *
   * \return  a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix = "");
  /**
   * \brief Create a new Quantile based DMatrix used for histogram based algorithm.
   *
   * \tparam  DataIterHandle         External iterator type, defined in C API.
   * \tparam  DMatrixHandle          DMatrix handle, defined in C API.
   * \tparam  DataIterResetCallback  Callback for reset, prototype defined in C API.
   * \tparam  XGDMatrixCallbackNext  Callback for next, prototype defined in C API.
   *
   * \param iter    External data iterator
   * \param proxy   A handle to ProxyDMatrix
   * \param reset   Callback for reset
   * \param next    Callback for next
   * \param missing Value that should be treated as missing.
   * \param nthread number of threads used for initialization.
   * \param max_bin Maximum number of bins.
   *
   * \return A created quantile based DMatrix.
   */
  template <typename DataIterHandle, typename DMatrixHandle,
            typename DataIterResetCallback, typename XGDMatrixCallbackNext>
  static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
                         DataIterResetCallback *reset,
                         XGDMatrixCallbackNext *next, float missing,
                         int nthread,
                         int max_bin);
  /**
   * \brief Create an external memory DMatrix with callbacks.
   *
   * \tparam  DataIterHandle         External iterator type, defined in C API.
   * \tparam  DMatrixHandle          DMatrix handle, defined in C API.
   * \tparam  DataIterResetCallback  Callback for reset, prototype defined in C API.
   * \tparam  XGDMatrixCallbackNext  Callback for next, prototype defined in C API.
   *
   * \param iter    External data iterator
   * \param proxy   A handle to ProxyDMatrix
   * \param reset   Callback for reset
   * \param next    Callback for next
   * \param missing Value that should be treated as missing.
   * \param nthread number of threads used for initialization.
   * \param cache   Prefix of cache file path.
   *
   * \return A created external memory DMatrix.
   */
  template <typename DataIterHandle, typename DMatrixHandle,
            typename DataIterResetCallback, typename XGDMatrixCallbackNext>
  static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
                         DataIterResetCallback *reset,
                         XGDMatrixCallbackNext *next, float missing,
                         int32_t nthread, std::string cache);
  /*! \brief Create a new DMatrix containing only the rows in \p ridxs. */
  virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;
  /*! \brief Number of rows per page in external memory.  Approximately 100MB per page for
   *  dataset with 100 features. */
  static const size_t kPageSize = 32UL << 12UL;

 protected:
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
  virtual BatchSet<GHistIndexMatrix> GetGradientIndex(const BatchParam& param) = 0;
  virtual bool EllpackExists() const = 0;
  virtual bool SparsePageExists() const = 0;
};
// Explicit specializations wiring GetBatches<T>/PageExists<T> to the
// corresponding protected virtual getter for each page type.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
  return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
  return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}
template<>
inline BatchSet<GHistIndexMatrix> DMatrix::GetBatches(const BatchParam& param) {
  return GetGradientIndex(param);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {
/*! \brief Binary (de)serialization of Entry as its two raw fields. */
template <>
struct Handler<xgboost::Entry> {
  inline static void Write(Stream* strm, const xgboost::Entry& data) {
    strm->Write(data.index);
    strm->Write(data.fvalue);
  }
  inline static bool Read(Stream* strm, xgboost::Entry* data) {
    // False when either field cannot be read (e.g. stream exhausted).
    return strm->Read(&data->index) && strm->Read(&data->fvalue);
  }
};
} // namespace serializer
} // namespace dmlc
#endif // XGBOOST_DATA_H_
|
init_then_openmp.c | // RUN: %libomp-compile-and-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include <stdio.h>
/* Initialize Argobots `num_init` times, run an OpenMP parallel region, then
 * finalize the same number of times.  Checks that user-level ABT_init calls
 * made before the first OpenMP construct do not break the runtime.
 * Returns 1 if the parallel region executed (the master thread set val). */
int test_init_then_openmp(int num_init) {
  int i;
  int val = 0;
  for (i = 0; i < num_init; i++) {
    ABT_EXIT_IF_FAIL(ABT_init(0, 0));
  }
#pragma omp parallel num_threads(NUM_TASKS)
  {
#pragma omp master
    { val = 1; }
  }
  for (i = 0; i < num_init; i++) {
    ABT_EXIT_IF_FAIL(ABT_finalize());
  }
  return val;
}
int main() {
  int rep;
  int num_failed = 0;
  /* Run the test with an increasing number of ABT_init calls each time. */
  for (rep = 0; rep < REPETITIONS; rep++) {
    // Note that Argobots will be initialized once BOLT is instantiated.
    num_failed += !test_init_then_openmp(rep + 1);
  }
  return num_failed;
}
|
omp_combinado.c | /******************************************************************************
* OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version
* FILE: omp_workshare3.c
* DESCRIPTION:
* This example attempts to show use of the parallel for construct. However
* it will generate errors at compile time. Try to determine what is causing
* the error. See omp_workshare4.c for a corrected version.
* SOURCE: Blaise Barney 5/99
* LAST REVISED:
******************************************************************************/
#include <stdio.h>
#include <omp.h>
#define N 50
#define CHUNK 5
int main () {
  int i, n, chunk, tid;
  float a[N], b[N], c[N];

  /* Some initializations */
  for (i=0; i < N; i++)
    a[i] = b[i] = i * 1.0;
  n = N;
  chunk = CHUNK;

  /* INTENTIONAL ERROR (see the file header): a combined `parallel for`
   * directive must be followed immediately by a for-loop, not by a
   * structured block, so the braces below do not compile.
   * omp_workshare4.c contains the corrected version. */
#pragma omp parallel for \
  shared(a,b,c,n) \
  private(i,tid) \
  schedule(static,chunk)
  {
    tid = omp_get_thread_num();
    for (i=0; i < n; i++)
    {
      c[i] = a[i] + b[i];
      printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
    }
  } /* end of parallel for construct */
}
|
lis_precon_jacobi.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#ifdef USE_SSE2
#include <emmintrin.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
#undef __FUNC__
#define __FUNC__ "lis_precon_create_jacobi"
/* Build the point-Jacobi preconditioner M = D:
 * store 1/diag(A) in precon->D so that applying M^{-1} later is just an
 * element-wise multiply.  The work vector matches the solver precision. */
LIS_INT lis_precon_create_jacobi(LIS_SOLVER solver, LIS_PRECON precon)
{
  LIS_INT err;

  LIS_DEBUG_FUNC_IN;

  if( solver->precision==LIS_PRECISION_DEFAULT )
  {
    err = lis_vector_duplicate(solver->A, &precon->D);
  }
  else
  {
    /* extended (double-double) precision work vector */
    err = lis_vector_duplicateex(LIS_PRECISION_QUAD,solver->A, &precon->D);
  }
  if( err )
  {
    return err;
  }

  /* D <- diag(A); D <- 1 ./ D */
  lis_matrix_get_diagonal(solver->A, precon->D);
  lis_vector_reciprocal(precon->D);

  LIS_DEBUG_FUNC_OUT;
  return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolve_jacobi"
/* Apply the Jacobi preconditioner: solve Mx = b with M = D, i.e.
 * x[i] = b[i] * (1/a_ii), using the reciprocal diagonal prepared by
 * lis_precon_create_jacobi(). */
LIS_INT lis_psolve_jacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
  LIS_INT i,n;
  LIS_SCALAR *b,*x,*d;
  LIS_PRECON precon;
  LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
  LIS_SCALAR *xl;  /* low-order words of X for double-double arithmetic */
#endif

  LIS_DEBUG_FUNC_IN;
  /*
   *  Mx = b
   *  M  = D
   */
  precon = solver->precon;
  n = precon->D->n;
  d = precon->D->value;
  b = B->value;
  x = X->value;
#ifdef USE_QUAD_PRECISION
  xl = X->value_lo;
#endif
#ifdef USE_QUAD_PRECISION
  if( B->precision==LIS_PRECISION_DEFAULT )
  {
#endif
    /* plain precision path */
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
    for(i=0; i<n; i++)
    {
      x[i] = b[i] * d[i];
    }
#ifdef USE_QUAD_PRECISION
  }
  else
  {
    /* double-double path; the private() lists name the scratch variables
     * used inside the LIS_QUAD_MULD(_SSE2) macros */
#ifdef _OPENMP
#ifndef USE_SSE2
#pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
#else
#pragma omp parallel for private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
#endif
#endif
    for(i=0; i<n; i++)
    {
#ifndef USE_SSE2
      LIS_QUAD_MULD(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
#else
      LIS_QUAD_MULD_SSE2(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
#endif
      /* x[i] = b[i] * d[i]; */
    }
  }
#endif
  LIS_DEBUG_FUNC_OUT;
  return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolvet_jacobi"
/* Transpose preconditioner solve M^T x = b.  Because M = D is diagonal,
 * M^T = M and the body is identical to lis_psolve_jacobi above. */
LIS_INT lis_psolvet_jacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
  LIS_INT i,n;
  LIS_SCALAR *b,*x,*d;
  LIS_PRECON precon;
  LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
  LIS_SCALAR *xl;  /* low-order words of X for double-double arithmetic */
#endif

  LIS_DEBUG_FUNC_IN;
  /*
   *  Mx = b
   *  M  = D
   */
  precon = solver->precon;
  n = precon->D->n;
  d = precon->D->value;
  b = B->value;
  x = X->value;
#ifdef USE_QUAD_PRECISION
  xl = X->value_lo;
#endif
#ifdef USE_QUAD_PRECISION
  if( B->precision==LIS_PRECISION_DEFAULT )
  {
#endif
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
    for(i=0; i<n; i++)
    {
      x[i] = b[i] * d[i];
    }
#ifdef USE_QUAD_PRECISION
  }
  else
  {
    /* double-double path; see lis_psolve_jacobi for the macro temporaries */
#ifdef _OPENMP
#ifndef USE_SSE2
#pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
#else
#pragma omp parallel for private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
#endif
#endif
    for(i=0; i<n; i++)
    {
#ifndef USE_SSE2
      LIS_QUAD_MULD(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
#else
      LIS_QUAD_MULD_SSE2(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
#endif
      /* x[i] = b[i] * d[i]; */
    }
  }
#endif
  LIS_DEBUG_FUNC_OUT;
  return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_precon_create_bjacobi"
/* Build the block-Jacobi preconditioner: invert the block diagonal of A
 * into precon->WD.  Falls back to point Jacobi when the (converted)
 * matrix has no block structure. */
LIS_INT lis_precon_create_bjacobi(LIS_SOLVER solver, LIS_PRECON precon)
{
  LIS_INT err;
  LIS_MATRIX A;

  LIS_DEBUG_FUNC_IN;

  A = solver->A;
  err = lis_matrix_convert_self(solver);
  if( err ) return err;

  if( !A->is_block )
  {
    /* not a block format: downgrade the option and use point Jacobi */
    solver->options[LIS_OPTIONS_PRECON] = LIS_PRECON_TYPE_JACOBI;
    precon->precon_type = LIS_PRECON_TYPE_JACOBI;
    err = lis_precon_create_jacobi(solver,precon);
    return err;
  }

  /* split A into D + (off-diagonal), copy D, and invert it in place */
  err = lis_matrix_split(A);
  if( err ) return err;
  err = lis_matrix_diag_duplicate(A->D,&precon->WD);
  if( err ) return err;
  lis_matrix_diag_copy(A->D,precon->WD);
  lis_matrix_diag_inverse(precon->WD);

  LIS_DEBUG_FUNC_OUT;
  return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolve_bjacobi"
/* Apply the block-Jacobi preconditioner: x = D^{-1} b via a matvec with
 * the pre-inverted block diagonal stored in precon->WD. */
LIS_INT lis_psolve_bjacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
  LIS_PRECON precon;

  LIS_DEBUG_FUNC_IN;
  /*
   *  Mx = b
   *  M  = D
   */
  precon = solver->precon;
  lis_matrix_diag_matvec(precon->WD,B,X);

  LIS_DEBUG_FUNC_OUT;
  return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolvet_bjacobi"
/* Transpose variant of lis_psolve_bjacobi: x = D^{-T} b using the
 * transposed block-diagonal matvec. */
LIS_INT lis_psolvet_bjacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
  LIS_PRECON precon;

  LIS_DEBUG_FUNC_IN;
  /*
   *  Mx = b
   *  M  = D
   */
  precon = solver->precon;
  lis_matrix_diag_matvect(precon->WD,B,X);

  LIS_DEBUG_FUNC_OUT;
  return LIS_SUCCESS;
}
|
Example_target_unstructured_data.1.c | /*
* @@name: target-unstructured-data.1.c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
#include <stdlib.h>
typedef struct {
double *A;
int N;
} Matrix;
/* Allocate the host buffer and create a corresponding uninitialized device
 * allocation (map(alloc:...)).  The unstructured mapping stays alive across
 * function boundaries until free_matrix() removes it. */
void init_matrix(Matrix *mat, int n)
{
  mat->A = (double *)malloc(n*sizeof(double));
  mat->N = n;
#pragma omp target enter data map(alloc:mat->A[:n])
}
/* Remove the device mapping created by init_matrix (map(delete:...) discards
 * the device data without copying it back), then release the host buffer.
 * mat->N must still hold the mapped size when the exit-data runs. */
void free_matrix(Matrix *mat)
{
#pragma omp target exit data map(delete:mat->A[:mat->N])
  mat->N = 0;
  free(mat->A);
  mat->A = NULL;
}
|
control_tool_no_ompt_support.c | // RUN: %libomp-compile-and-run
// REQUIRES: openmp-5.0
#include <omp.h>
int main()
{
  /* Smoke test: calling omp_control_tool(flush) from inside a parallel
   * region must be safe and return normally (test is built for the case
   * where no OMPT tool is attached — see the file name). */
#pragma omp parallel num_threads(1)
  {
    omp_control_tool(omp_control_tool_flush, 1, NULL);
  }
  return 0;
}
|
ranksort-openmp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define N 100000
#define NUM_THREADS 2
/* Rank sort: each element's final position equals the number of elements
 * smaller than it.  Requires all keys to be distinct (x[i] = N - i here),
 * otherwise two keys would map to the same output slot. */
int main(int argc, char *argv[])
{
  double start, stop;
  int x[N], y[N];
  int i, j, my_num, my_place, tid;

  start = omp_get_wtime();
  for (i = 0; i < N; i++) {
    x[i] = N - i;
  }
  omp_set_num_threads(NUM_THREADS);
  /* Each thread ranks its contiguous chunk of x; writes to y are disjoint
   * because all keys are distinct. */
#pragma omp parallel private(j, i, tid, my_num, my_place)
  {
    tid = omp_get_thread_num();
    for (j = tid * N / NUM_THREADS; j < tid * N / NUM_THREADS + N / NUM_THREADS; j++) {
      my_num = x[j];
      my_place = 0;
      for (i = 0; i < N; i++) {
        if (my_num > x[i]) {
          my_place++;
        }
      }
      y[my_place] = my_num;
    }
  }
  /* BUG FIX: the stop timestamp used to be taken INSIDE the parallel region,
   * so every thread raced on the shared `stop` and it could be sampled before
   * the slowest thread finished.  Take it once, after the implicit barrier at
   * the end of the region. */
  stop = omp_get_wtime();

  for (i = 0; i < N; i++)
    printf("%d\n", y[i]);
  printf("time %f\n", stop - start);
  return 0;
}
|
unfold.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/unfold.c"
#else
/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */
/* Accumulate (col2im): scatter-ADD the unfolded column buffer `finput` back
 * into `input`.  Parallelized over input planes only, because different
 * (kh, kw) offsets within one plane write to overlapping pixels. */
void THNN_(unfolded_acc)(
    THTensor *finput,
    THTensor *input,
    int kW,
    int kH,
    int dW,
    int dH,
    int padW,
    int padH,
    int nInputPlane,
    int inputWidth,
    int inputHeight,
    int outputWidth,
    int outputHeight)
{
  // This function assumes that
  // outputHeight*dH does not overflow a int64_t
  // outputWidth*dW does not overflow a int64_t
  int nip;

  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);

#pragma omp parallel for private(nip)
  for(nip = 0; nip < nInputPlane; nip++)
  {
    int kw, kh, y, x;
    int64_t ix, iy;
    for(kh = 0; kh < kH; kh++)
    {
      for(kw = 0; kw < kW; kw++)
      {
        // src: the (nip, kh, kw) slab of the column buffer; dst: the plane.
        real *src = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth);
        real *dst = input_data + nip*((size_t)inputHeight*inputWidth);
        if (padW > 0 || padH > 0) {
          int lpad,rpad;
          for(y = 0; y < outputHeight; y++) {
            iy = (int64_t)y*dH - padH + kh;
            if (iy < 0 || iy >= inputHeight) {
              // whole output row maps into the padding: nothing to add back
            } else {
              if (dW==1){
                // stride-1 fast path: accumulate one contiguous run per row,
                // clipping lpad/rpad columns that fall into the padding.
                // NOTE(review): fmaxf on int operands relies on a float
                // round-trip; an integer max() would be the natural choice —
                // confirm values stay exactly representable.
                ix = 0 - padW + kw;
                lpad = fmaxf(0,padW-kw);
                rpad = fmaxf(0,padW-(kW-kw-1));
                real *dst_slice = dst+(size_t)iy*inputWidth+ix+lpad;
                THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+lpad, 1, outputWidth - lpad - rpad); /* note: THVector_add could handle 1 value better */
              }
              else{
                // general stride: accumulate element by element, skipping
                // columns that land in the padding.
                for (x=0; x<outputWidth; x++){
                  ix = (int64_t)x*dW - padW + kw;
                  if (ix < 0 || ix >= inputWidth){
                  }else{
                    real *dst_slice = dst+(size_t)iy*inputWidth+ix;
                    THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1);
                  }
                }
              }
            }
          }
        } else {
          // no padding: every source element maps inside the plane
          for(y = 0; y < outputHeight; y++) {
            iy = (int64_t)y*dH + kh;
            ix = 0 + kw;
            if (dW == 1 ) {
              real *dst_slice = dst+(size_t)iy*inputWidth+ix;
              THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */
            }else{
              for(x = 0; x < outputWidth; x++) {
                real *dst_slice = dst+(size_t)iy*inputWidth+ix+x*dW;
                THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1);
              }
            }
          }
        }
      }
    }
  }
}
/* Gather ("im2col") pass: copies each (plane, kh, kw) shifted view of
 * `input` into a row of the unfolded buffer `finput`, zero-filling the
 * positions that fall into the padding. Fully parallel over the
 * nInputPlane*kH*kW rows since each row writes a disjoint region. */
void THNN_(unfolded_copy)(
THTensor *finput,
THTensor *input,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int nInputPlane,
int inputWidth,
int inputHeight,
int outputWidth,
int outputHeight)
{
// This function assumes that
// kH*kW does not overflow an int
// nInputPlane*kH*kW does not overflow a int64_t
// outputHeight*dH does not overflow a int64_t
// outputWidth*dW does not overflow a int64_t
int64_t k;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
#pragma omp parallel for private(k)
for(k = 0; k < (int64_t)nInputPlane*kH*kW; k++) {
/* decode the flat row index into (plane, kernel-row, kernel-col) */
int64_t nip = k / (kH*kW);
int64_t rest = k % (kH*kW);
int64_t kh = rest / kW;
int64_t kw = rest % kW;
int x, y;
int64_t ix, iy;
real *dst = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth);
real *src = input_data + nip*((size_t)inputHeight*inputWidth);
if (padW > 0 || padH > 0) {
int64_t lpad,rpad;
for(y = 0; y < outputHeight; y++) {
iy = (int64_t)y*dH - padH + kh;
if (iy < 0 || iy >= inputHeight) {
/* whole row comes from the vertical padding: zero-fill it */
memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth);
} else {
if (dW==1){
/* unit stride: zero the padded edges, memcpy the interior */
ix = 0 - padW + kw;
lpad = fmaxf(0,padW-kw);
rpad = fmaxf(0,padW-(kW-kw-1));
if (outputWidth-rpad-lpad <= 0) {
memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth);
} else {
if (lpad > 0) memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*lpad);
memcpy(dst+(size_t)y*outputWidth+lpad, src+(size_t)iy*inputWidth+ix+lpad, sizeof(real)*(outputWidth-rpad-lpad));
if (rpad > 0) memset(dst+(size_t)y*outputWidth + outputWidth - rpad, 0, sizeof(real)*rpad);
}
}
else{
/* general stride: copy or zero one element at a time */
for (x=0; x<outputWidth; x++){
ix = (int64_t)x*dW - padW + kw;
if (ix < 0 || ix >= inputWidth)
memset(dst+(size_t)y*outputWidth+x, 0, sizeof(real)*1);
else
memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix, sizeof(real)*(1));
}
}
}
}
} else {
/* no padding: straight strided copies, no zero-fill needed */
for(y = 0; y < outputHeight; y++) {
iy = (int64_t)y*dH + kh;
ix = 0 + kw;
if (dW == 1)
memcpy(dst+(size_t)y*outputWidth, src+(size_t)iy*inputWidth+ix, sizeof(real)*outputWidth);
else{
for (x=0; x<outputWidth; x++)
memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix+(int64_t)x*dW, sizeof(real)*(1));
}
}
}
}
}
#endif
|
GB_binop__div_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__div_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__div_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__div_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fc64)
// A*D function (colscale): GB (_AxD__div_fc64)
// D*A function (rowscale): GB (_DxB__div_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__div_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__div_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fc64)
// C=scalar+B GB (_bind1st__div_fc64)
// C=scalar+B' GB (_bind1st_tran__div_fc64)
// C=A+scalar GB (_bind2nd__div_fc64)
// C=A'+scalar GB (_bind2nd_tran__div_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_div (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_div (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FC64 || GxB_NO_DIV_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the "add" is complex double
// division (GB_FC64_div). Auto-generated wrapper around the template.
void GB (_Cdense_ewise3_accum__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense, op = GB_FC64_div.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix (subassign
// method 23), using the pre-sliced task list for B.
GrB_Info GB (_Cdense_accumB__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (subassign method 22).
GrB_Info GB (_Cdense_accumb__div_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block already returned.
// Left exactly as emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked), where "+" is GB_FC64_div.
// The workspace declared here is consumed/freed by the template.
GrB_Info GB (_AaddB__div_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (masked variants included) when C is sparse/hyper;
// method 08 of GB_emult.
GrB_Info GB (_AemultB_08__div_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for div (the flipped case is handled
// upstream by rewriting div(y,x) as rdiv(x,y)), so only the unflipped
// template branch is compiled here.
GrB_Info GB (_AemultB_02__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (masked variants included) when C is bitmap.
GrB_Info GB (_AemultB_bitmap__div_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply div with the scalar bound as the first argument,
// Cx [p] = x / Bx [p], skipping entries absent from the bitmap Bb.
GrB_Info GB (_bind1st__div_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_div (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply div with the scalar bound as the second argument,
// Cx [p] = Ax [p] / y, skipping entries absent from the bitmap Ab.
GrB_Info GB (_bind2nd__div_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_div (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (x, aij) ; \
}
// C = op(x, A'): transpose A and apply cij = x / aij via GB_CAST_OP.
GrB_Info GB (_bind1st_tran__div_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (aij, y) ; \
}
// C = op(A', y): transpose A and apply cij = aij / y via GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__div_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.c | //
// Created by Javier Peralta on 9/16/17.
//
#include "matrix.h"
//#include <omp.h>
// Scale vector v of length `size` in place by the factor d.
void vectorScalar (double *v, double d, int size){
    int i = 0;
    while (i < size) {
        v[i] = v[i] * d;
        ++i;
    }
}
// Element-wise difference: out[i] = v1[i] - v2[i].
void restaVector(double *v1, double *v2, double* out, int size){
    int i = 0;
    while (i < size) {
        out[i] = v1[i] - v2[i];
        ++i;
    }
}
// Element-wise sum: out[i] = v1[i] + v2[i].
void sumaVector(double *v1, double *v2, double* out, int size){
    int i = 0;
    while (i < size) {
        out[i] = v1[i] + v2[i];
        ++i;
    }
}
// Element-wise (Hadamard) product: out[i] = vec1[i] * vec2[i].
void multVector(double *vec1, double *vec2, double* out, int size){
    int i = 0;
    while (i < size) {
        out[i] = vec1[i] * vec2[i];
        ++i;
    }
}
// Dot product of vec1 and vec2 (length `size`), summed left to right.
double productoPunto(double *vec1, double *vec2, int size){
    double acc = 0;
    int i;
    for (i = 0; i < size; ++i)
        acc += vec1[i] * vec2[i];
    return acc;
}
// Element-wise product stored in vec3 (no reduction, despite the name).
void productoPuntoA(double *vec1, double *vec2, double* vec3, int size){
    int i = 0;
    while (i < size) {
        vec3[i] = vec1[i] * vec2[i];
        ++i;
    }
}
// Matrix product res = mat1 (n x m) * mat2 (p x q); requires m == p.
// On a dimension mismatch an error is reported and res is left untouched.
void multMatriz(double **mat1, double **mat2, int n, int m, int p, int q, double **res){
    //fila * columna
    if (m != p) {
        perror("Numero de filas de la primera matriz debe ser igual numero de columnas de la segunda\n");
        return;
    }
    int r, col, k;
    for (r = 0; r < n; ++r) {
        double *out_row = res[r];
        for (col = 0; col < q; ++col) {
            double acc = 0;
            for (k = 0; k < m; ++k)
                acc += mat1[r][k] * mat2[k][col];
            out_row[col] = acc;
        }
    }
}
// Matrix-vector product: res[i] = dot(mat[i], vec) for i in [0, n).
void multMatrizVect(double **mat, double *vec, int n, int m, double* res){
    for (int i = 0; i < n; i++) {
        const double *row = mat[i];
        double acc = 0;
        for (int j = 0; j < m; ++j)
            acc += row[j] * vec[j];
        res[i] = acc;
    }
}
//other
// Print n doubles with 3 decimals; non-negative values get a leading
// space so the columns line up with negative signs.
void printVect(double * a, int n){
    int i = 0;
    while (i < n) {
        if (a[i] >= 0) printf(" ");
        printf("%3.3lf ", a[i]);
        ++i;
    }
    printf("\n");
}
// Print a matrix row by row via printVect.
void printMtx(double**a, int nr, int nc){
    int r = 0;
    while (r < nr) {
        printVect(a[r], nc);
        ++r;
    }
}
// Print the transpose of `a`: column i of the matrix becomes printed
// row i, with the same sign-alignment padding as printVect.
void printMtxT(double**a, int nr, int nc){
    for (int col = 0; col < nc; ++col) {
        for (int row = 0; row < nr; ++row) {
            double val = a[row][col];
            if (val >= 0) printf(" ");
            printf("%3.3lf ", val);
        }
        printf("\n");
    }
}
// Read a binary vector file: one int (element count) followed by that
// many doubles. Returns a malloc'd array of *sz doubles, or NULL on
// open/read/allocation failure.
double *readVector(char* name, int* sz){
    FILE *f = fopen(name, "rb");
    if (!f) return NULL;
    if (fread(sz, sizeof(int), 1, f) != 1 || *sz < 0) {
        fclose(f);
        return NULL;
    }
    double *vect = (double*)malloc(sizeof(double) * *sz);
    if (!vect) {
        fclose(f);
        return NULL;
    }
    // Bug fix: the old code called fread(vect, sizeof(double), *sz, f)
    // inside a for loop over *sz iterations, consuming up to *sz * *sz
    // doubles and overwriting the vector from the start each pass when
    // the file held extra data. Read the payload exactly once.
    if (fread(vect, sizeof(double), (size_t)*sz, f) != (size_t)*sz) {
        free(vect);
        fclose(f);
        return NULL;
    }
    fclose(f);
    return vect;
}
// Read a binary matrix file: two ints (rows, cols) followed by the
// row-major payload. Rows are loaded one at a time so both allocMtx
// layouts (contiguous and per-row) work. Returns NULL if the file
// cannot be opened.
double **readMtx(char* name, int* nr, int* nc){
    FILE *f = fopen(name, "rb");
    if (!f) return NULL;
    fread(nr, sizeof(int), 1, f);
    fread(nc, sizeof(int), 1, f);
    double **mtx = allocMtx(*nr, *nc);
    int row = 0;
    while (row < *nr) {
        fread(mtx[row], sizeof(double), (unsigned int)*nc, f);
        ++row;
    }
    fclose(f);
    return mtx;
}
// Allocate an nr x nc matrix as an array of row pointers, with a hidden
// int header stored immediately before the row-pointer array. The header
// records how the payload was allocated (0 = one contiguous block,
// nr = one malloc per row); freeMtx reads it back to free correctly.
// NOTE(review): the row-pointer array starts sizeof(int) bytes into the
// allocation, which can misalign the double* slots on platforms where
// alignof(double*) > sizeof(int) — confirm on the target platform.
double **allocMtx(int nr, int nc){
double **mtx = malloc((sizeof(double*)*nr) + sizeof(int));
int *indi = (int*)mtx;
mtx = (void*)indi+ sizeof(int);
if(nr * nc * sizeof(double) < MTXMAXSIZE) {
indi[0] = 0; //indicate 1 block
// small matrix: one contiguous payload; every row points into it
mtx[0] = malloc(sizeof(double) * nr*nc);
for (int i = 1; i < nr; ++i) {
mtx[i] = mtx[i-1] + nc;
}
} else {
indi[0] = nr; //indicate nr block
// large matrix: allocate each row separately
for (int i = 0; i < nr; ++i) {
mtx[i] = malloc(sizeof(double) * nc);
}
}
return mtx;
}
// Allocate and return an n x n identity matrix.
double** allocMtxI(int n){
    double **mtx = allocMtx(n, n);
    for (int r = 0; r < n; ++r) {
        for (int c = 0; c < n; ++c) {
            mtx[r][c] = (r == c) ? 1 : 0;
        }
    }
    return mtx;
}
// Release a matrix created by allocMtx. Reads the hidden int header
// stored just before the row-pointer array: 0 means a single contiguous
// payload block, nonzero means that many per-row blocks.
void freeMtx(double**a){
if(a == NULL) return; //nothing to free...
void *indi = (void*)a - sizeof(int);
int nr = ((int*)indi)[0];
if(nr){
for (int i = 0; i < nr; ++i) free(a[i]);
}
else free(a[0]);
free(indi);
}
//
double norma2Vect(double* v, int size){
return sqrt(norma2VectSq(v, size));
}
// Squared L2 norm: sum of v[i]^2, accumulated left to right.
double norma2VectSq(double* v, int size){
    double total = 0;
    const double *p = v;
    const double *end = v + size;
    while (p != end) {
        total += (*p) * (*p);
        ++p;
    }
    return total;
}
void normalizaVect(double *v, int size){
double norm = sqrt(norma2VectSq(v, size));
for (int i = 0; i < size; i++) v[i] /= norm;
}
// Squared L2 distance between v1 and v2: sum of (v1[i]-v2[i])^2.
double diffVectSq(double* v1, double* v2, int size){
    double acc = 0;
    int i = 0;
    while (i < size) {
        const double delta = v1[i] - v2[i];
        acc += delta * delta;
        ++i;
    }
    return acc;
}
// Squared Frobenius norm of (m1 - m2) for nr x nc matrices.
// Fix: the old code indexed m1[0][i] over the full nr*nc range, which is
// only valid when allocMtx returned one contiguous payload block (small
// matrices). Index per row so it is correct for both allocMtx layouts
// and for plain arrays of row pointers.
double diffMatrizSq(double** m1, double** m2, int nr, int nc){
    double c = 0;
    for (int i = 0; i < nr; ++i) {
        for (int j = 0; j < nc; ++j) {
            double dif = m1[i][j] - m2[i][j];
            c += dif * dif;
        }
    }
    return c;
}
// Solve the diagonal system diag(a) x = b. Returns a malloc'd solution
// of length n, or NULL when some a[i] == 0 with b[i] != 0 (no solution).
// When a[i] == 0 and b[i] == 0 the component is arbitrary and set to 0.
double* diagSol(double*a , double*b, int n){
    double *vect = (double*)malloc(sizeof(double) * n);
    for (int i = 0; i < n; ++i) {
        if (a[i] == 0){
            if (b[i] != 0){
                printf("Sin solución, X%d no tiene valor\n", i);
                free(vect); // fix: do not leak the partial solution
                return NULL;
            }
            printf("Multiples Soluciones, X%d puede tener cualquier valor\n", i);
            vect[i] = 0;
            continue;
        }
        vect[i] = b[i]/a[i];
    }
    return vect;
}
// Back substitution: solve U x = b for upper-triangular a.
// Returns a malloc'd solution of length nc.
double* upperSol(double**a , double*b, int nr, int nc){
    double *vect = (double*)malloc(sizeof(double) * nc);
    int i = nr;
    while (i-- > 0) {
        double acc = b[i];
        for (int j = i + 1; j < nc; ++j)
            acc -= vect[j] * a[i][j];
        vect[i] = acc / a[i][i];
    }
    return vect;
}
// Forward substitution: solve L x = b for lower-triangular a.
// Returns a malloc'd solution of length nc.
double* lowerSol(double**a , double*b, int nr, int nc){
    double *vect = (double*)malloc(sizeof(double) * nc);
    int i = 0;
    while (i < nr) {
        double acc = b[i];
        for (int j = 0; j < i && j < nc; ++j)
            acc -= vect[j] * a[i][j];
        vect[i] = acc / a[i][i];
        ++i;
    }
    return vect;
}
// Crout-style LU factorization of `a` into separate matrices: L holds the
// computed values on/below the diagonal, U gets a unit diagonal and the
// computed values above it. Returns 1 on success, 0 when a pivot
// |l[i][i]| falls below ZERO (factorization aborted).
// NOTE(review): positions of l/u not written here are assumed to be
// zeroed by the caller (allocMtx does not zero) — confirm at call sites.
int luFactor(double** a, double **l, double **u, int nr, int nc){
for (int i = 0; i < nr; ++i) {
u[i][i] = 1;
// L entries of row i: l[i][j] = a[i][j] - sum_k l[i][k]*u[k][j]
for (int j = 0; j <= i && j <nc; ++j) {
double lij = a[i][j];
for (int k = 0; k < j; ++k) {
lij -= l[i][k]*u[k][j];
}
l[i][j] = lij;
}
// U entries of row i, divided by the pivot l[i][i]
for (int j = i+1; j < nc; ++j) {
double lij = a[i][j];
if(fabs(l[i][i]) < ZERO)
return 0;
for (int k = 0; k < i; ++k) {
lij -= l[i][k]*u[k][j];
}
lij /= l[i][i];
u[i][j] = lij;
}
}
return 1;
}
// Solve (L U) x = b: forward substitution on L, then back substitution
// on U. Returns the malloc'd solution; the intermediate vector is freed.
double* luSolver(double **l, double **u, double *b, int nr, int nc){
    double *forward = lowerSol(l, b, nr, nc);
    double *x = upperSol(u, forward, nr, nc);
    free(forward);
    return x;
}
//same as lu factor, but in 1 matrix
//same as lu factor, but in 1 matrix
// In-place packed LU: after the call, `a` holds L on/below the diagonal
// and U strictly above it (U's unit diagonal is implicit). Returns 1 on
// success, 0 when a pivot |a[i][i]| falls below ZERO.
int luFactor2(double **a, int nr, int nc){
for (int i = 0; i < nr; ++i) {
// L entries of row i (use already-computed L/U values in place)
for (int j = 0; j <= i && j <nc; ++j) {
double lij = a[i][j];
for (int k = 0; k < j; ++k) {
lij -= a[i][k]*a[k][j];
}
a[i][j] = lij;
}
// U entries of row i, divided by the pivot a[i][i]
for (int j = i+1; j < nc; ++j) {
double lij = a[i][j];
if(fabs(a[i][i]) < ZERO)
return 0;
for (int k = 0; k < i; ++k) {
lij -= a[i][k]*a[k][j];
}
lij /= a[i][i];
a[i][j] = lij;
}
}
return 1;
}
// Solve with a packed LU matrix (from luFactor2): forward substitution
// via lowerSol, then back substitution in place using the implicit unit
// diagonal of U. Returns the malloc'd solution.
double* luSolver2(double **a, double *b, int nr, int nc){
    double *sol = lowerSol(a, b, nr, nc);
    //need to do upper sol with upper a and 1 in diagonal
    int i = nr;
    while (i-- > 0) {
        double acc = sol[i];
        for (int j = i + 1; j < nc; ++j)
            acc -= sol[j] * a[i][j];
        sol[i] = acc;
    }
    return sol;
}
// Thomas algorithm for a tridiagonal system. Rows of `a` hold the three
// diagonals: a[0] = sub-diagonal, a[1] = main diagonal, a[2] =
// super-diagonal; d is the right-hand side. Returns a malloc'd solution
// of length nr. WARNING: overwrites a[2] and d in place (forward sweep).
// NOTE(review): the sweep runs to nc while back substitution uses nr —
// presumably nr == nc here; confirm at call sites.
double* triDiagSol(double **a, double *d, int nr, int nc){
double *xi = malloc(sizeof(double) * nr);
double *ax = a[0], *bx = a[1], *cx = a[2];
// normalize the first row by its pivot
cx[0] /= bx[0];
d[0] /= bx[0];
// forward elimination sweep
for (int i = 1; i < nc; ++i) {
double ptemp = bx[i] - (ax[i] * cx[i-1]);
cx[i] /= ptemp;
d[i] = (d[i] - ax[i] * d[i-1])/ptemp;
}
// back substitution
xi[nr-1] = d[nr-1];
for (int i = nr-2; i >= 0; --i) {
xi[i] = d[i] - cx[i] * xi[i+1];
}
return xi;
}
// Power iteration: returns the dominant (largest-magnitude) eigenvalue
// estimate of mat and leaves the associated unit-norm eigenvector in
// eigvec. Stops after maxIter iterations or when the residual norm drops
// to toler or below.
double potencia(double **mat, double *eigvec, int nr, int nc, int maxIter, double toler){
double error;
for (int i = 0; i < nr; ++i) eigvec[i] = 1; // initial guess: all ones
double *y = malloc(sizeof(double) * nr);
double *vt = malloc(sizeof(double) * nr);
double eigV = 0;
int i = 0;
do {
multMatrizVect(mat, eigvec, nr, nc, y);   // y = A * x
memcpy(eigvec, y, nr * sizeof(double));
normalizaVect(eigvec, nr);                // x = y / ||y||
multMatrizVect(mat, eigvec, nr, nc, vt);  // vt = A * x
eigV = productoPunto(eigvec, vt, nr);     // Rayleigh quotient estimate
memcpy(vt, eigvec, nr * sizeof(double));
vectorScalar(vt, eigV, nr);               // vt = lambda * x
restaVector(y, vt, vt, nr);               // vt = y - lambda * x (residual)
error = norma2Vect(vt, nr);
}
while(++i < maxIter && error > toler);
free(y); free(vt);
// printf("Matriz tam %d x %d\n", nr, nc);
// printf("Valor lambda %lf\n", eigV);
// printf("Iteraciones realizadas %d\n", i);
// printf("Error %g\n", error);
return eigV;
}
// Smallest-magnitude eigenvalue of mat via power iteration on mat^-1:
// the dominant eigenvalue lambda of the inverse gives 1/lambda for mat.
// The matching eigenvector is left in eigvec.
double smallestEigv(double **mat, double *eigvec, int n, int m, int maxIter, double toler){
    double **inv = allocMtx(m, n);
    inverseMtx(mat, inv, n, m);
    // Fix: forward maxIter/toler to the iteration instead of the
    // hard-coded 1000 / 0.0001 that silently ignored the caller's
    // parameters.
    double lam = potencia(inv, eigvec, m, n, maxIter, toler);
    freeMtx(inv);
    return fabs(lam) > ZERO ? 1/lam : lam;
}
// Eigenvalue of mat closest to the shift `val`: the smallest eigenvalue
// l of (mat - val*I) gives val + l. The diagonal shift is applied in
// place and restored before returning.
double nearestEigv(double **mat, double *eigvec, double val, int n, int m, int maxIter, double toler){
    int i;
    for (i = 0; i < n; ++i)
        mat[i][i] -= val;
    const double shifted = smallestEigv(mat, eigvec, n, m, maxIter, toler);
    for (i = 0; i < n; ++i)
        mat[i][i] += val; // undo the shift
    return val + shifted;
}
// Shifted inverse power iteration: estimates the eigenvalue of mat
// nearest to the shift `val` by iterating with inv(mat - val*I).
// *k accumulates the iteration count (caller initializes it), *err
// receives the final residual norm. mat is shifted in place and
// restored before returning; eigvec holds the final iterate.
double potenciaInv(double **mat, double *eigvec, double val, int n, int m, int maxIter, double toler, int *k, double *err){
for (int i = 0; i < n; ++i) {
mat[i][i] -= val;
}
double **inv = allocMtx(m, n);
inverseMtx(mat, inv, n, m);
for (int i = 0; i < n; ++i) eigvec[i] = 1; // initial guess: all ones
double *y = malloc(sizeof(double) * n);
double *px = malloc(sizeof(double) * n);
double mu = 0;
do {
multMatrizVect(inv, eigvec, n, m, y); // y = inv * x
double norm = norma2Vect(y, n);
vectorScalar(y, 1/norm, n); //x^
vectorScalar(eigvec, 1/norm, n); //w
mu = productoPunto(y, eigvec, n); // eigenvalue estimate for the inverse
memcpy(px, y, sizeof(double) * n);
vectorScalar(px, mu, n);
mu += val; // undo the shift in the reported estimate
restaVector(eigvec, px, px, n); // px = residual
memcpy(eigvec, y, sizeof(double) *n);
*k += 1;
*err = norma2Vect(px, n);
} while(*err > toler && maxIter > *k);
for (int i = 0; i < n; ++i) {
mat[i][i] += val;
}
free(px);
free(y);
freeMtx(inv);
return mu;
}
// Scan [-||mat||inf, +||mat||inf] in `sections` steps and run shifted
// inverse power iteration from each shift, collecting converged estimates
// that differ from the previously stored one. Returns a malloc'd array of
// n values; unused slots remain NAN.
double* allEigv(double **mat, int n, int m, int maxIter, double toler, int sections){
    double d = normaInf(mat, n, m);
    double delta = 2*d/sections;
    double *eigvals = (double*)malloc(sizeof(double) * n);
    for (int i = 0; i < n; ++i) eigvals[i] = NAN;
    double *eigVect = (double*)malloc(sizeof(double) * n);
    int i = 0;
    int k;
    double err;
    for (int t = 0; t <= sections; ++t) {
        k = 0;
        double aprox = -d + t*delta;
        double val = potenciaInv(mat, eigVect, aprox, n, m, maxIter, toler, &k, &err);
        // Fix: stop storing once n values are collected; the old code
        // could write past the end of eigvals when sections+1 > n and
        // more than n distinct estimates converged.
        if (i < n && (i==0 || fabs(val - eigvals[i-1]) > 0.0001) && err < toler){
            eigvals[i++] = val;
            printf("----------------\nValor mu %lf\n", val);
            printf("Iteraciones realizadas %d\n", k);
            printf("||r|| %g\n----------------\n", err);
        }
    }
    free(eigVect);
    return eigvals;
}
// Infinity norm of an n x m matrix: the maximum absolute row sum.
double normaInf(double **m1, int n, int m){
    double best = 0;
    for (int r = 0; r < n; ++r) {
        const double *row = m1[r];
        double rowsum = 0;
        for (int c = 0; c < m; ++c)
            rowsum += fabs(row[c]);
        if (rowsum > best)
            best = rowsum;
    }
    return best;
}
// Compute inv = mat^-1 by LU-factoring mat and solving one system per
// column of the identity. When luFactor hits a ~zero pivot, inv is left
// untouched and no error is reported — callers must not rely on inv in
// that case.
void inverseMtx(double **mat, double **inv, int n, int m){
double **l = allocMtx(n, m);
double **u = allocMtx(n, m);
if (luFactor(mat, l, u, n, m)){
double *b = malloc(sizeof(double) * m);
for (int i = 0; i < n; ++i) {
// b = i-th column of the identity
for (int j = 0; j < m; ++j) {
b[j] = j == i;
}
double *sol = luSolver(l, u, b, n, m);
// the solution is the i-th column of the inverse
for (int j = 0; j < n; ++j) {
inv[j][i] = sol[j];
}
free(sol);
}
free(b);
}
freeMtx(l); freeMtx(u);
}
//jacobi
// Largest absolute off-diagonal entry of mat; its indices are written to
// (*x, *y). Note: *x/*y are only assigned when a nonzero off-diagonal
// entry exists.
double valMayor(double **mat, int n, int m, int *x, int *y){
    double mayor = 0;
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < m; ++j) {
            if (i == j) continue;
            const double mag = fabs(mat[i][j]);
            if (mag > mayor) {
                mayor = mag;
                *x = i;
                *y = j;
            }
        }
    }
    return mayor;
}
//GT * A * G
//GT * A * G
// Similarity transform with the Givens rotation G in the (mi, mj) plane:
// first rotate columns mi/mj of mat, then rows mi/mj.
void givensRotate(double **mat, int n, int m, int mi, int mj, double c, double s){
    for (int i = 0; i < m; ++i) {
        const double col_mi = mat[i][mi];
        const double col_mj = mat[i][mj];
        mat[i][mi] = col_mi * c - s * col_mj;
        mat[i][mj] = col_mi * s + c * col_mj;
    }
    for (int i = 0; i < n; ++i) {
        const double row_mi = mat[mi][i];
        const double row_mj = mat[mj][i];
        mat[mi][i] = row_mi * c - s * row_mj;
        mat[mj][i] = row_mi * s + c * row_mj;
    }
}
// Right-multiply mat by the Givens rotation G in the (mi, mj) plane,
// rotating columns mi and mj. (n is unused; the signature mirrors
// givensRotate.)
void givensM(double **mat, int n, int m, int mi, int mj, double c, double s){
    for (int i = 0; i < m; ++i) {
        const double col_mi = mat[i][mi];
        const double col_mj = mat[i][mj];
        mat[i][mi] = col_mi * c - s * col_mj;
        mat[i][mj] = col_mi * s + c * col_mj;
    }
}
// Classical Jacobi eigenvalue iteration: repeatedly annihilates the
// largest off-diagonal entry of a working copy of mat with a Givens
// rotation, accumulating the rotations into eigVec. Returns a malloc'd
// array of the n diagonal values (eigenvalue estimates), or NULL when
// the matrix is already diagonal to within toler. Also prints the
// iteration count and the residual ||A V - V D||.
// NOTE(review): assumes eigVec starts as the identity and mat is
// symmetric — confirm at call sites.
double* jacobiEig(double **mat, double**eigVec, int n, int m, int maxIter, double toler){
int x, y;
double max = valMayor(mat, n, m, &x, &y);
if(max < toler) return NULL; //eigvs in diag
// work on a copy so the input matrix is preserved
double **eigvalsM = allocMtx(n, m);
for (int i = 0; i < n; ++i) memcpy(eigvalsM[i], mat[i], sizeof(double) * m);
int iter = 0;
while (max > toler && ++iter < maxIter){
// rotation parameters (c, s) that zero eigvalsM[x][y]
double d = (eigvalsM[y][y] - eigvalsM[x][x])/(2 * eigvalsM[x][y]);
double t = 1 / (fabs(d) + sqrt(1 + d*d));
t = d > 0 ? t : -t;
double c = 1/(sqrt(1 + t * t));
double s = c * t;
givensRotate(eigvalsM, n, m, x, y, c, s);
givensM(eigVec, n, n, x, y, c, s);
max = valMayor(eigvalsM, n, m, &x, &y);
}
//printf("--------\n");printMtx(eigvalsM, n, m);
//printf("--------\n");printMtx(eigVec, n, m);
printf("Iteraciones: %d\n", iter);
// report the factorization residual ||A V - V D||
double **AV = allocMtx(n, m);
multMatriz(mat, eigVec, n, m, m, n, AV);
double **VD = allocMtx(n, m);
multMatriz(eigVec, eigvalsM, n, m, m, n, VD);
printf("||AV - VD|| = %g\n", sqrt(diffMatrizSq(AV, VD, n, m)));
freeMtx(VD); freeMtx(AV);
// eigenvalue estimates are the diagonal of the rotated copy
double *eigvals = malloc(sizeof(double) * n);
for (int i = 0; i < n; ++i) {
eigvals[i] = eigvalsM[i][i];
}
freeMtx(eigvalsM);
return eigvals;
}
main.c | /**
* program: dadafilterbank
*
* Purpose: connect to a ring buffer and create Sigproc output per TAB on request
*
* A ringbuffer page is interpreted as an array of Stokes I:
* [NTABS, NCHANNELS, padded_size] = [12, 1536, > 25000]
*
* Written for the AA-Alert project, ASTRON
*
* Author: Jisk Attema, Netherlands eScience Center
* Licencse: Apache v2.0
*/
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include "dada_hdu.h"
#include "ascii_header.h"
#include "filterbank.h"
#include "config.h"
#define MAXTABS 12
int output[MAXTABS];
FILE *runlog = NULL;
#define LOG(...) {fprintf(stdout, __VA_ARGS__); fprintf(runlog, __VA_ARGS__); fflush(stdout); fflush(runlog);}
// Hardcoded parameters
const unsigned int nchannels = 1536; // Must be divisible by 6 for the current transpose/inverse implementation
const unsigned int nbit = 8;
// Parameters read from ringbuffer header block (with default to lowest data rate)
int science_case = 3;
int science_mode = 2;
int padded_size = 12500;
double min_frequency;
double bandwidth;
double ra;
double dec;
char source_name[256];
double az_start;
double za_start;
double mjd_start;
// Derived parameters (with default to lowest data rate)
double tsamp = 1.024 / 12500;
int ntimes = 12500;
int ntabs = 1;
/**
* Open a connection to the ringbuffer
*
* @param {char *} key String containing the shared memory key as hexadecimal number
* @returns {hdu *} A connected HDU
*/
dada_hdu_t *init_ringbuffer(char *key) {
// Connects to the PSRDADA ringbuffer identified by `key`, locks it for
// reading, reads and parses the first header block into the file-scope
// observation globals, and marks the header as cleared. Exits the
// process on any connection error or when a required header field is
// missing (all fields are checked before exiting, so the log lists
// every missing one).
uint64_t nbufs;
int header_incomplete = 0;
multilog_t* multilog = NULL; // TODO: See if this is used in anyway by dada
// create hdu
dada_hdu_t *hdu = dada_hdu_create (multilog);
// init key
key_t shmkey;
sscanf(key, "%x", &shmkey);
dada_hdu_set_key(hdu, shmkey);
LOG("dadafilterbank SHMKEY: %s\n", key);
// connect
if (dada_hdu_connect (hdu) < 0) {
LOG("ERROR in dada_hdu_connect\n");
exit(EXIT_FAILURE);
}
// Make data buffers readable
if (dada_hdu_lock_read(hdu) < 0) {
LOG("ERROR in dada_hdu_open_view\n");
exit(EXIT_FAILURE);
}
// get write address
char *header;
uint64_t bufsz;
header = ipcbuf_get_next_read (hdu->header_block, &bufsz);
if (! header || ! bufsz) {
LOG("ERROR. Get next header block error\n");
exit(EXIT_FAILURE);
}
// parse header: each required field sets header_incomplete when absent
// so all problems are reported before the process aborts below
if(ascii_header_get(header, "MIN_FREQUENCY", "%lf", &min_frequency) == -1) {
LOG("ERROR. MIN_FREQUENCY not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "BW", "%lf", &bandwidth) == -1) {
LOG("ERROR. BW not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "RA", "%lf", &ra) == -1) {
LOG("ERROR. RA not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "DEC", "%lf", &dec) == -1) {
LOG("ERROR. DEC not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "SOURCE", "%s", source_name) == -1) {
LOG("ERROR. SOURCE not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "AZ_START", "%lf", &az_start) == -1) {
LOG("ERROR. AZ_START not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "ZA_START", "%lf", &za_start) == -1) {
LOG("ERROR. ZA_START not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "MJD_START", "%lf", &mjd_start) == -1) {
LOG("ERROR. MJD_START not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "SCIENCE_CASE", "%i", &science_case) == -1) {
LOG("ERROR. SCIENCE_CASE not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "SCIENCE_MODE", "%i", &science_mode) == -1) {
LOG("ERROR. SCIENCE_MODE not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "PADDED_SIZE", "%i", &padded_size) == -1) {
LOG("ERROR. PADDED_SIZE not set in dada buffer\n");
header_incomplete = 1;
}
// tell the ringbuffer the header has been read
if (ipcbuf_mark_cleared(hdu->header_block) < 0) {
LOG("ERROR. Cannot mark the header as cleared\n");
exit(EXIT_FAILURE);
}
LOG("psrdada HEADER:\n%s\n", header);
if (header_incomplete) {
exit(EXIT_FAILURE);
}
return hdu;
}
/**
 * Print commandline options
 *
 * Writes the usage synopsis and an example invocation to stdout.
 */
void printOptions() {
  printf("usage: dadafilterbank -k <hexadecimal key> -l <logfile> -n <filename prefix for dumps>\n");
  // BUGFIX: the example previously named the wrong program ("dadafits")
  printf("e.g. dadafilterbank -k dada -l log.txt -n myobs\n");
  return;
}
/**
 * Parse commandline
 *
 * Recognized options:
 *   -k <hexadecimal key>  PSRDADA shared-memory key (required)
 *   -l <logfile>          log file name (required)
 *   -n <prefix>           filename prefix for output files (required)
 *   -h                    print usage and exit
 *
 * The matched option arguments are strdup'ed into *key, *logfile and
 * *prefix (caller owns the copies). Exits the program when a required
 * option is missing or an unknown option is given.
 */
void parseOptions(int argc, char *argv[], char **key, char **prefix, char **logfile) {
  int c;
  int setk=0, setl=0, setn=0;

  // NOTE: 'b:', 'c:' and 'm:' are kept in the optstring so callers that
  // still pass those options (with an argument) are silently tolerated,
  // exactly as before.
  // BUGFIX: 'h' was handled in the switch below but missing from the
  // optstring, so '-h' could never reach its case label and instead hit
  // the fatal default branch.
  while((c=getopt(argc,argv,"b:c:m:hk:l:n:"))!=-1) {
    switch(c) {
      // -k <hexadecimal_key>
      case('k'):
        *key = strdup(optarg);
        setk=1;
        break;

      // -l log file
      case('l'):
        *logfile = strdup(optarg);
        setl=1;
        break;

      // -n <filename prefix>
      case('n'):
        setn=1;
        *prefix = strdup(optarg);
        break;

      // -h: print usage and exit successfully
      case('h'):
        printOptions();
        exit(EXIT_SUCCESS);
        break;

      default:
        fprintf(stderr, "Unknown option '%c'\n", c);
        exit(EXIT_FAILURE);
        break;
    }
  }

  // All arguments are required
  if (!setk || !setl || !setn) {
    if (!setk) fprintf(stderr, "Error: DADA key not set\n");
    if (!setl) fprintf(stderr, "Error: Log file not set\n");
    if (!setn) fprintf(stderr, "Error: Filename prefix not set\n");
    exit(EXIT_FAILURE);
  }
}
/**
 * Create one SIGPROC filterbank output file per tied-array beam.
 *
 * Filenames are "<prefix>.fil" for a single beam, or
 * "<prefix>_NN.fil" (NN = 1-based beam number) otherwise.
 * Handles are stored in the global output[] array.
 */
void open_files(char *prefix, int ntabs) {
  // Channel axis is written in reverse order (see the transpose in main),
  // so the first channel in the file is the top of the band and the
  // channel offset is negative.
  double foff = -1 * bandwidth / nchannels;
  double fch1 = min_frequency + bandwidth - (bandwidth / nchannels);
  int beam;

  for (beam = 0; beam < ntabs; beam++) {
    char fname[256];

    if (ntabs == 1) {
      snprintf(fname, 256, "%s.fil", prefix);
    } else {
      snprintf(fname, 256, "%s_%02i.fil", prefix, beam + 1);
    }

    // open filterbank file; observation metadata comes from the
    // globals filled in while parsing the psrdada header
    output[beam] = filterbank_create(
      fname,        // filename
      10,           // int telescope_id,
      15,           // int machine_id,
      source_name,  // char *source_name,
      az_start,     // double az_start,
      za_start,     // double za_start,
      ra,           // double src_raj,
      dec,          // double src_dej,
      mjd_start,    // double tstart
      tsamp,        // double tsamp,
      nbit,         // int nbits,
      fch1,         // double fch1,
      foff,         // double foff,
      nchannels,    // int nchans,
      ntabs,        // int nbeams,
      beam + 1,     // int ibeam
      1             // int nifs
    );
  }
}
void close_files() {
int tab;
for (tab=0; tab<ntabs; tab++) {
filterbank_close(output[tab]);
}
}
/**
 * Catch SIGINT then sync and close files before exiting
 *
 * Flushes and closes every open filterbank file so the data written so
 * far survives an interrupted run, then terminates with failure status.
 *
 * NOTE(review): of the calls below only fsync() is on the POSIX
 * async-signal-safe list; LOG (stdio-based), filterbank_close and
 * exit() are not. In practice this handler fires once at shutdown, but
 * strictly it should only set a flag polled by the main loop -- TODO
 * confirm whether this matters for this deployment.
 *
 * @param sig signal number (unused; installed for SIGINT only)
 */
void sigint_handler (int sig) {
LOG("SIGINT received, aborting\n");
int i;
// output[] entries appear to be file descriptors (main() writes to them
// with write()); a zero entry is treated as "never opened" and skipped
for (i=0; i<ntabs; i++) {
if (output[i]) {
// push buffered samples to disk before closing
fsync(output[i]);
filterbank_close(output[i]);
}
}
exit(EXIT_FAILURE);
}
/**
 * dadafilterbank entry point.
 *
 * Connects to a psrdada ring buffer, reads observation metadata from the
 * header block, creates one filterbank file per tied-array beam, and then
 * streams data pages from the ring buffer into the files, transposing
 * each page from [tab, channel, time] to [time, channel] order (with the
 * channel axis reversed to match the negative foff in the file header).
 *
 * @return 0 on normal end-of-data; exits with EXIT_FAILURE on any error
 */
int main (int argc, char *argv[]) {
  char *key;
  char *logfile;
  char *file_prefix;

  // parse commandline
  parseOptions(argc, argv, &key, &file_prefix, &logfile);

  // set up logging
  if (logfile) {
    runlog = fopen(logfile, "w");
    if (! runlog) {
      LOG("ERROR opening logfile: %s\n", logfile);
      exit(EXIT_FAILURE);
    }
    LOG("Logging to logfile: %s\n", logfile);
    free (logfile);
  }

  // connect to ring buffer; this also parses the psrdada header and
  // fills the science_case / science_mode / padded_size globals
  dada_hdu_t *ringbuffer = init_ringbuffer(key);
  ipcbuf_t *data_block = (ipcbuf_t *) ringbuffer->data_block;
  ipcio_t *ipc = ringbuffer->data_block;

  // sampling parameters follow from the science case
  if (science_case == 3) {
    // NTIMES (12500) per 1.024 seconds -> 0.00008192 [s]
    ntimes = 12500;
    tsamp = 1.024 / 12500;
  } else if (science_case == 4) {
    // NTIMES (25000) per 1.024 seconds -> 0.00004096 [s]
    ntimes = 25000;
    tsamp = 1.024 / 25000;
  } else {
    // BUGFIX: this message previously printed science_mode instead of
    // the offending science_case value (and lacked a newline)
    LOG("Error: Illegal science case '%i'\n", science_case);
    exit(EXIT_FAILURE);
  }

  LOG("dadafilterbank version: " VERSION "\n");
  LOG("Science case = %i\n", science_case);
  LOG("Filename prefix = %s\n", file_prefix);

  if (science_mode == 0) {
    // I + TAB
    ntabs = 12;
    LOG("Science mode: 0 [I + TAB]\n");
  } else if (science_mode == 2) {
    // I + IAB
    ntabs = 1;
    LOG("Science mode: 2 [I + IAB]\n");
  } else if (science_mode == 1 || science_mode == 3) {
    LOG("Error: modes 1 [IQUV + TAB] / 3 [IQUV + IAB] not supported\n");
    exit(EXIT_FAILURE);
  } else {
    LOG("Error: Illegal science mode '%i'\n", science_mode);
    exit(EXIT_FAILURE);
  }

  // create filterbank files, and close files on C-c
  open_files(file_prefix, ntabs);
  signal(SIGINT, sigint_handler);

  // for interaction with ringbuffer
  uint64_t bufsz = ipc->curbufsz;
  char *page = NULL;

  // transpose buffer for one full page of all beams
  // (unused outer 'channel' and 'time' locals removed; the loops below
  // declare their own copies)
  int tab;
  char *buffer = malloc(ntabs * ntimes * nchannels * sizeof(char));
  if (! buffer) {
    LOG("ERROR. Could not allocate transpose buffer\n");
    exit(EXIT_FAILURE);
  }
  int page_count = 0;
  int quit = 0;

  while(!quit && !ipcbuf_eod(data_block)) {
    page = ipcbuf_get_next_read(data_block, &bufsz);
    if (! page) {
      quit = 1;
    } else {
      // page [NTABS, nchannels, time(padded_size)]
      // file [time, nchannels]
      for (tab = 0; tab < ntabs; tab++) {
        int channel;
        // process 6 channels per iteration; frequency order is reversed
        // on output to comply with the header (fch1 at top of band)
        #pragma omp parallel for
        for (channel = 0; channel < nchannels; channel+=6) {
          const char *channelA = &page[(tab*nchannels + channel + 0)*padded_size];
          const char *channelB = &page[(tab*nchannels + channel + 1)*padded_size];
          const char *channelC = &page[(tab*nchannels + channel + 2)*padded_size];
          const char *channelD = &page[(tab*nchannels + channel + 3)*padded_size];
          const char *channelE = &page[(tab*nchannels + channel + 4)*padded_size];
          const char *channelF = &page[(tab*nchannels + channel + 5)*padded_size];
          int time;
          for (time = 0; time < ntimes; time++) {
            // reverse freq order to comply with header
            buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+0)-1] = channelA[time];
            buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+1)-1] = channelB[time];
            buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+2)-1] = channelC[time];
            buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+3)-1] = channelD[time];
            buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+4)-1] = channelE[time];
            buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+5)-1] = channelF[time];
          }
        }
        // write this beam's transposed page; the return value was
        // previously ignored, so short writes went unnoticed
        ssize_t size = write(output[tab], &buffer[tab*ntimes*nchannels], sizeof(char) * ntimes * nchannels);
        if (size != (ssize_t) (sizeof(char) * ntimes * nchannels)) {
          LOG("ERROR. Short write (%zd bytes) to filterbank file for tab %i\n", size, tab);
        }
      }
      ipcbuf_mark_cleared((ipcbuf_t *) ipc);
      page_count++;
    }
  }

  if (ipcbuf_eod(data_block)) {
    LOG("End of data received\n");
  }

  // release the ringbuffer and local resources
  dada_hdu_unlock_read(ringbuffer);
  dada_hdu_disconnect(ringbuffer);
  free(buffer);

  LOG("Read %i pages\n", page_count);
  return 0;
}
|
hello_cmake.c | // Reference: https://computing.llnl.gov/tutorials/openMP/samples/C/omp_hello.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// Spawn an OpenMP thread team; every thread greets with its id and the
// master thread additionally reports the team size.
int main(int argc, char **argv)
{
  int thread_count, thread_id;

  // Both variables are private so concurrent writes cannot race.
  #pragma omp parallel private(thread_count, thread_id)
  {
    thread_id = omp_get_thread_num();
    printf("Hello Cmake from thread = %d\n", thread_id);

    // Thread 0 queries the team size from inside the parallel region.
    if (thread_id == 0) {
      thread_count = omp_get_num_threads();
      printf("Number of threads = %d\n", thread_count);
    }
  }

  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.