source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* shape.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef SHAPE_H_
#define SHAPE_H_
#include <cstring>
#include <cstdio>
#include "../dll.h"
#include "../nd4jmalloc.h"
#include "../templatemath.h"
#include "../helpers/logger.h"
#include "../pointercast.h"
#include "../cnpy/cnpy.h"
#include <op_boilerplate.h>
// Largest representable dimension value (INT32_MAX).
#define MAX_DIMENSION 0x7fffffff
#define MAX_NUM_THREADS 1024
// Maximum supported ndarray rank.
#define MAX_RANK 32
// Maximum length (in Nd4jLong entries) of a shape-info buffer:
// 2*rank entries (shape + stride) plus 4 trailing fields.
// Parenthesized so the macro expands safely inside larger expressions
// (unparenthesized, `2 * MAX_SHAPEINFOLENGTH` would expand to
// `2*2*MAX_RANK+4` and silently produce the wrong value).
#define MAX_SHAPEINFOLENGTH (2*MAX_RANK+4)
#define MAX_COORD 3
#define PREALLOC_SIZE 33554432
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/sharedmem.h>
#endif
// Marker used on function definitions in this header; expands to plain
// `inline` for both host and CUDA compilation (the former
// `#ifdef __CUDACC__` selector had identical branches).
#define INLINEDEF inline
#include "../pairwise_util.h"
#include <stdint.h>
#include <array/ArrayOptions.h>
typedef unsigned int uint;
namespace shape {
/**
 * Shape information approximating
 * the information on an ndarray:
 * per-dimension sizes and strides plus ordering,
 * rank, base offset and element-wise stride.
 * Note: the struct does not own the shape/stride
 * buffers it points to.
 */
struct ND4J_EXPORT ShapeInformation {
// All-defaulted constructor; usable from both host and device code.
_CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0)
: shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_)
{}
// per-dimension extents (presumably `rank` entries)
Nd4jLong *shape;
// per-dimension element strides (presumably `rank` entries)
Nd4jLong *stride;
// ordering character (e.g. 'c'/'f' as used by getOrder()); 0 when unset
char order;
// number of dimensions
int rank;
// base offset into the underlying buffer
int offset;
// stride for linear element-wise traversal; 0 when no such stride exists
int elementWiseStride;
};
/**
 * Indexing information
 * for bounds checking
 * of one kernel invocation.
 * (Field semantics inferred from names — confirm against users.)
 */
struct ND4J_EXPORT CurrentIndexing {
// number of elements each thread is responsible for
int numElementsPerThread;
// index at which the current block starts
int blockStartingIndex;
// first index handled by the current thread
int startingThreadIndex;
// last/ending index for the current thread (inclusive vs exclusive not shown here)
int endingThreadIndex;
};
ND4J_EXPORT _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2);
ND4J_EXPORT _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim);
template <typename T>
ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length);
ND4J_EXPORT _CUDA_HD void traceNew(int id);
ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength);
ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder);
ND4J_EXPORT _CUDA_HD bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output);
//ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer);
#ifdef __CUDACC__
template <typename T>
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager);
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size);
#endif
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret);
ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order);
ND4J_EXPORT _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order);
// check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1
template <typename T>
ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy);
ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo);
/**
* copy-paste from the Java hasDefaultStridesForShape function:
* check whether array is not permuted and has contiguous elements in memory
*/
ND4J_EXPORT _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer);
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange);
/**
* In place permute swap
* @param length
* @param shape
* @param rearrange
*/
ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange);
ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange);
/**
* Rearrange the permute indexes
* according to which dimensions are specified.
*
* For example, dimension is implicitly:
* 0,1,2
*
* If you want to do a reduce along dimensions 0 and 1,
* you need to permute the indexes to be:
* 2,0,1
*
* which will give us the ability to iterate along an element
* wise stride.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength);
/**
* This method does inplace transpose of given shapeBuffer
*
* @param shapeBuffer
*/
ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);
/**
* When 1 dimension is the whole length of the
* array
*/
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD int isVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);
/**
* Returns the shape portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
//ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 4
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(const Nd4jLong* shapeInfo);
// Byte length of a shape-info buffer, by rank or by an existing buffer.
// (The const-pointer overload was previously declared twice; the redundant
// duplicate declaration has been removed.)
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo);
/**
* Returns the rank portion of
* an information buffer
*/
ND4J_EXPORT _CUDA_HD int rank(const Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD int rank(const int *buffer);
ND4J_EXPORT _CUDA_HD int rank(const unsigned int *buffer);
// returns pointer on elementWiseStride
ND4J_EXPORT _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo);
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);
/**
* Returns the stride portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer);
/**
* Compute the length of the given shape
*/
ND4J_EXPORT _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);
/***
* Returns the offset portion of an information buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);
/**
* Returns the ordering
* for this shape information buffer
*/
ND4J_EXPORT _CUDA_HD char order(const Nd4jLong *buffer);
/**
* Returns the type
*/
ND4J_EXPORT _CUDA_HD Nd4jLong type(const Nd4jLong* shapeInfo);
/**
* Returns the element wise stride for this information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
* relative to a dimension and ordering for a reduction index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);
/**
* Iterate over a given set of indexes
* the begin and end indexes are 0 based.
* 1 padding is automatically assumed for the ending.
*
* For example if you want to iterate over 0 to 4
* it will go to 4 rather than 3.
*
* indexes should be the indexes to exclude
* indexes length should be the length of indexes
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end);
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
//#ifdef __CUDACC__
// __device__
//#endif
// ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);
/**
* Range between from and two with an
* increment of 1
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to);
/**
* Keep the given indexes
* in the data
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);
/**
* Generate reverse copy of the data
* @param data
* @param length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank,
int index,
Nd4jLong *shape,
Nd4jLong *tensorShape,
int tensorShapeLength,
int *dimension,
int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);
/**
* Computes the tensor along dimension
* offset
* @param index the index to get the offset for the tad for
* @param rank the rank of the shapes and strides
* @param info the shape information to use for tad
* @param dimension the dimensions to use for computing the tensor along dimensions
*/
// ND4J_EXPORT _CUDA_HD int offset(int index,
// int rank,
// shape::ShapeInformation *info,
// Nd4jLong *dimension,
// int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank,
volatile int length,
volatile Nd4jLong *shape,
int *dimension,
int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i);
/**
* Computes the number of tads per block
*
*/
ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads);
// ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
// int dimensionLength);
/**
* Returns a shape buffer
* for the shape information metadata.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info);
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret);
/**
* Returns the number of elements per thread
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int numElementsPerThread(int N);
/**
* Returns the block starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int blockStartingIndex(int N);
/**
* Returns the thread starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadStartingIndex(int N, int stride, int offset);
/**
* Returns the thread ending index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadEndingIndex(int N, int stride, int offset);
/**
* Returns indexing information
* for the current kernel invocation
*/
//#ifdef __CUDACC__
// __device__
//#endif
// CurrentIndexing *currentIndex(int N, int offset, int stride);
/** Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad);
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal);
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum);
/**
* Returns the prod of the data
* up to the given length
*/
ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length);
ND4J_EXPORT _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length);
/**
* Returns the rear most left over item not present in
* the dimension array. This assumes that the dimension array is sorted.
*
* For example, given a dimension array of:
* 0,2
*
* and
*
* 12,4,2,1 in data
*
* You end up with 1 (data[3])
* since the first item won't match
* the last item of the dimension array
*/
// ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices,int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices);
/**
* increment n-dimensional array by one iteration by changing coord appropriately
* for example we have array with shape {2, 3}:
* - if input coord = {0,1}, then output coord = {0,2}
* - if input coord = {0,2}, then output coord = {1,0}
* so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2}
*/
/* calculates an array buffer offset for given "index" using following formula: offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1}
* arrLen - array length
*/
ND4J_EXPORT _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen);
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen);
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order);
ND4J_EXPORT _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned);
/**
* Compute the real linear indices for the given shape and stride
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride);
/**
* Compute the real linear indices for the
* given shape buffer. Shape,stride and rank are derived
* from the buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index,Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides);
ND4J_EXPORT _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length);
ND4J_EXPORT _CUDA_HD void printIntArray(const int *arr, const int length);
ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);
template<typename T>
ND4J_EXPORT _CUDA_HD void printArray(T *arr,int length, const char *message);
ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);
// ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);
// this function checks the consistency of dimensions with the array rank (negative dimensions, too-large dimensions, too many dimensions)
// it also sorts the input array of dimensions; this operation is also necessary for creating a TAD object
ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);
// function calculates linear index of array min, min is sub-array of max, index to be returned is min-array's index and corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// function calculates absolute offset of min array, min is sub-array of max, offset to be returned corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
// dimsToExclude - should be sorted in increasing order
// dimsLen - length of dimsToExclude, if not set (= -1), then it is calculated as maxRank - minRank
ND4J_EXPORT _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// calculate indexes of max-array, these output indexes correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculate offsets of max-array, these output offsets correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculates offsets for numOfSubArrs sub-arrays, shape in this context means dominions excluded from outer array
// rank is equal to size of shape
ND4J_EXPORT void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets);
ND4J_EXPORT _CUDA_HD void shapeOldScalar(nd4j::DataType dtype, Nd4jLong* const buffer, const char order);
// calculate element-wise stride
// if array is scalar or unit length vector then ews = 1
// if array is common vector then ews = stride of non-unity dimension
// if strides are normal set ews = 1, otherwise ews = 0
ND4J_EXPORT _CUDA_HD void setEws(Nd4jLong* shapeInfo, Nd4jLong len);
//END HEADERS
//BEGIN IMPLEMENTATIONS
#ifdef __CUDACC__
    /**
     * Device-side scratch allocation helper.
     * For coordinate-sized requests it heap-allocates a small buffer (the
     * shared-memory path via UnifiedSharedMemory is commented out); larger
     * requests are served from the thread's slice of the preallocated global
     * buffer when it fits, falling back to device malloc otherwise.
     * NOTE(review): the small-size `new` result and the malloc fallback are
     * returned to the caller, who must free them — confirm call sites; the
     * template parameter T is unused in this body.
     */
    template <typename T>
    __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
        // if we go for 3 dimensions coord space or below - just use shared memory for that
        if (size <= MAX_COORD * 4) {
            Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
            return ptr;
        } else {
            // otherwise go to preallocated global memory :(
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            // would this thread's slice run past the preallocated region?
            if (tid * size > PREALLOC_SIZE - size) {
                return (Nd4jLong *) malloc(size);
            } else {
                Nd4jLong *ret = buffer;
                ret += (tid * size);
                return ret;
            }
        }
    }
#endif
#ifdef __CUDACC__
    /**
     * Returns this thread's slice (threadIdx.x * size) of a preallocated buffer.
     * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
     */
    __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
        Nd4jLong *ret = buffer;
        ret += (threadIdx.x * size);
        return ret;
    }
#endif
/**
* Length of a tad given
* the shape information
*/
INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
if(dimensionLength == 1) {
return shape::shapeOf(shapeInfo)[dimension[0]];
}
else {
int ret = 1;
for(int i = 0; i < shape::rank(shapeInfo); i++) {
for(int j = 0; j < dimensionLength; j++) {
if(i == dimension[j])
ret *= shape::shapeOf(shapeInfo)[dimension[j]];
}
}
return ret;
}
}
    /**
     * Tad element wise stride:
     * given the inner most dimension (the sorted dimension of the last)
     * the element wise stride of the tad (disregarding order) is the
     * last dimension's stride.
     *
     * For a given singular dimension this will just be the only entry.
     * For example, given the following c order shape/stride:
     * 2,2,3,2
     * 12,6,2,1
     *
     * The tad element wise stride for 3 will be 1.
     * For zero it will be 12
     *
     * For 2,3 it's 1
     *
     * Note here that the multi dimensional 2,3 case
     * is equivalent to the singular 3 case.
     *
     *
     * Note that this is for the dimension that ultimately
     * ends up removed.
     *
     * Again: this may not preserve ordering of the tad
     * but may be used for reductions.
     */
    INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) {
        // thin wrapper: the reduction-index helper implements the logic above
        return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength);
    }
INLINEDEF _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2) {
if(shape1Rank != shape2Rank)
return false;
//rank not equals
for(int i = 0; i < shape1Rank; i++) {
if(shape1[i] != shape2[i])
return false;
}
return true;
}
    // Shape-buffer overload: compares the shape sections of two shape-info buffers.
    INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) {
        return shape::shapeEquals(shape::rank(shapeInfo1), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo1)), shape::rank(shapeInfo2), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo2)));
    }
INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) {
if(shape1Rank != shape2Rank)
return false;
//rank not equals
for(int i = 0; i < shape1Rank; i++) {
if(shape1[i] != shape2[i])
return false;
}
return true;
}
    // Shape-buffer overload: compares the stride sections of two shape-info buffers.
    INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) {
        return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2));
    }
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) {
if(rank1 != rank2)
return false;
for(int i = 0; i < rank1; i++) {
if(stride1[i] != stride2[i])
return false;
}
return true;
}
INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) {
Nd4jLong *retShape;
int retShapeLength;
if(dimensionLength == 1 && dimension[0] == 2147483647) {
retShape = new Nd4jLong[2];
retShape[0] = 1;
retShape[1] = 1;
retShapeLength = 2;
}
else {
retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
}
//ensure vector is proper shape
if (retShapeLength == 1) {
if (dimension[0] == 0) {
auto newRetShape = new Nd4jLong[2]{1, retShape[0]};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
else {
auto newRetShape = new Nd4jLong[2]{retShape[0], 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
} else if (retShapeLength == 0) {
auto newRetShape = new Nd4jLong[2]{1, 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
auto ret = shape::shapeBuffer(retShapeLength, nd4j::ArrayOptions::dataType(originalShapeBuffer), retShape);
delete[] retShape;
return ret;
}
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) {
Nd4jLong *theShape = shape::shapeOf(shapeInfo);
Nd4jLong *theStride = shape::stride(shapeInfo);
int rank = dimensionLength == 1 ? 2 : dimensionLength;
Nd4jLong *ret = buffer;
//set the rank
ret[0] = rank;
Nd4jLong *retShape = shape::shapeOf(ret);
Nd4jLong *retStride = shape::stride(ret);
int len = rank;
if(dimensionLength == 1) {
if(shape::isMatrix(theShape,shape::rank(shapeInfo))) {
if(dimension[0] == 0) {
Nd4jLong newStride[2] = {theStride[dimension[0]],1};
Nd4jLong newShape[2] = {theShape[dimension[0]],1};
retShape[0] = newShape[0];
retShape[1] = newShape[1];
retStride[0] = newStride[0];
retStride[1] = newStride[1];
}
else {
Nd4jLong newStride[2] = {theStride[dimension[0]],1};
Nd4jLong newShape[2] = {theShape[dimension[0]],1};
retShape[0] = newShape[0];
retShape[1] = newShape[1];
retStride[0] = newStride[0];
retStride[1] = newStride[1];
}
}
else {
Nd4jLong newStride[2] = {1,theStride[dimension[0]]};
Nd4jLong newShape[2] = {1,theShape[dimension[0]]};
retShape[0] = newShape[0];
retShape[1] = newShape[1];
retStride[0] = newStride[0];
retStride[1] = newStride[1];
}
}
else {
Nd4jLong *newIndexes = dimension;
if(reverseCopyStride)
shape::reverseCopyTo(theStride, retStride, newIndexes, len);
else
shape::copyTo(len, theStride, retStride, newIndexes);
shape::copyTo(len, theShape, retShape, newIndexes);
}
ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo);
return ret;
}
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) {
int rank = dimensionLength == 1 ? 2 : dimensionLength;
traceNew(4);
Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];
return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret);
}
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) {
traceNew(5);
Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];
return createShapeInfo(shape, stride, rank, ret);
}
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) {
buffer[0] = rank;
Nd4jLong *retShape = shape::shapeOf(buffer);
Nd4jLong *retStride = shape::stride(buffer);
for(int i = 0;i < rank; i++) {
retShape[i] = shape[i];
retStride[i] = stride[i];
}
return buffer;
}
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) {
if (isVector(shape, rank)) {
traceNew(5);
Nd4jLong *ret = new Nd4jLong[2];
for (int i = 0; i < 2; i++)
ret[i] = 1;
return ret;
}
int dimensions = rank;
traceNew(6);
Nd4jLong *stride = new Nd4jLong[dimensions];
int st = startNum;
for (int j = 0; j < rank; j++) {
stride[j] = st;
st *= shape[j];
}
return stride;
}
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) {
if (isVector(shape, rank)) {
for (int i = 0; i < 2; i++)
ret[i] = 1;
return ret;
}
int dimensions = rank;
int st = startNum;
for (int j = 0; j < rank; j++) {
ret[j] = st;
st *= shape[j];
}
return ret;
}
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) {
traceNew(7);
Nd4jLong *stride = new Nd4jLong[rank];
if (rank == 1) {
stride[0] = 1;
return stride;
}
// if (shape::isVector(shape, rank)) {
// for (int i = 0; i < 2; i++)
// stride[i] = 1;
// return stride;
// }
int st = startNum;
for (int j = rank - 1; j >= 0; j--) {
stride[j] = st;
st *= shape[j];
}
return stride;
}
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) {
if (rank == 1) {
ret[0] = 1;
return ret;
}
// if (shape::isVector(shape, rank)) {
// for (int i = 0; i < 2; i++)
// ret[i] = 1;
// return ret;
// }
int st = startNum;
for (int j = rank - 1; j >= 0; j--) {
ret[j] = st;
st *= shape[j];
}
return ret;
}
    /**
     * Computes the standard packed array strides for a given shape,
     * fortran (column-major) order, starting stride 1.
     *
     * @param shape the shape of a matrix
     * @return heap-allocated strides for a matrix of n dimensions
     */
    INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) {
        return calcStridesFortran(shape, rank, 1);
    }

    // in-place variant writing into ret, starting stride 1
    INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) {
        return calcStridesFortran(shape, rank, 1, ret);
    }

    /**
     * Computes the standard packed array strides for a given shape,
     * c (row-major) order, starting stride 1.
     *
     * @param shape the shape of a matrix
     * @return heap-allocated strides for a matrix of n dimensions
     */
    INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) {
        return calcStrides(shape, rank, 1);
    }

    // in-place variant writing into ret, starting stride 1
    INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) {
        return calcStrides(shape, rank, 1, ret);
    }
    //////////////////////////////////////////////////////////////////////
    // Recomputes the stride section of shapeInfo in place for the given order,
    // then resets the two trailing fields.
    // Buffer layout assumed: [rank, shape(rank), stride(rank), ..., ews, order].
    INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shapeInfo, const char order) {
        int rank = shapeInfo[0];
        int doubleRank = 2*rank;

        if (rank > 0) {
            if (order == 'c') {
                shapeInfo[doubleRank] = 1;          // set unity as last stride for c order
                for (int j = 1; j < rank; ++j) {
                    // stride[rank-1-j] = stride[rank-j] * shape[rank-j], walking outwards
                    shapeInfo[doubleRank - j] = shapeInfo[doubleRank - j + 1] * shapeInfo[rank + 1 - j];
                }
            } else {
                shapeInfo[rank + 1] = 1;            // set unity as first stride for f order
                for (int j = rank + 1; j < doubleRank; ++j) {
                    // stride[k] = stride[k-1] * shape[k-1], walking inwards
                    shapeInfo[j + 1] = shapeInfo[j] * shapeInfo[j - rank];
                }
            }
        }
        // set last 2 elements in shapeInfo: ews = 1 and the order character
        shapeInfo[doubleRank + 2] = 1;
        shapeInfo[doubleRank + 3] = (int)order;
    }
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order) {
if (rank > 0) {
if (order == 'c') {
stridesOnly[rank - 1] = 1; // set unity as last stride for c order
for (int j = 1; j < rank; ++j)
stridesOnly[rank - 1 - j] = stridesOnly[rank - j] * shapeOnly[rank - j];
}
else {
stridesOnly[0] = 1; // set unity as first stride for f order
for (int j = 1; j < rank; ++j) {
stridesOnly[j] = stridesOnly[j - 1] * shapeOnly[j - 1];
}
}
}
}
// check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1
template <typename T>
INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) {
for(int i=0; i<dimSize-1; ++i)
if(dimensions[i] > dimensions[i+1])
return true;
return false;
}
    /**
     * @param toCopy the shape to copy
     * @return a copy of the original struct
     */
    INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) {
        // deep copy: the shape and stride arrays are duplicated as well,
        // so the caller owns the returned struct and both arrays
        auto copy = new ShapeInformation;

        traceNew(8);

        copy->shape = new Nd4jLong[toCopy->rank];

        memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong));

        traceNew(9);

        copy->stride = new Nd4jLong[toCopy->rank];
        for (int i = 0; i < toCopy->rank; i++) {
            copy->stride[i] = toCopy->stride[i];
        }
        copy->order = toCopy->order;
        copy->rank = toCopy->rank;
        copy->offset = toCopy->offset;
        copy->elementWiseStride = toCopy->elementWiseStride;
        return copy;
    }
    /**
     * Computes the element-wise stride of the array described by
     * (shape, stride, rank): the single constant step that walks the whole
     * buffer in linear order, or 0 when no such stride exists (array is not
     * contiguous enough). Port of numpy's no-copy-reshape stride
     * reconciliation against the flattened shape {1, prod(shape)}.
     * @param isFOrder nonzero if the array is fortran (column-major) ordered
     */
    INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) {
        if (rank == 0)
            return 1;

        if(shape::isVector(shape,rank)) {
            return stride[rank - 1];
        }

        else {
            int oldnd;
            Nd4jLong *oldDims = shape::copyOf(rank, shape);
            Nd4jLong *oldStrides = shape::copyOf(rank, stride);
            int np, op, last_stride;
            int oldStart, oldStop, ok, newStart, newStop, nk;

            traceNew(10);

            auto newStrides = new Nd4jLong[rank];
            oldnd = 0;
            //set the shape to be 1 x length
            int newShapeRank = 2;
            auto newShape = new Nd4jLong[newShapeRank];
            newShape[0] = 1;
            newShape[1] = shape::prodLong(shape, rank);

            /*
             * Remove axes with dimension 1 from the old array. They have no effect
             * but would need special cases since their strides do not matter.
             */
            for (oldStart = 0; oldStart < rank; oldStart++) {
                if (shape[oldStart] != 1) {
                    oldDims[oldnd] = shape[oldStart];
                    oldStrides[oldnd] = stride[oldStart];
                    oldnd++;
                }
            }

            np = 1;
            for (newStart = 0; newStart < newShapeRank; newStart++) {
                np *= newShape[newStart];
            }
            op = 1;
            for (oldStart = 0; oldStart < oldnd; oldStart++) {
                op *= oldDims[oldStart];
            }
            if (np != op) {
                /* different total sizes; no hope */
                delete[] newStrides;
                delete[] newShape;
                delete[] oldStrides;
                delete[] oldDims;
                return 0;
            }

            if (np == 0) {
                /* the current code does not handle 0-sized arrays, so give up */
                delete[] newStrides;
                delete[] newShape;
                delete[] oldStrides;
                delete[] oldDims;
                return 0;
            }

            /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */
            oldStart = 0;
            oldStop = 1;
            newStart = 0;
            newStop = 1;
            while (newStart < newShapeRank && oldStart < oldnd) {
                np = newShape[newStart];
                op = oldDims[oldStart];

                /* grow whichever window has the smaller product until they match */
                while (np != op) {
                    if (np < op) {
                        /* Misses trailing 1s, these are handled later */
                        np *= newShape[newStop++];
                    } else {
                        op *= oldDims[oldStop++];
                    }
                }

                /* Check whether the original axes can be combined */
                for (ok = oldStart; ok < oldStop - 1; ok++) {
                    if (isFOrder) {
                        if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) {
                            /* not contiguous enough */
                            delete[] newStrides;
                            delete[] newShape;
                            delete[] oldStrides;
                            delete[] oldDims;
                            return 0;
                        }
                    } else {
                        /* C order */
                        if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) {
                            /* not contiguous enough */
                            delete[] newStrides;
                            delete[] newShape;
                            delete[] oldStrides;
                            delete[] oldDims;
                            return 0;
                        }
                    }
                }

                /* Calculate new strides for all axes currently worked with */
                if (isFOrder) {
                    newStrides[newStart] = oldStrides[oldStart];
                    for (nk = newStart + 1; nk < newStop; nk++) {
                        newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1];
                    }
                } else {
                    /* C order */
                    newStrides[newStop - 1] = oldStrides[oldStop - 1];
                    for (nk = newStop - 1; nk > newStart; nk--) {
                        newStrides[nk - 1] = newStrides[nk] * newShape[nk];
                    }
                }
                newStart = newStop++;
                oldStart = oldStop++;
            }

            /*
             * Set strides corresponding to trailing 1s of the new shape.
             */
            if (newStart >= 1) {
                last_stride = newStrides[newStart - 1];
            } else {
                last_stride = stride[rank - 1];
            }
            if (isFOrder) {
                if (newStart >= 1)
                    last_stride *= newShape[newStart - 1];
            }
            for (nk = newStart; nk < newShapeRank; nk++) {
                newStrides[nk] = last_stride;
            }
            //returns the last element of the new stride array
            int ret = last_stride;
            delete[] newStrides;
            delete[] newShape;
            delete[] oldStrides;
            delete[] oldDims;
            return ret;
        }
    }
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder,
Nd4jLong *dimension, int dimensionLength) {
if(dimensionLength == 1) {
return stride[dimension[0]];
}
return 0;
}
/**
* Get the shape info buffer
* for the given rank and shape.
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
Nd4jLong *stride = shape::calcStrides(shape, rank);
traceNew(11);
auto shapeInfo = new shape::ShapeInformation();
shapeInfo->shape = shape;
shapeInfo->stride = stride;
shapeInfo->offset = 0;
shapeInfo->rank = rank;
int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo->order = 'c';
shapeInfo->elementWiseStride = elementWiseStride;
auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
delete[] stride;
delete shapeInfo;
nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype);
return shapeInfoBuffer;
}
/**
* This is special method, it returns ONLY 2D shapebuffer.
*
* This method is used only for SoftMax
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer) {
Nd4jLong stride[MAX_RANK];
shape::calcStrides(shape,rank, stride);
shape::ShapeInformation shapeInfo;
shapeInfo.shape = shape;
shapeInfo.stride = stride;
shapeInfo.offset = 0;
shapeInfo.rank = rank;
auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo.order = 'c';
shapeInfo.elementWiseStride = elementWiseStride;
shape::toShapeBuffer(&shapeInfo, buffer);
nd4j::ArrayOptions::setDataType(buffer, dtype);
return buffer;
}
/**
* Get the shape info buffer
* for the given rank and shape.
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
auto stride = shape::calcStridesFortran(shape,rank);
traceNew(12);
auto shapeInfo = new shape::ShapeInformation();
shapeInfo->shape = shape;
shapeInfo->stride = stride;
shapeInfo->offset = 0;
shapeInfo->rank = rank;
int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo->order = 'f';
shapeInfo->elementWiseStride = elementWiseStride;
auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
delete[] stride;
delete shapeInfo;
nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype);
return shapeInfoBuffer;
}
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output) {
Nd4jLong stride[MAX_RANK];
shape::calcStridesFortran(shape,rank, stride);
shape::ShapeInformation shapeInfo;
shapeInfo.shape = shape;
shapeInfo.stride = stride;
shapeInfo.offset = 0;
shapeInfo.rank = rank;
auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo.order = 'f';
shapeInfo.elementWiseStride = elementWiseStride;
shape::toShapeBuffer(&shapeInfo, output);
nd4j::ArrayOptions::setDataType(output, dtype);
return output;
}
/**
* Compute the real linear indices for the given shape and stride
*/
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
Nd4jLong length = shape::prodLong(shape,rank);
traceNew(13);
Nd4jLong *ret = new Nd4jLong[length];
for(int i = 0; i < length; i++) {
Nd4jLong *idx = shape::ind2sub(rank, shape, i);
ret[i] = shape::getOffset(0, shape, stride, idx, rank);
delete[] idx;
}
return ret;
}
    /**
     * Compute the real linear indices for the array described by a shape
     * buffer. Delegates to the (rank, shape, stride) overload.
     */
    INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
        return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer));
    }
/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*/
INLINEDEF _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices) {
Nd4jLong index = indices[rank-1];
Nd4jLong shift = 1;
for(int i = rank-2; i >= 0; --i) {
shift *= shape[i+1];
index += shift * indices[i];
}
return index;
}
template <typename T>
INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {
PRAGMA_OMP_SIMD
for (int e = 0; e < length; e++)
buffer[e] = value;
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
auto ret = new Nd4jLong[rank];
ind2sub(rank, shape, index, numIndices, ret);
return ret;
}
    /**
     * Convert a linear index to the equivalent nd index.
     * Infers the number of indices from the specified shape.
     *
     * @param shape the shape of the dimensions
     * @param index the index to map
     * @return heap-allocated mapped indexes along each dimension; caller owns it
     */
    INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index) {
        return ind2sub(rank,shape, index, shape::prodLong(shape,rank));
    }
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
int denom = numIndices;
for(int i = rank - 1; i >= 0; i--) {
denom /= shape[i];
ret[i] = index / denom;
index %= denom;
}
}
    /**
     * Convert a linear index to the equivalent nd index, writing into out.
     * Infers the number of indices from the specified shape.
     *
     * @param shape the shape of the dimensions
     * @param index the index to map
     * @param out receives the mapped indexes along each dimension
     */
    INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
        ind2sub(rank,shape, index, shape::prodLong(shape,rank),out);
    }
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong * ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
auto ret = new Nd4jLong[rank];
ind2subC(rank, shape, index, numIndices, ret);
return ret;
}
    /**
     * Convert a linear index to the equivalent nd index, c order.
     * Infers the number of indices from the specified shape.
     *
     * @param shape the shape of the dimensions
     * @param index the index to map
     * @return heap-allocated mapped indexes along each dimension; caller owns it
     */
    INLINEDEF _CUDA_HD Nd4jLong *ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index) {
        return ind2subC(rank,shape, index, shape::prodLong(shape,rank));
    }
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param arrLen the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *ret) {
for(int i = 0; i < rank; i++) {
arrLen /= shape[i];
if(arrLen > 0) {
ret[i] = index / arrLen;
index %= arrLen;
}
else
ret[i] = 0;
}
}
    /**
     * Convert a linear index to the equivalent nd index, c order,
     * writing into out. Infers the number of indices from the shape.
     *
     * @param shape the shape of the dimensions
     * @param index the index to map
     * @param out receives the mapped indexes along each dimension
     */
    INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
        ind2subC(rank,shape, index,shape::prodLong(shape,rank),out);
    }
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) {
const Nd4jLong ews = shapeInfo[shapeInfo[0] + shapeInfo[0] + 2];
if(ews > 0 && order(shapeInfo) == 'c')
if (ews == 1)
return index;
else
return ews * index;
Nd4jLong offset = 0;
for(int i = 1; i <= shapeInfo[0]; ++i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + shapeInfo[0]];
index %= arrLen;
}
}
return offset;
}
INLINEDEF _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen) {
const uint rank = shapeInfo[0];
const uint ews = shapeInfo[rank + rank + 2];
if(ews > 0 && shapeInfo[rank + rank + 3] == 99)
if (ews == 1)
return index;
else
return ews * index;
uint offset = 0;
for(uint i = 1; i <= rank; ++i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + rank];
index %= arrLen;
}
}
return offset;
}
INLINEDEF _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned) {
if(useUnsigned)
return getIndexOffset(static_cast<uint>(index), uShapeInfo, static_cast<uint>(arrLen));
return getIndexOffset(index, lShapeInfo, arrLen);
}
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order) {
Nd4jLong offset = 0;
if(order == 'c') {
for(int i = 1; i <= *shapeInfo; ++i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + *shapeInfo];
index %= arrLen;
}
}
}
else {
for(int i = *shapeInfo; i >= 1 ; --i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + *shapeInfo];
index %= arrLen;
}
}
}
return offset;
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out) {
if(shape::order(shapeInfo) == 'f') {
shape::ind2sub(
shape::rank(shapeInfo),
shape::shapeOf(shapeInfo),
index,
numIndices,
out);
}
else {
shape::ind2subC(
shape::rank(shapeInfo),
shape::shapeOf(shapeInfo),
index,
numIndices,
out);
}
}
    /**
     * Convert a linear index to the equivalent nd index, honouring the order
     * stored in shapeInfo. The total index count is taken from
     * shape::length(shapeInfo).
     *
     * @param shapeInfo the shape information of the array
     * @param index the index to map
     * @param out receives the mapped indexes along each dimension
     */
    INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out) {
        ind2subOrder(shapeInfo,index,shape::length(shapeInfo),out);
    }
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) {
traceNew(16);
Nd4jLong *ret = new Nd4jLong[length];
for (int i = 0; i < length; i++) {
ret[i] = shape[rearrange[i]];
}
return ret;
}
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) {
if(length == 1) {
return;
}
else {
Nd4jLong *shapeDeref = *shape;
if(shape::prodLong(shapeDeref,length) < 2) {
return;
}
}
bool inOrder = true;
for(int i = 0; i < length - 1; i++) {
inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
}
//all in order, nothing to do
if(inOrder)
return;
Nd4jLong *shapeDeref = *shape;
//we know they are just reversed, dimension length of 2
if(length == 2) {
auto shapeFirst = shapeDeref[0];
auto shapeSecond = shapeDeref[1];
shapeDeref[0] = shapeSecond;
shapeDeref[1] = shapeFirst;
return;
}
else if(length == 1) {
//no permute
return;
}
auto temp = new Nd4jLong[length];
memcpy(temp,shapeDeref,sizeof(Nd4jLong) * length);
for (int i = 0; i < length; i++) {
shapeDeref[i] = temp[rearrange[i]];
}
delete[] temp;
}
    // Copies shapeBuffer into out (unless they alias) and applies the permute
    // to out, leaving the input buffer untouched when out is distinct.
    // NOTE(review): relies on a 4-argument doPermuteShapeBuffer overload
    // declared elsewhere in this file/project.
    INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) {
        if(shapeBuffer != out)
            memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shape::rank(shapeBuffer)));

        doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out);
    }
INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) {
auto len = shape::shapeInfoLength(shape::rank(shapeBuffer));
Nd4jLong *copy = shape::copyOf(len, shapeBuffer);
doPermuteShapeBuffer(copy,rearrange);
return copy;
}
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) {
const int rank = shape::rank(shapeInfo);
//check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
return;
// check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
bool isPermutNecessary = false;
for(int i = 0; i < rank; ++i)
if(rearrange[i] != i) {
isPermutNecessary = true;
break;
}
if(!isPermutNecessary)
return;
// check whether rearrange contains correct indexes
for(int i = 0; i < rank; ++i)
if(rearrange[i] >= rank || rearrange[i] < 0) {
printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
return;
}
// if everything is ok then perform permute
auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
for (int i = 0; i < rank; ++i) {
shapeInfo[i + 1] = temp[rearrange[i] + 1];
shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];
}
shapeInfo[2 * rank + 2] = 0; // ews
shapeInfo[2 * rank + 3] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1); // order
delete[] temp;
}
/**
 * int-index overload of doPermuteShapeInfo: permutes shape/stride in place,
 * resets ews to 0 and recomputes the order flag.
 *
 * Fix: the validation only checked index range, so duplicate rearrange entries
 * slipped through and corrupted the shape; they are now rejected as well.
 */
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) {
    const int rank = shape::rank(shapeInfo);

    // single-element shapes ({1}, {1,1}, ...) need no permute
    if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;

    // identity permutation needs no permute either
    bool isPermutNecessary = false;
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] != i) {
            isPermutNecessary = true;
            break;
        }
    if(!isPermutNecessary)
        return;

    // validate: indexes in [0, rank) and pairwise distinct
    for(int i = 0; i < rank; ++i) {
        if(rearrange[i] >= rank || rearrange[i] < 0) {
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }
        for(int j = 0; j < i; ++j)
            if(rearrange[j] == rearrange[i]) {
                printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
                return;
            }
    }

    // permute through a temporary copy of the whole buffer
    auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int i = 0; i < rank; ++i) {
        shapeInfo[i + 1]        = temp[rearrange[i] + 1];          // shape
        shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];   // stride
    }
    shapeInfo[shapeInfoLength(rank) - 2] = 0; // ews unknown after permute
    shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo), shape::stride(shapeInfo), 1);
    delete[] temp;
}
/**
 * Permutes the shape and stride sections of shapeBuffer in place, then resets
 * the ews slot to 0 and refreshes the order flag.
 * NOTE(review): `rearrange` is passed to both doPermuteSwap calls without copying,
 * while the (rank, shapeBuffer, rearrange) overload below copies it before each
 * call - presumably doPermuteSwap mutates its index array; confirm against its
 * implementation.
 */
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) {
//no swapping needs to happen for a scalar
if(shape::isScalar(shapeBuffer)) {
return;
}
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
Nd4jLong *shape = shape::shapeOf(shapeRef);
Nd4jLong *stride = shape::stride(shapeRef);
shape::doPermuteSwap(rearrageRank,&shape,rearrange);
shape::doPermuteSwap(rearrageRank,&stride,rearrange);
// ews slot (second to last) reset, order slot (last) recomputed
shapeRef[shapeInfoLength(rearrageRank) - 2] = 0;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
// doPermuteShapeBuffer(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large
}
/*
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
auto shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);
shapeRef[shapeInfoLength(rearrageRank) - 2] = 0;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
}
*/
/**
 * Rank-explicit variant: permutes shape/stride of shapeBuffer in place and
 * refreshes the order/ews slots.
 * NOTE(review): `rearrange` is copied before each doPermuteSwap call - presumably
 * doPermuteSwap consumes/mutates the index array; verify, since the single-argument
 * overload above passes the same array twice without copying.
 */
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) {
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = rank;
Nd4jLong *shape = shape::shapeOf(shapeRef);
Nd4jLong *stride = shape::stride(shapeRef);
// permute the shape section using a throwaway copy of the indexes
auto rearrangeCopy1 = shape::copyOf(rearrageRank, rearrange);
shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1);
delete[] rearrangeCopy1;
// permute the stride section with a second fresh copy
auto rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange);
shape::doPermuteSwap(rearrageRank, &stride, rearrangeCopy2);
// order slot (last) recomputed, ews slot (second to last) reset to unknown
shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
shapeBuffer[shape::shapeInfoLength(rank) - 2] = 0;
delete[] rearrangeCopy2;
}
/**
 * Permute variant taking a scratch buffer.
 * NOTE(review): copyOf(rearrageRank, shapeBuffer, tmpBuffer) copies only `rank`
 * longs of the shape info into tmpBuffer, and both the swaps and the final ews /
 * order writes go through shapeRef (== shapeBuffer), not tmpBuffer. Callers such
 * as permuteShapeBufferInPlace pre-copy the full buffer into `out` before calling
 * this - confirm the intended destination semantics before changing anything here.
 */
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = rank;
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
if(shapeBuffer != tmpBuffer)
shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape,rearrange);
shape::doPermuteSwap(rearrageRank,&stride,rearrange);
// ews reset to unknown; order recomputed from the permuted shape/stride
shapeRef[shapeInfoLength(rank) - 2] = 0;
shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
}
/**
 * Builds a permutation that rotates the `dimensionLength` target axes to the
 * front of the remaining ones: e.g. originalRank 4, dimensionLength 2 yields
 * {2, 3, 0, 1}. Caller owns the returned buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) {
    const int delta = originalRank - dimensionLength;

    traceNew(17);

    auto indexes = new Nd4jLong[originalRank];
    for (int i = 0; i < originalRank; i++)
        indexes[i] = (i < delta) ? (i + dimensionLength) : (i - delta);
    return indexes;
}
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
/**
 * Determines the memory ordering of a shape/stride pair:
 *   'a' - both c- and f-contiguous, 'c' - c-contiguous (or neither),
 *   'f' - f-contiguous only.
 * @param length rank of the shape/stride arrays
 * @param elementStride base stride of a single element (usually 1)
 */
INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) {
int sd = -1;
int dim = -1;
int i = -1;
int cContiguous = 1;
int isFortran = 1;
// c-contiguity: walking dims from last to first, stride must equal the
// running product of the trailing extents
sd = 1;
for (i = length - 1; i >= 0; --i) {
dim = shape[i];
if (stride[i] != sd) {
cContiguous = 0;
break;
}
/* contiguous, if it got this far */
if (dim == 0) {
break;
}
sd *= dim;
}
/* check if fortran contiguous: same walk, first to last */
sd = elementStride;
for (i = 0; i < length; ++i) {
dim = shape[i];
if (stride[i] != sd) {
isFortran = 0;
}
if (dim == 0) {
break;
}
sd *= dim;
}
// NOTE: the final else covers the (!isFortran && cContiguous) case, also 'c'
if (isFortran && cContiguous)
return 'a';
else if (isFortran && !cContiguous)
return 'f';
else if (!isFortran && !cContiguous)
return 'c';
else
return 'c';
}
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
/**
 * Validates a permutation array: correct length, every entry within
 * [0, arrLength) and no duplicates.
 * @return 1 when valid, -1 otherwise
 */
template <typename T>
INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) {
    if (arrLength != shapeLength)
        return -1;

    // range check
    for (int i = 0; i < arrLength; i++)
        if (arr[i] < 0 || arr[i] >= arrLength)
            return -1;

    // uniqueness check (each unordered pair inspected once)
    for (int i = 0; i < arrLength; i++)
        for (int j = i + 1; j < arrLength; j++)
            if (arr[i] == arr[j])
                return -1;

    return 1;
}
// Debug hook invoked at each `new` site with a unique id so allocations can be
// traced by un-commenting the printf below; compiled as a no-op otherwise.
INLINEDEF _CUDA_HD void traceNew(int id) {
//printf("new happened: [%i]\n", id);
#ifndef __CUDACC__
//fflush(stdout);
#endif
}
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
/**
 * Permute the shape information in place.
 * @param info the shape information to permute
 * @param rearrange the permutation (0-based, length == rank)
 * @param rank the rank of the rearrange array
 *
 * Fix: the result of checkArrangeArray() was ignored, so an invalid permutation
 * (wrong length, out-of-range or duplicate indexes) would silently corrupt the
 * shape. We now validate first and bail out with a diagnostic.
 */
INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) {
    ShapeInformation *infoDeref = *info;
    if (checkArrangeArray(rearrange, rank, rank) < 0) {
        printf("shape::permute function failed: rearrange indexes are incorrect !\n");
        return;
    }
    shape::doPermuteSwap(rank, &infoDeref->shape, rearrange);
    shape::doPermuteSwap(rank, &infoDeref->stride, rearrange);
    // order flag must be refreshed after shape/stride change
    char order = getOrder(rank,
                          infoDeref->shape,
                          infoDeref->stride,
                          infoDeref->elementWiseStride);
    infoDeref->order = order;
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
/**
 * Returns 1 when the shape describes a vector: any rank-1 shape, or a rank-2
 * shape with at least one unity extent. Rank 0 and rank > 2 are never vectors.
 */
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) {
    if (rank == 0)
        return 0;
    if (rank == 1)
        return 1;
    if (rank > 2)
        return 0;
    // rank <= 2 here: a 1 x n or n x 1 matrix counts as a vector
    return (shape[0] == 1 || shape[1] == 1) ? 1 : 0;
}
/**
 * "Like a vector": rank greater than 2 with exactly one non-unity dimension.
 * posOfNonUnityDim receives the 0-based position of the last non-unity extent
 * seen (left untouched when all extents are 1).
 */
INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
    int nonUnityCount = 0;
    const int theRank = shapeInfo[0];
    for (int dim = 1; dim <= theRank; ++dim) {
        if (shapeInfo[dim] == 1)
            continue;
        ++nonUnityCount;
        posOfNonUnityDim = dim - 1;
    }
    return nonUnityCount == 1 && theRank > 2;
}
/**
 * Common vector: either a single-element array of positive rank, or a shape
 * with exactly one non-unity dimension. posOfNonUnityDim receives the 0-based
 * position of that dimension (untouched on the single-element early return).
 */
INLINEDEF _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
    if(rank(shapeInfo) > 0 && length(shapeInfo) == 1)
        return true;

    int nonUnityCount = 0;
    const int theRank = shapeInfo[0];
    for (int dim = 1; dim <= theRank; ++dim) {
        if (shapeInfo[dim] == 1)
            continue;
        ++nonUnityCount;
        posOfNonUnityDim = dim - 1;
    }
    return nonUnityCount == 1;
}
/**
 * Heap-allocated clone of a full shape-info buffer; caller owns the result.
 */
INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) {
    const auto numElements = shape::shapeInfoLength(originalShape);
    auto detached = new Nd4jLong[numElements];
    memcpy(detached, originalShape, shape::shapeInfoByteLength(originalShape));
    return detached;
}

/**
 * Identical to detachShape: heap-allocated clone of the shape-info buffer.
 */
INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) {
    const auto numElements = shape::shapeInfoLength(originalShape);
    auto duplicate = new Nd4jLong[numElements];
    memcpy(duplicate, originalShape, shape::shapeInfoByteLength(originalShape));
    return duplicate;
}
/**
 * Shape-info overload: delegates to the (shape, rank) variant.
 */
INLINEDEF _CUDA_HD int isVector(const Nd4jLong *shapeInfo) {
    auto mutableInfo = const_cast<Nd4jLong*>(shapeInfo);
    return isVector(shape::shapeOf(mutableInfo), shape::rank(shapeInfo));
}

/**
 * Row vector: a vector whose first extent is 1.
 */
INLINEDEF _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo))[0] == 1;
}

/**
 * Column vector: a vector whose first extent is NOT 1.
 */
INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(shapeInfo)[0] != 1;
}
/**
 * Returns 1 when some single extent equals the total element count, i.e. the
 * array is "flat" along one dimension with all others unity.
 *
 * Fix: shape::prod(shape, rank) was recomputed on every loop iteration even
 * though it is loop-invariant (O(rank^2) work); it is now hoisted.
 */
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) {
    const auto totalLength = shape::prod(shape, rank);
    for(int i = 0; i < rank; i++) {
        if(shape[i] == totalLength)
            return 1;
    }
    return 0;
}
/**
 * Shape-info overload: delegates to the (shape, rank) variant.
 */
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) {
    auto theShape = shape::shapeOf(shapeInfo);
    return oneDimEqualToLength(theShape, shape::rank(shapeInfo));
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
/**
 * Returns 1 when the shape is a proper matrix: rank exactly 2 with neither
 * extent equal to 1 (those degenerate cases are vectors).
 *
 * Fix: for rank < 2 the old code still evaluated shape[0]/shape[1], reading
 * past the end of a rank-0/rank-1 shape array. A matrix is strictly rank 2,
 * so non-rank-2 inputs now return 0 without touching the array.
 */
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) {
    if (rank != 2)
        return 0;
    if (shape[0] == 1 || shape[1] == 1)
        return 0;
    return 1;
}
/**
 * Shape-info overload: delegates to the (shape, rank) variant.
 */
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) {
    auto theShape = shape::shapeOf(shapeInfo);
    return isMatrix(theShape, shape::rank(shapeInfo));
}
/**
* Returns the shape portion of an information
* buffer
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) {
// shape extents start immediately after the rank stored in slot 0
return buffer + 1;
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
/**
 * Allocating copy: returns a new buffer holding `length` elements of `toCopy`.
 * Caller owns the returned memory.
 */
template <typename T>
INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) {
    traceNew(18);

    auto duplicate = new T[length];
    return copyOf(length, toCopy, duplicate);
}

/**
 * Non-allocating copy into a caller-provided buffer `ret`; returns `ret`.
 */
template <typename T>
INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) {
    memcpy(ret, toCopy, sizeof(T) * length);
    return ret;
}

/**
 * Plain element copy of `length` elements from `from` into `to`.
 */
template <typename T>
INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) {
    memcpy(to, from, sizeof(T) * length);
}

/**
 * Gather copy: to[i] = from[indexes[i]] for every i in [0, length).
 */
INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) {
    for (int pos = 0; pos < length; pos++)
        to[pos] = from[indexes[pos]];
}
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
/*
INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
checkArrangeArray(rearrange, shapeRank, shapeRank);
Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
delete[] strideCopy;
return newStride;
}
*/
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) {
// slicing a shape == dropping its first extent
return shape + 1;
}
// number of slices along the first dimension == the first extent
INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) {
return static_cast<int>(shape::shapeOf(shapeBuffer)[0]);
}
/**
 * Builds a new shape-info buffer describing slice `sliceIdx` of `shapeBuffer`
 * (the view obtained by fixing the first dimension). Result rank is rank-1 but
 * never below 2. Caller owns the returned buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
int newRank = rank - 1;
if(newRank < 2)
newRank = 2;
Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)];
newShapeBuffer[0] = newRank;
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
Nd4jLong *currStride = shape::stride(shapeBuffer);
//initialize new shape and stride by taking the shape and stride + 1
//and adding to the shape information
//a slice is always just taking the existing shape and cutting the first index off
//of the shape and stride
Nd4jLong *newShape = shape::shapeOf(newShapeBuffer);
Nd4jLong *newStride = shape::stride(newShapeBuffer);
if(shape::isVector(shapeBuffer)) {
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
//row vector: slice index 0 is a valid index, just copy the whole thing
if(currShape[0] == 1) {
if(sliceIdx == 0) {
memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer)));
return newShapeBuffer;
}
}
//column vector: this will be a scalar
else {
delete[] newShapeBuffer;
Nd4jLong *scalar = shape::createScalarShapeInfo();
int offset = shape::offset(shapeBuffer);
// offset slot of the scalar buffer (shapeInfoLength(2) - 3)
scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
return scalar;
}
}
else if(shape::isMatrix(shapeBuffer)) {
// matrix slice -> a 1 x n row view reusing the column stride
newShape[0] = 1;
newShape[1] = currShape[1];
newStride[0] = 1;
newStride[1] = currStride[1];
}
else {
// general case: drop the leading dimension
for(int i = 0; i < newRank; i++) {
newShape[i] = currShape[i + 1];
newStride[i] = currStride[i + 1];
}
}
// compute the base offset of the slice: all indices 0 except the first
auto indices = new Nd4jLong[rank];
memset((void *) indices,0,rank * sizeof(Nd4jLong));
indices[0] = sliceIdx;
// NOTE(review): getOffset is called with the ORIGINAL rank while newShape /
// newStride hold only newRank entries - for rank > newRank the loop inside
// getOffset may read one element past those arrays; confirm intent.
Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank);
newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
if(shape::isMatrix(shapeBuffer)) {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1];
}
else {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer);
}
newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1);
delete[] indices;
return newShapeBuffer;
}
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 3
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
INLINEDEF _CUDA_HD int shapeInfoLength(int rank) {
// layout: rank, shape[rank], stride[rank], type/extra, ews, order -> 2*rank + 4 longs
return rank * 2 + 4;
}
INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) {
// rank lives in slot 0
return shapeInfoLength(static_cast<int>(shape[0]));
}
INLINEDEF _CUDA_HD int shapeInfoLength(const Nd4jLong* shape) {
// const overload; rank lives in slot 0
return shapeInfoLength(static_cast<int>(shape[0]));
}
/**
 * Byte size of a full shape-info buffer: (2 * rank + 4) longs.
 */
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) {
    return shapeInfoLength(rank) * sizeof(Nd4jLong);
}

/**
 * Buffer overload: rank is read from slot 0.
 */
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo) {
    return shapeInfoByteLength(static_cast<int>(shapeInfo[0]));
}
/**
* Returns the rank portion of
* an information buffer
*/
// rank is always slot 0 of a shape-info buffer
INLINEDEF _CUDA_HD int rank(const Nd4jLong *buffer) {
return static_cast<int>(buffer[0]);
}
// int-buffer overload
INLINEDEF _CUDA_HD int rank(const int *buffer) {
return buffer[0];
}
// unsigned-int-buffer overload
INLINEDEF _CUDA_HD int rank(const unsigned int *buffer) {
return static_cast<int>(buffer[0]);
}
// pointer to the element-wise-stride slot: 2*rank + 2 == shapeInfoLength(rank) - 2
INLINEDEF _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo) {
return shapeInfo + 2 * shapeInfo[0] + 2;
}
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
/**
 * Deserializes a raw shape-info buffer into a ShapeInformation struct.
 * The struct's shape/stride pointers alias into `buffer` (no copies are made);
 * caller owns the returned struct.
 *
 * Fix: info->stride was assigned twice with the same value (once directly, once
 * via a redundant local); the dead duplicate assignment is removed.
 */
INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) {
    traceNew(19);

    auto info = new ShapeInformation;
    auto length = shapeInfoLength(rank(buffer));
    auto rank = buffer[0];

    // layout: rank, shape[rank], stride[rank], ..., offset, ews, order
    info->shape = buffer + 1;
    info->stride = buffer + (1 + rank);
    info->rank = rank;
    info->offset = buffer[length - 3];
    info->elementWiseStride = buffer[length - 2];
    info->order = (char) buffer[length - 1];
    return info;
}
/**
* Returns the stride portion of an information
* buffer
*/
INLINEDEF _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer) {
// strides follow the rank slot and the rank shape extents
return buffer + (1 + rank(buffer));
}
// true when the ARRAY_EMPTY flag is set in the extra/flags slot (see ArrayOptions)
INLINEDEF _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo) {
return ((shape::extra(const_cast<Nd4jLong*>(shapeInfo)) & ARRAY_EMPTY) == ARRAY_EMPTY);
}
/**
* Compute the length of the given shape
*/
/**
 * Number of elements described by a shape-info buffer:
 * 0 for empty arrays, 1 for rank-0 scalars, product of extents otherwise.
 */
INLINEDEF _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo) {
    const int theRank = shape::rank(shapeInfo);
    switch (theRank) {
        case 0:
            return isEmpty(shapeInfo) ? 0L : 1L;
        case 1:
            // single extent, right after the rank slot
            return shapeInfo[1];
        default:
            return shape::prodLong(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), theRank);
    }
}

/**
 * Product of all extents in an int initializer list.
 */
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) {
    Nd4jLong product = 1;
    for (auto it = shape.begin(); it != shape.end(); ++it)
        product *= *it;
    return product;
}

/**
 * Product of all extents in a Nd4jLong initializer list.
 */
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) {
    Nd4jLong product = 1;
    for (auto it = shape.begin(); it != shape.end(); ++it)
        product *= *it;
    return product;
}
/***
* Returns the offset
* portion of an information buffer
*/
/***
 * Returns the offset portion of an information buffer
 * (slot shapeInfoLength - 3).
 */
INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) {
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
/**
 * Mutable reference to the "extra" flags slot used by ArrayOptions.
 * NOTE(review): this is the same slot offset() reads - the historical offset
 * slot appears to have been repurposed for flags/type; confirm against
 * ArrayOptions.h before relying on either meaning.
 */
INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) {
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
/**
 * Returns the ordering char ('c'/'f'/'a') stored in the last slot
 * (index 2*rank + 4 - 1).
 */
INLINEDEF _CUDA_HD char order(const Nd4jLong *buffer) {
return static_cast<char>(buffer[(buffer[0] * 2 + 4) - 1]);
}
/**
 * Returns the type slot (index 2*rank + 1, right after the strides).
 */
INLINEDEF _CUDA_HD Nd4jLong type(const Nd4jLong *shapeInfo) {
return shapeInfo[2 * shapeInfo[0] + 1];
}
/**
 * Returns the element-wise stride (second-to-last slot) of the buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer) {
return buffer[shapeInfoLength(static_cast<int>(buffer[0])) - 2];
}
/**
* Returns the element wise stride for this information
* buffer relative to a dimension and reduction index
*/
/**
 * Element-wise stride to use when iterating the reduction TADs of `buffer`
 * along `dimension`. The representative stride depends on the buffer ordering
 * ('c' vs 'f') and on whether one or several dimensions are being reduced.
 */
INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) {
if(dimensionLength > 1) {
if(shape::order(buffer) == 'f') {
// multiple dims, fortran order: when the last reduced dim is non-unity,
// iterate using the stride of the FIRST reduced dimension
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
//int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
//return tadElementWiseStride;
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
// last reduced dim is unity -> contiguous iteration
return 1;
}
else {
// multiple dims, c order: use the stride of the LAST reduced dimension
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
// last reduced dim is unity -> contiguous iteration
return 1;
}
}
else {
if(shape::order(buffer) == 'f') {
// single reduced dim, fortran order: its own stride
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
else {
// single reduced dim, c order (dimension[0] == dimension[dimensionLength - 1])
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
}
}
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
/**
 * Scalar test on a shape-info buffer: rank 0, or rank 1/2 with every extent 1.
 */
INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) {
    const int theRank = shape::rank(info);
    if (theRank > 2)
        return 0;
    if (theRank == 0)
        return 1;
    auto theShape = shape::shapeOf(info);
    if (theRank == 1)
        return theShape[0] == 1;
    // rank 2: scalar only when it is a 1 x 1 matrix
    return (theShape[0] == 1 && theShape[1] == 1) ? 1 : 0;
}
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
/**
 * Scalar test on a ShapeInformation struct: rank 0, or rank 1/2 with every
 * extent equal to 1.
 *
 * Fix: rank 0 previously fell through to `return 0`, disagreeing with the
 * isScalar(Nd4jLong*) overload above, which treats rank 0 as a scalar. The two
 * overloads now agree.
 */
INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) {
    const int rank = info->rank;
    if(rank > 2)
        return 0;
    if(rank == 0)
        return 1; // consistent with isScalar(Nd4jLong*)
    if(rank == 1)
        return info->shape[0] == 1;
    if(rank == 2)
        return info->shape[0] == 1 && info->shape[1] == 1;
    return 0;
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
/**
 * Copies into `ret` every element of `data` whose index does NOT appear in
 * `indexes`, stopping once dataLength - indexesLength elements are written.
 */
template <typename T1, typename T2>
INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) {
    const int targetLength = dataLength - indexesLength;
    int written = 0;
    for (int i = 0; i < dataLength; i++) {
        if (written >= targetLength)
            break;
        // is i one of the removed indexes?
        bool skip = false;
        for (int j = 0; j < indexesLength; j++) {
            if (indexes[j] == i) {
                skip = true;
                break;
            }
        }
        if (!skip)
            ret[written++] = data[i];
    }
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
/**
 * Allocating variant: returns a new zero-initialized buffer of
 * dataLength - indexesLength elements with the indexed entries removed.
 * Caller owns the result.
 *
 * Fix: the warning message claimed a "<= 0 length" while the condition checked
 * `< 0` (negative only), and it lacked a trailing newline; the message now
 * matches the condition and is newline-terminated.
 */
template <typename T1, typename T2>
INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) {
    auto lengthOfArr = dataLength - indexesLength;
    if(lengthOfArr < 0) {
        printf("Remove index call created a negative length array. This was likely not intended.\n");
    }

    auto ret = new T1[lengthOfArr];
    memset(ret, 0, sizeof(T1) * lengthOfArr);
    removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret);
    return ret;
}
/**
 * Returns every index in [begin, end) that is NOT listed in `indexes`.
 * Caller owns the returned buffer.
 * NOTE(review): the allocation size `end - indexesLength` is only exact when
 * begin == 0 and every entry of `indexes` falls inside [begin, end); for
 * begin > 0 the buffer is over-allocated - confirm callers' expectations.
 */
INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) {
int len = end - indexesLength;

traceNew(20);

auto ret = new Nd4jLong[len];
int retIdx = 0;
//note here that we do 0 based indexing for end - this assumes things like:
//0 to 4 are specified
for(int i = begin; i < end ; i++) {
bool found = false;
for(int j = 0; j < indexesLength; j++) {
if(indexes[j] == i) {
found = true;
break;
}
}
if(!found) {
ret[retIdx++] = i;
}
}
return ret;
}
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
#ifdef __CUDACC__
// Device-only: each thread of the block addresses its own element,
// stepping from `offset` by the element-wise stride.
INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
return offset + threadIdx.x * xInfo->elementWiseStride;
}
#endif
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
/**
 * Promotes a 1-d extent to a rank-2 vector shape: dimension 0 produces a row
 * vector {1, n}, anything else a column vector {n, 1}. Caller owns the result.
 */
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) {
    traceNew(21);

    auto vectorShape = new Nd4jLong[2];
    if (dimension == 0) {
        vectorShape[0] = 1;
        vectorShape[1] = shape[0];
    } else {
        vectorShape[0] = shape[0];
        vectorShape[1] = 1;
    }
    return vectorShape;
}
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) {
// row-vector variant: delegates with dimension = 0
return ensureVectorShape(shape, 0);
}
/**
* This method does STRICT comparison for two shape buffers
*
* @param shape
* @return
*/
/**
 * STRICT equality: every slot of the two shape-info buffers must match
 * (rank, shape, stride, type/extra, ews, order).
 */
INLINEDEF _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true; // two rank-0 buffers are always equal

    const int infoLength = shape::shapeInfoLength(shapeA[0]);
    for (int e = 1; e < infoLength; e++)
        if (shapeA[e] != shapeB[e])
            return false;
    return true;
}

/**
 * Like equalsStrict, but ignores the type slot (position length - 3), so two
 * buffers with identical geometry but different data types still match.
 */
INLINEDEF _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;

    const int infoLength = shape::shapeInfoLength(shapeA[0]);
    const int typePos = infoLength - 3; // type position, neglect it
    for (int e = 1; e < infoLength; e++)
        if (e != typePos && shapeA[e] != shapeB[e])
            return false;
    return true;
}

/**
 * Extent of dimension `dim`; negative dims count back from the end
 * (python-style indexing).
 */
INLINEDEF _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim) {
    const int normalized = (dim >= 0) ? dim : rank(shape) + dim;
    return shape[1 + normalized];
}

/**
 * SOFT equality: compares rank and shape extents only; strides, ews and order
 * are ignored.
 */
INLINEDEF _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;

    const auto theRank = shapeA[0];
    for (int e = 1; e <= theRank; e++)
        if (shapeA[e] != shapeB[e])
            return false;
    return true;
}

/**
 * Soft shape equality plus a matching type slot (index shapeInfoLength - 3).
 */
INLINEDEF _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (!equalsSoft(shapeA, shapeB))
        return false;
    return shapeA[shapeInfoLength(shapeA) - 3] == shapeB[shapeInfoLength(shapeB) - 3];
}
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
/**
 * Generate a buffer containing the arithmetic sequence from `from` toward `to`
 * with step `increment`. Ascending ranges start at `from`; descending ranges
 * historically start at `from - 1` (0-based end semantics). Caller owns the
 * returned buffer.
 *
 * Fix: the element count used truncating division (diff / increment), which
 * dropped the final element whenever diff was not a multiple of increment —
 * e.g. range(0, 5, 2) produced {0, 2} instead of {0, 2, 4}. The count (and
 * allocation) now use ceiling division.
 */
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to, int increment) {
    int diff = nd4j::math::nd4j_abs<int>(from - to);
    // ceiling division so a trailing partial step still yields its element
    int retLength = (diff + increment - 1) / increment;
    T *ret;

    traceNew(22);

    // always allocate at least one element (matches prior behavior for empty ranges)
    if (retLength < 1)
        ret = new T[1];
    else
        ret = new T[retLength];

    if (from < to) {
        int count = 0;
        for (int i = from; i < to; i += increment) {
            if (count >= retLength)
                break;
            ret[count++] = i;
        }
    } else if (from > to) {
        int count = 0;
        for (int i = from - 1; i >= to; i -= increment) {
            if (count >= retLength)
                break;
            ret[count++] = i;
        }
    }

    return ret;
}
/**
* Generate a range
* beginning at from and ending at to
* incrementing by 1
* @param from the start
* @param to the end
* @return the int array starting at from and ending at to
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to) {
return range<T>(from, to, 1);
}
/**
* Keep the given indexes in the data
* @param data
* @param index
* @param indexLength
* @param dataLength
* @return
*/
/**
 * Returns a new buffer containing data[i] for every i listed in `index`,
 * emitted in ascending order of i. Caller owns the result.
 */
INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) {
    traceNew(23);

    auto kept = new Nd4jLong[indexLength];
    int written = 0;
    for (int i = 0; i < dataLength; i++) {
        bool wanted = false;
        for (int j = 0; j < indexLength; j++) {
            if (index[j] == i) {
                wanted = true;
                break;
            }
        }
        if (wanted)
            kept[written++] = data[i];
    }
    return kept;
}
/**
* Generate a reverse
* copy of the data
*/
/**
 * Freshly allocated reversed copy of `data`; returns nullptr for empty input.
 * Caller owns the result. (The destination is a new buffer, so a simple
 * forward gather is equivalent to the original symmetric swap.)
 */
template <typename T>
INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) {
    if (length < 1)
        return nullptr;

    traceNew(24);

    T *reversed = new T[length];
    for (Nd4jLong i = 0; i < length; i++)
        reversed[i] = data[length - 1 - i];
    return reversed;
}
/**
 * Reversed copy of `from` into `to`, written as a symmetric swap over both
 * halves. The temp-based writes keep this correct when to == from (in-place
 * reversal) - do not simplify to a plain forward copy.
 */
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) {
if (length < 1)
return;
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = from[i];
to[i] = from[length - i - 1];
to[length - i - 1] = temp;
}
}
/**
 * Indexed variant: the source side is read through indexes[].
 * NOTE(review): to[i] takes from[indexes[length-i-1]] while to[length-i-1]
 * takes the pre-read from[indexes[i]] - verify intent when `to` aliases `from`.
 */
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) {
if (length < 1)
return;
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = from[indexes[i]];
to[i] = from[indexes[length - i - 1]];
to[length - i - 1] = temp;
}
}
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
/**
 * New buffer holding arr1 followed by arr2; caller owns the result.
 */
template <typename T>
INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) {
    traceNew(25);

    auto joined = new T[arr1Length + arr2Length];
    std::memcpy(joined, arr1, arr1Length * sizeof(T));
    std::memcpy(joined + arr1Length, arr2, arr2Length * sizeof(T));
    return joined;
}

/**
 * Flattens `numArrays` buffers (lengths[i] elements each) into a single buffer
 * of `numTotalElements`; caller owns the result and is responsible for the
 * lengths summing to numTotalElements.
 */
template <typename T>
INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) {
    auto joined = new T[numTotalElements];
    Nd4jLong pos = 0;
    for (Nd4jLong i = 0; i < numArrays; i++)
        for (Nd4jLong j = 0; j < lengths[i]; j++)
            joined[pos++] = arr[i][j];
    return joined;
}
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
/**
 * Length of one slice of `shape` along `dimension`: the product of the extents
 * remaining after the reduced dimensions are removed.
 * NOTE(review): when isVector() holds but the row-vector special case does not,
 * control falls through to the removeIndex path below (there is no else) -
 * presumably intended; confirm.
 */
INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) {
if(shape::isVector(shape,rank)) {
//return total length for row vectors
if(dimensionLength == 1 && shape[0] == 1) {
return shape::prod(shape,rank);
}
}
else if(rank == dimensionLength)
// reducing every dimension -> a single slice covering everything
return shape::prod(shape,rank);
int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength);

traceNew(27);

// drop the reduced dimensions, multiply the survivors
auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength);
auto ret = prodLong(ret2, absSelta);
delete[] ret2;
return ret;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
/**
 * Offset of tensor `index` inside the sliced view:
 * index * tensorLength / lengthPerSlice, guarded against a non-positive
 * slice length (returns 0 in that case).
 */
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) {
    const auto tensorLength = prodLong(tensorShape, tensorShapeLength);
    const auto sliceLength = lengthPerSlice(rank, shape, dimension, dimensionLength);
    if (sliceLength <= 0)
        return 0;
    return index * tensorLength / sliceLength;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
// Precomputed variant: offset = index * tensorLength / lengthPerSlice2.
// NOTE(review): unlike the overload above there is no guard here -
// lengthPerSlice2 == 0 divides by zero; callers must pass a positive value.
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) {
Nd4jLong offset = index * tensorLength / lengthPerSlice2;
return offset;
}
#ifdef __CUDACC__
/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 * Device-only: each thread steps from `offset` by the buffer's element-wise stride.
 */
INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) {
return offset + threadIdx.x * elementWiseStride(xInfo);
}
#endif
/**
* Computes the number
* of tensors along
* a given dimension
*/
/**
 * Number of tensors (TADs) along the given dimensions:
 * total length divided by the product of the kept (TAD) extents.
 */
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length,
                                                  volatile Nd4jLong *shape, int *dimension, int dimensionLength) {
    auto tadShape = shape::keep(shape, dimension, dimensionLength, rank);
    auto numTads = length / shape::prodLong(tadShape, dimensionLength);
    delete[] tadShape;
    return numTads;
}

/**
 * Shape-info overload of tensorsAlongDimension.
 */
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    auto extents = shape::shapeOf(shapeInfo);
    auto tadShape = shape::keep(extents, dimension, dimensionLength, rank(shapeInfo));
    auto numTads = shape::length(shapeInfo) / shape::prodLong(tadShape, dimensionLength);
    delete[] tadShape;
    return numTads;
}
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
/**
 * Computes a linear buffer offset from per-dimension indices:
 * baseOffset + sum(indices[i] * stride[i]) over non-unity dimensions.
 * Unity dimensions contribute nothing (and tolerate any index); an
 * out-of-range index on any other dimension prints a diagnostic and
 * returns -1.
 */
INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices, int rank) {
Nd4jLong offset = baseOffset;
for(int i = 0; i < rank; i++) {
if(indices[i] >= shape[i] && shape[i] != 1) {
#ifdef __CUDA_ARCH__
printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]);
#else
printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]);
#endif
#ifdef __CUDA_ARCH__
//if (threadIdx.x == 0 && blockIdx.x == 0)
// printShapeInfoLinear("getOffsetFailed", rank, shape, stride);
#endif
return -1;
}
if(shape[i] != 1) {
offset += indices[i] * stride[i];
}
}
return offset;
}
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
// Tensor-along-dimension id for block `blockIdx` at iteration `i`:
// TADs are strided across blocks by `blockSize`.
INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) {
return blockIdx + i * blockSize;
}
/**
 * Computes the number of tads per block (ceiling of tads / blockSize).
 */
INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) {
return nd4j::math::nd4j_ceil<double, int>(tads / (double) blockSize);
}
/**
* Returns a shape buffer
* for the shape information metadata.
*/
/**
 * Serializes a ShapeInformation struct into a freshly allocated shape-info
 * buffer laid out as [rank, shape..., stride..., offset, ews, order].
 * Caller owns the result.
 *
 * Fix: unlike the (info, ret) overload below, this variant had no rank-0
 * special case, leaving a rank-0 buffer with struct fields written into the
 * wrong slots; rank 0 now produces {0, 0, 1, 99} exactly like its sibling.
 */
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) {
    traceNew(29);

    auto ret = new Nd4jLong[shapeInfoLength(info->rank)];
    int count = 1;
    int rank = info->rank;

    ret[0] = info->rank;

    // rank-0 (scalar) special case, mirroring toShapeBuffer(info, ret)
    if (rank == 0) {
        ret[1] = 0;
        ret[2] = 1;
        ret[3] = 99;
        return ret;
    }

    for (int i = 0; i < rank; i++) {
        ret[count++] = info->shape[i];
    }

    for (int i = 0; i < rank; i++) {
        ret[count++] = info->stride[i];
    }

    ret[count++] = info->offset;
    ret[count++] = info->elementWiseStride;
    ret[count] = info->order;

    return ret;
}
/**
 * Serializes a ShapeInformation struct into a caller-provided buffer laid out
 * as [rank, shape..., stride..., offset, ews, order]; a rank-0 struct yields
 * {0, 0, 1, 99}. Returns `ret`.
 */
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) {
    const int theRank = info->rank;
    ret[0] = theRank;

    // rank-0 (scalar) special case
    if (theRank == 0) {
        ret[1] = 0;
        ret[2] = 1;
        ret[3] = 99;
        return ret;
    }

    int pos = 1;
    for (int i = 0; i < theRank; i++)
        ret[pos++] = info->shape[i];
    for (int i = 0; i < theRank; i++)
        ret[pos++] = info->stride[i];

    ret[pos++] = info->offset;
    ret[pos++] = info->elementWiseStride;
    ret[pos++] = info->order;
    return ret;
}
/** Prints the given Nd4jLong array, space-separated, followed by a newline. */
INLINEDEF _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length) {
    for (int e = 0; e < length; e++)
        printf(" %lld ", (long long) arr[e]);
    printf("\n");
}
/** Prints the given int array, space-separated, followed by a newline. */
INLINEDEF _CUDA_HD void printIntArray(const int *arr, const int length) {
    for (int e = 0; e < length; e++)
        printf(" %i ", arr[e]);
    printf("\n");
}
/** Pretty-prints rank, shape, stride and order of a shapeInfo buffer. */
INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) {
    const int rank = shape::rank(shapeInfo);
    printf("Rank %d\n", rank);
    printf("Shape:\n");
    Nd4jLong *shape = shape::shapeOf(shapeInfo);
    for (int e = 0; e < rank; e++)
        printf(" %lld ", (long long) shape[e]);
    printf("\n");
    printf("Stride:\n");
    Nd4jLong *stride = shape::stride(shapeInfo);
    for (int e = 0; e < rank; e++)
        printf(" %lld ", (long long) stride[e]);
    printf("\n");
    printf("Order %c\n", shape::order(shapeInfo));
}
/** Prints the full shapeInfo buffer as a single comma-separated list. */
INLINEDEF _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo) {
    const int len = shape::shapeInfoLength(shape::rank(shapeInfo));
    printf("ShapeInfo: [");
    for (int e = 0; e < len; e++) {
        printf("%lld", (long long) shapeInfo[e]);
        if (e + 1 < len)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
/**
 * Prints "msg : [shape..., strides...]". Note the shape section always
 * carries a trailing ", " which joins it onto the strides section.
 */
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides) {
    printf("%s : [", msg);
    for (int e = 0; e < rank; e++)
        printf("%lld, ", (long long) shape[e]);
    for (int e = 0; e < rank; e++) {
        printf("%lld", (long long) strides[e]);
        if (e + 1 < rank)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
/**
 * Prints "msg : [<full shapeInfo buffer>]" as a comma-separated list.
 *
 * Fix: the host-side fflush was guarded by #ifndef __CUDACC__, but __CUDACC__
 * is defined for BOTH host and device compilation passes under nvcc, so the
 * flush was silently skipped on host whenever this header was compiled by
 * nvcc. The sibling overloads use #ifndef __CUDA_ARCH__ (defined only in
 * device passes), which is the correct and consistent guard.
 */
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo) {
    int rank = shape::rank(shapeInfo);
    int lim = shape::shapeInfoLength(rank);
    printf("%s : [", msg);
    for (int i = 0; i < lim; i++) {
        printf("%lld", (long long) shapeInfo[i]);
        if (i < lim - 1) {
            printf(", ");
        }
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
/**
 * Prints `length` elements of `varr` (interpreted as T*) as floats, prefixed
 * by `message` (or "Array" when message is null).
 *
 * Fix: host fflush was guarded by #ifndef __CUDACC__, which is defined for
 * host passes under nvcc too, so the flush never ran in nvcc builds; the
 * correct device-only exclusion is #ifndef __CUDA_ARCH__, matching the
 * printShapeInfoLinear overloads above.
 */
template <typename T>
INLINEDEF _CUDA_HD void printArray(void *varr,int length, const char * message) {
    auto arr = reinterpret_cast<T*>(varr);
    if (message != nullptr)
        printf("%s: [", message);
    else
        printf("Array: [");
    for (int i = 0; i < length; i ++) {
        printf("%f", (float) arr[i]);
        if (i + 1 < length) printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
/** Prints a float array as "Array: [a, b, ...]". */
INLINEDEF _CUDA_HD void printArray(float *arr,int length) {
    printf("Array: [");
    for (int e = 0; e < length; e++) {
        printf("%f", arr[e]);
        if (e + 1 < length)
            printf(", ");
    }
    printf("]\n");
}
/**
* Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
/** Maps a linear index to the tad that contains it. */
INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) {
    const int tadSpan = numElementsPerTad * elementWiseStride;
    return i / tadSpan;
}
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
/**
 * Maps an original-problem tad index to its reduction index.
 * Each reduction index covers (tadsForOriginal / tadsForReduced) tads.
 */
INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
                                            int tadsForOriginal) {
    if (tadIndexForOriginal == 0)
        return 0;
    const int tadsPerReduction = tadsForOriginal / tadsForReduced;
    return tadIndexForOriginal / tadsPerReduction;
}
/**
 * Transposes a shapeInfo buffer in place: reverses shape and strides and
 * flips the order flag ('c' 99 <-> 'f' 102).
 *
 * Fix: the swap temporaries were declared `int`, silently truncating 64-bit
 * Nd4jLong extents/strides for arrays with any dimension or stride above
 * INT_MAX; they are now Nd4jLong.
 */
INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) {
    const int rank = shape::rank(shapeBuffer);
    Nd4jLong *shape = shape::shapeOf(shapeBuffer);
    Nd4jLong *strides = shape::stride(shapeBuffer);

    // reverse shape and strides with 64-bit temporaries
    for (int e = 0; e < rank / 2; e++) {
        const int mirror = rank - e - 1;
        Nd4jLong tmp = shape[e];
        shape[e] = shape[mirror];
        shape[mirror] = tmp;

        tmp = strides[e];
        strides[e] = strides[mirror];
        strides[mirror] = tmp;
    }

    // toggle the order flag stored in the last shapeInfo slot
    if (shape::order(shapeBuffer) == 'c')
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102;
    else
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99;
}
/**
* Tad index for linear
* @param linearIndex
* @param tadLength
* @return
*/
/** Position of a linear index within its tad. */
INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) {
    return linearIndex % tadLength;
}
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
/** Number of original tads folded into each reduction index. */
INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) {
    return tadsForOriginal / tadsForReduce;
}
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
/**
 * Maps a linear index straight to its reduction index by first resolving
 * the tad it belongs to, then mapping that tad to a reduction index.
 */
INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
                                               int tadNum, int originalTadNum) {
    const int tad = tadIndex(i, elementWiseStride, numElementsPerTad);
    return reductionIndexForTad(tad, tadNum, originalTadNum);
}
/**
 * Allocates a shape buffer describing a rank-1 "scalar": shape [1],
 * stride [1], offset 0, elementWiseStride 1, order 99 ('c').
 * Caller owns the returned array.
 */
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() {
    traceNew(30);
    // stack temporaries are sufficient here; toShapeBuffer copies everything
    // it needs into the freshly allocated result
    Nd4jLong scalarShape[1] = {1};
    Nd4jLong scalarStride[1] = {1};
    ShapeInformation info(scalarShape, scalarStride, 99, 1, 0, 1);
    return shape::toShapeBuffer(&info);
}
/**
 * Writes the canonical rank-2 scalar descriptor
 * [2, 1, 1, 1, 1, 0, 1, 99] into the caller's buffer and returns it.
 */
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) {
    const Nd4jLong scalarDescriptor[8] = {2, 1, 1, 1, 1, 0, 1, 99};
    for (int e = 0; e < 8; e++)
        ret[e] = scalarDescriptor[e];
    return ret;
}
/**
* Returns the prod of the data
* up to the given length
*/
/** Product of the first `length` elements, accumulated as int. */
INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) {
    int result = 1;
    for (int e = 0; e < length; e++)
        result *= data[e];
    return result;
}
/**
* Returns the prod of the data
* up to the given length
*/
/** Product of the first `length` elements, accumulated as Nd4jLong. */
INLINEDEF _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length) {
    Nd4jLong result = 1;
    for (int e = 0; e < length; e++)
        result *= data[e];
    return result;
}
/**
 * Returns the stride of the highest (rear-most) dimension that is NOT
 * contained in the given dimension list, scanning from the back; falls
 * back to stride[0] when every dimension matches.
 *
 * Fix: the original duplicated this loop verbatim in separate 'f'-order and
 * 'c'-order branches whose bodies were byte-identical, so the order check
 * has been removed and the loop written once — behavior is unchanged.
 *
 * NOTE(review): assumes `dimension` is sorted ascending and dimensionLength
 * >= the number of trailing matches encountered, as in the original —
 * a mismatch is expected before dimIdx underflows; confirm with callers.
 */
INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) {
    Nd4jLong *stride = shape::stride(data);
    const int rank = shape::rank(data);

    // walk backwards; the first rank index not matching the tail of the
    // dimension list is the "left over" dimension
    int dimIdx = dimensionLength - 1;
    for (int i = rank - 1; i >= 0; i--) {
        if (dimension[dimIdx--] != i) {
            return (int) stride[i];
        }
    }

    // every dimension was consumed: return the first stride
    return (int) stride[0];
}
#ifdef __CUDACC__
/**
 * Block-cooperative copy of a shapeInfo buffer into targetBuffer: every
 * thread of the block copies a strided subset (stride = blockDim.x).
 * The length is derived from the rank stored in element 0.
 */
__device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) {
    const int len = shape::shapeInfoLength(static_cast<int>(shapeInfoBuffer[0]));
    for (int e = threadIdx.x; e < len; e += blockDim.x)
        targetBuffer[e] = shapeInfoBuffer[e];
}
#endif
/** Builds a shape buffer from a parsed npy array by delegating to the rank/pointer overload. */
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) {
    const int rank = static_cast<int>(arr.shape.size());
    return shape::shapeBufferOfNpy(rank, (unsigned int*) arr.shape.data(), arr.fortranOrder);
}
// INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
// unsigned Nd4jLong *shape;
// unsigned int ndims, wordSize;
// bool fortranOrder;
// cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
// Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
// delete[] shape;
// return ret;
// }
/**
 * Builds a FLOAT32 shape buffer (caller-owned) from an npy dimension array,
 * honoring fortran vs c ordering.
 *
 * Fix: the fortran-order path cast `unsigned int*` directly to `Nd4jLong*`,
 * reinterpreting pairs of 32-bit dims as single 64-bit values — garbage for
 * any rank > 1 (and endian-dependent even for rank 1). Both paths now widen
 * the 32-bit dimensions into a temporary Nd4jLong array first, exactly as
 * the c-order path already did.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) {
    // widen 32-bit npy dims to 64-bit
    Nd4jLong *newShape = new Nd4jLong[rank];
    for (int i = 0; i < rank; i++)
        newShape[i] = shape[i];

    Nd4jLong *shapeBufferRet = fortranOrder
            ? shape::shapeBufferFortran(rank, nd4j::FLOAT32, newShape)
            : shape::shapeBuffer(rank, nd4j::FLOAT32, newShape);

    delete[] newShape;
    return shapeBufferRet;
}
/**
 * True when strides are strictly descending for 'c' order or strictly
 * ascending for 'f' order; row vectors with unit strides pass trivially.
 * Unknown order flags are reported and rejected.
 */
INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer) {
    const int rank = shape::rank(shapeBuffer);
    Nd4jLong *strides = shape::stride(const_cast<Nd4jLong*>(shapeBuffer));
    const char order = shape::order(shapeBuffer);

    if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1)
        return true;

    switch (order) {
        case 'c':
            for (int e = 1; e < rank; e++)
                if (strides[e - 1] <= strides[e])
                    return false;
            return true;
        case 'f':
            for (int e = 1; e < rank; e++)
                if (strides[e - 1] >= strides[e])
                    return false;
            return true;
        default:
            printf("Unknown order for array!\n");
            return false;
    }
}
/** True for c-ordered arrays with a positive element-wise stride. */
INLINEDEF _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo) {
    const bool isCOrder = order(shapeInfo) == 'c';
    return isCOrder && elementWiseStride(shapeInfo) > 0;
}
//////////////////////////////////////////////////////////////////////////
// copy-past from java hasDefaultStridesForShape function
//////////////////////////////////////////////////////////////////////////
// mirrors the java hasDefaultStridesForShape function: true when the stored
// strides equal the freshly computed default strides for this shape/order
INLINEDEF _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo) {
    const int rank = shape::rank(shapeInfo);
    if (rank == 0)
        return true;
    if (!strideDescendingCAscendingF(shapeInfo))
        return false;

    // rebuild default strides into a scratch copy and compare the stride section
    Nd4jLong scratch[MAX_SHAPEINFOLENGTH];
    memcpy(scratch, shapeInfo, shape::shapeInfoByteLength(shapeInfo));
    shape::updateStrides(scratch, shape::order(shapeInfo));

    for (int i = rank + 1; i <= 2 * rank; ++i)
        if (scratch[i] != shapeInfo[i])
            return false;
    return true;
}
// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
// int oldnd;
// Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
// Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
// int np, op, last_stride;
// int oi, oj, ok, ni, nj, nk;
// Nd4jLong* newStrides = new Nd4jLong[newRank];
// oldnd = 0;
// /*
// * Remove axes with dimension 1 from the old array. They have no effect
// * but would need special cases since their strides do not matter.
// */
// for (oi = 0; oi < oldRank; oi++) {
// if (shape::shapeOf(oldShape)[oi] != 1) {
// olddims[oldnd] = shape::shapeOf(oldShape)[oi];
// oldstrides[oldnd] = shape::stride(oldShape)[oi];
// oldnd++;
// }
// }
// np = 1;
// for (ni = 0; ni < newRank; ni++) {
// np *= newShapeOf[ni];
// }
// op = 1;
// for (oi = 0; oi < oldnd; oi++) {
// op *= olddims[oi];
// }
// if (np != op) {
// /* different total sizes; no hope */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// if (np == 0) {
// /* the current code does not handle 0-sized arrays, so give up */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// /* oi to oj and ni to nj give the axis ranges currently worked with */
// oi = 0;
// oj = 1;
// ni = 0;
// nj = 1;
// while (ni < newRank && oi < oldnd) {
// np = newShapeOf[ni];
// op = olddims[oi];
// while (np != op) {
// if (np < op) {
// /* Misses trailing 1s, these are handled later */
// np *= newShapeOf[nj++];
// } else {
// op *= olddims[oj++];
// }
// }
// /* Check whether the original axes can be combined */
// for (ok = oi; ok < oj - 1; ok++) {
// if (isFOrder) {
// if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// } else {
// /* C order */
// if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// }
// }
// /* Calculate new strides for all axes currently worked with */
// if (isFOrder) {
// newStrides[ni] = oldstrides[oi];
// for (nk = ni + 1; nk < nj; nk++) {
// newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
// }
// } else {
// /* C order */
// newStrides[nj - 1] = oldstrides[oj - 1];
// for (nk = nj - 1; nk > ni; nk--) {
// newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
// }
// }
// ni = nj++;
// oi = oj++;
// }
// if (ni >= 1) {
// last_stride = newStrides[ni - 1];
// } else {
// last_stride = shape::elementWiseStride(oldShape);
// }
// if (isFOrder && ni >= 1) {
// last_stride *= newShapeOf[ni - 1];
// }
// for (nk = ni; nk < newRank; nk++) {
// newStrides[nk] = last_stride;
// }
// target[0] = newRank;
// int cnt = 1;
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newShapeOf[e];
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newStrides[e];
// target[shape::shapeInfoLength(newRank) - 3] = 0;
// target[shape::shapeInfoLength(newRank) - 2] = 0;
// target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;
// nd4j::ArrayOptions::setDataType(target, nd4j::ArrayOptions::dataType(oldShape));
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return true;
// }
// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, const bool isFOrder, Nd4jLong* newShapeInfo) {
// // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements
// // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo
// const int newOrder = isFOrder ? 102 : 99;
// const int oldOrder = oldShapeInfo[2 * oldRank + 3];
// newShapeInfo[0] = newRank;
// memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));
// Nd4jLong* newStrides = shape::stride(newShapeInfo);
// const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo));
// const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo));
// int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;
// while (newStart < newRank && oldStart < oldRank) {
// newDim = newShape[newStart];
// oldDim = oldShape[oldStart];
// while (newDim != oldDim)
// if (newDim < oldDim) newDim *= newShape[newStop++];
// else oldDim *= oldShape[oldStop++];
// // ------ Check whether the original axes can be combined ------ //
// for (int i = oldStart; i < oldStop - 1; i++) {
// if(oldShape[i] == 1) { // ignore strides like {...,1,1,...}
// if(oldOrder == 102) ++oldStart;
// continue;
// }
// if(oldOrder == 102 && oldStrides[i + 1] != oldShape[i] * oldStrides[i])
// return false; // not contiguous enough
// if(oldOrder == 99 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1])
// return false; // not contiguous enough
// }
// // ------ Calculate new strides for all axes currently worked with ------ //
// if(isFOrder) {
// newStrides[newStart] = oldStrides[oldStart];
// for (int i = newStart + 1; i < newStop; ++i)
// newStrides[i] = newStrides[i - 1] * newShape[i - 1];
// }
// else {
// newStrides[newStop - 1] = oldStrides[oldStop - 1];
// for (int i = newStop - 1; i > newStart; --i)
// newStrides[i - 1] = newStrides[i] * newShape[i];
// }
// newStart = newStop++;
// oldStart = oldStop++;
// }
// newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order
// newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews
// newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type
// return true;
// }
//////////////////////////////////////////////////////////////////////
// Attempts a no-copy reshape: fills newShapeInfo (rank newRank, dims newShape)
// with strides derived from oldShapeInfo. Returns false when the old strides
// are not contiguous enough to express the new shape as a view of the same
// buffer. NOTE(review): assumes old and new shapes describe the same total
// element count — TODO confirm callers guarantee this.
INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo) {
    // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements
    // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo
    // write rank and new dimensions into the output descriptor
    newShapeInfo[0] = newRank;
    memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));
    Nd4jLong* newStrides = shape::stride(newShapeInfo);
    const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo));
    const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo));
    // [oldStart,oldStop) and [newStart,newStop) are the axis groups currently being matched
    int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;
    while (newStart < newRank && oldStart < oldRank) {
        newDim = newShape[newStart];
        oldDim = oldShape[oldStart];
        // extend whichever group covers fewer elements until both cover the same count
        while (newDim != oldDim)
            if (newDim < oldDim) newDim *= newShape[newStop++];
            else oldDim *= oldShape[oldStop++];
        // ------ Check whether the original axes can be combined ------ //
        for (int step = 1, i = oldStart; i < oldStop - 1; ++i) {
            if(oldShape[i] == 1) // skip unity-dimension and its stride
                continue;
            while((i + step) < oldRank && oldShape[i + step] == 1)
                ++step; // skip following unity-dimensions and its strides if such are present
            if((i + step) < oldRank && oldStrides[i] != oldShape[i + step] * oldStrides[i + step])
                return false; // not contiguous enough
        }
        // c-order strides for the new axes of this group, built from innermost outward
        newStrides[newStop - 1] = oldStrides[oldStop - 1];
        for (int i = newStop - 1; i > newStart; --i)
            newStrides[i - 1] = newStrides[i] * newShape[i];
        newStart = newStop++;
        oldStart = oldStop++;
    }
    // copy the trailing descriptor fields over from the old shapeInfo
    newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order
    newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews
    newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type
    return true;
}
/**
 * Checks whether oldShape can be reshaped into newShapeOf without copying
 * the underlying buffer (same algorithm as NumPy's _attempt_nocopy_reshape,
 * stride computation included but discarded). Returns false when the total
 * sizes differ, the size is zero, or the old strides are not contiguous
 * enough; true otherwise.
 *
 * Fix: removed the unused local `last_stride`, a leftover from the
 * commented-out reshapeC variant above; all other logic is unchanged.
 */
INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) {
    int oldnd;
    Nd4jLong* oldDims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
    Nd4jLong* oldStrides = shape::copyOf(oldRank, shape::stride(oldShape));
    int np, op;
    int oldStart, oldStop, ok, newStart, newStop, nk;
    auto newStrides = new Nd4jLong[newRank];
    oldnd = 0;
    /*
     * Remove axes with dimension 1 from the old array. They have no effect
     * but would need special cases since their strides do not matter.
     */
    for (oldStart = 0; oldStart < oldRank; oldStart++) {
        if (shape::shapeOf(oldShape)[oldStart] != 1) {
            oldDims[oldnd] = shape::shapeOf(oldShape)[oldStart];
            oldStrides[oldnd] = shape::stride(oldShape)[oldStart];
            oldnd++;
        }
    }
    // total element counts of both shapes must agree
    np = 1;
    for (newStart = 0; newStart < newRank; newStart++) {
        np *= newShapeOf[newStart];
    }
    op = 1;
    for (oldStart = 0; oldStart < oldnd; oldStart++) {
        op *= oldDims[oldStart];
    }
    if (np != op) {
        /* different total sizes; no hope */
        delete[] oldDims;
        delete[] oldStrides;
        delete[] newStrides;
        return false;
    }
    if (np == 0) {
        /* the current code does not handle 0-sized arrays, so give up */
        delete[] oldDims;
        delete[] oldStrides;
        delete[] newStrides;
        return false;
    }
    /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */
    oldStart = 0;
    oldStop = 1;
    newStart = 0;
    newStop = 1;
    while (newStart < newRank && oldStart < oldnd) {
        np = newShapeOf[newStart];
        op = oldDims[oldStart];
        // grow whichever group covers fewer elements until both cover the same count
        while (np != op) {
            if (np < op) {
                /* Misses trailing 1s, these are handled later */
                np *= newShapeOf[newStop++];
            } else {
                op *= oldDims[oldStop++];
            }
        }
        /* Check whether the original axes can be combined */
        for (ok = oldStart; ok < oldStop - 1; ok++) {
            if (isFOrder) {
                if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) {
                    /* not contiguous enough */
                    delete[] oldDims;
                    delete[] oldStrides;
                    delete[] newStrides;
                    return false;
                }
            } else {
                /* C order */
                if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) {
                    /* not contiguous enough */
                    delete[] oldDims;
                    delete[] oldStrides;
                    delete[] newStrides;
                    return false;
                }
            }
        }
        /* Calculate new strides for all axes currently worked with */
        if (isFOrder) {
            newStrides[newStart] = oldStrides[oldStart];
            for (nk = newStart + 1; nk < newStop; nk++) {
                newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
            }
        } else {
            /* C order */
            newStrides[newStop - 1] = oldStrides[oldStop - 1];
            for (nk = newStop - 1; nk > newStart; nk--) {
                newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
            }
        }
        newStart = newStop++;
        oldStart = oldStop++;
    }
    // all groups matched — the reshape is possible as a view
    delete[] oldDims;
    delete[] oldStrides;
    delete[] newStrides;
    return true;
}
// this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// also it sorts input array of dimensions, this operation is also necessary for creating TAD object
// this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// also it sorts input array of dimensions, this operation is also necessary for creating TAD object
INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) {
    if (dimensions.empty())
        throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!");

    // map negative axes to positive ones: -dim -> rank - |dim|
    for (auto& dim : dimensions)
        if (dim < 0)
            dim += rank;

    // sort and dedupe; TAD creation in external methods relies on sorted dimensions
    if (dimensions.size() > 1) {
        std::sort(dimensions.begin(), dimensions.end());
        dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end());
    }

    if (static_cast<int>(dimensions.size()) > rank)
        throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!");

    // after the transform everything must lie in [0, rank-1]
    if (dimensions[0] < 0 || dimensions.back() > (rank-1))
        throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !");
}
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
// Translates coordinates of the max (outer) array into coordinates of the min
// (sub-)array: excluded dimensions are passed through (or skipped when ranks
// differ); all other coordinates are wrapped into the min array's extent —
// maxIdx % minExtent when larger, 0 when equal, unchanged otherwise.
INLINEDEF _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, int dimsLen) {
    const auto maxRank = shape::rank(maxShapeInfo);
    const auto minRank = shape::rank(minShapeInfo);
    // if(minRank >= maxRank)
    //    throw std::runtime_error("shape::maxIndToMinInd method: rank of min array should be smaller then rank of max array!");
    if(dimsLen == -1)
        dimsLen = maxRank - minRank; // if size is not given (= -1) then it is equal to ranks difference
    if(maxRank == minRank) {
        // equal ranks: min coordinates are produced positionally
        if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1}
            for (int i = 0; i < maxRank; ++i) {
                // the first dimsLen dimensions are implicitly excluded: copy as-is
                if(i < dimsLen)
                    minIdxs[i] = maxIdxs[i];
                else {
                    // wrap the coordinate into the min extent (extent itself maps to 0)
                    if(maxIdxs[i] > minShapeInfo[i + 1])
                        minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1];
                    else if(maxIdxs[i] == minShapeInfo[i + 1])
                        minIdxs[i] = 0;
                    else
                        minIdxs[i] = maxIdxs[i];
                }
            }
        }
        else {
            // explicit exclusion list (assumed sorted ascending): excluded dims copied as-is
            for (int i = 0, dim = 0; i < maxRank; ++i) {
                if(dim < dimsLen && dimsToExclude[dim] == i) {
                    minIdxs[i] = maxIdxs[i];
                    ++dim;
                    continue;
                }
                // wrap the coordinate into the min extent (extent itself maps to 0)
                if(maxIdxs[i] > minShapeInfo[i + 1])
                    minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1];
                else if(maxIdxs[i] == minShapeInfo[i + 1])
                    minIdxs[i] = 0;
                else
                    minIdxs[i] = maxIdxs[i];
            }
        }
    }
    else {
        // differing ranks: excluded max dimensions are dropped entirely
        if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1}
            // leading dimsLen max dims skipped; remaining dims wrap into min extents
            for (int i = 0; i < minRank; ++i) {
                if(maxIdxs[i + dimsLen] > minShapeInfo[i + 1])
                    minIdxs[i] = maxIdxs[i + dimsLen] % minShapeInfo[i + 1];
                else if(maxIdxs[i + dimsLen] == minShapeInfo[i + 1])
                    minIdxs[i] = 0;
                else
                    minIdxs[i] = maxIdxs[i + dimsLen];
            }
        }
        else {
            // walk max dims, skipping those listed in dimsToExclude (assumed sorted),
            // wrapping the survivors into consecutive min coordinates
            for (int minI = 0, maxI = 0, dim = 0; maxI < maxRank; ++maxI) {
                if(dim < dimsLen && dimsToExclude[dim] == maxI) {
                    ++dim;
                    continue;
                }
                if(maxIdxs[maxI] == minShapeInfo[minI + 1])
                    minIdxs[minI] = 0;
                else if(maxIdxs[maxI] > minShapeInfo[minI + 1])
                    minIdxs[minI] = maxIdxs[maxI] % minShapeInfo[minI + 1];
                else
                    minIdxs[minI] = maxIdxs[maxI];
                ++minI;
            }
        }
    }
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Maps a flat index of the max (outer) array to the flat index of the
// corresponding element in the min (sub-)array.
INLINEDEF _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) {
    // unflatten maxIdx into per-dimension coordinates of the max array
    Nd4jLong maxIdxs[MAX_RANK];
    const int maxRank = shape::rank(maxShapeInfo);
    if (shape::order(maxShapeInfo) == 'c')
        shape::ind2subC(maxRank, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs);
    else
        shape::ind2sub(maxRank, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs);

    // project onto the min array's coordinates, then flatten back
    Nd4jLong minIdxs[MAX_RANK];
    maxIndToMinInd(maxIdxs, minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen);
    return sub2Ind(shape::rank(minShapeInfo), minShapeInfo + 1, minIdxs);
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Maps a flat index of the max (outer) array to the buffer OFFSET of the
// corresponding element in the min (sub-)array (strides applied).
INLINEDEF _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) {
    // unflatten maxIdx into per-dimension coordinates of the max array
    Nd4jLong maxIdxs[MAX_RANK];
    const int maxRank = shape::rank(maxShapeInfo);
    if (shape::order(maxShapeInfo) == 'c')
        shape::ind2subC(maxRank, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs);
    else
        shape::ind2sub(maxRank, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs);

    // project onto min coordinates, then compute the strided offset
    Nd4jLong minIdxs[MAX_RANK];
    maxIndToMinInd(maxIdxs, minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen);
    const int minRank = shape::rank(minShapeInfo);
    return getOffset(0, minShapeInfo + 1, minShapeInfo + minRank + 1, minIdxs, minRank);
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Fills maxOffsets with the buffer offsets of ALL elements of the max (outer)
// array that map onto the min-array element at flat index minIdx; returns the
// number of offsets written. Uses a single MAX_RANK scratch buffer split into
// per-dim indices (first half) and per-dim increments (second half), then
// walks the increments like an odometer.
INLINEDEF _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
    const auto rankMin = shape::rank(minShapeInfo);
    const auto rankMax = shape::rank(maxShapeInfo);
    // if(rankMin >= rankMax)
    //    throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
    // if(rankMax > MAX_RANK/2)
    //    throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
    const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
    Nd4jLong buffer[MAX_RANK];
    Nd4jLong* indices = buffer;
    Nd4jLong* increment = buffer + MAX_RANK/2;
    int N, minI, maxI;
    // calculate min per-dim-indices which corresponds to absolute minIdx index
    if(order(minShapeInfo) == 'c')
        shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices);
    else
        shape::ind2sub(rankMin, const_cast<Nd4jLong*>(minShapeInfo) + 1, minIdx, indices);
    // transform storage indices to contain per-dim max indices, purpose - memory saving
    // fill increment array as well
    // increment == 0 marks dims where max and min extents match (index is fixed);
    // increment == minExtent marks dims stepped by the min extent; increment == 1 marks excluded dims
    if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
        for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
            increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
            indices[maxI] = indices[minI];
        }
        for(maxI = 0; maxI < diff; ++maxI) {
            increment[maxI] = 1;
            indices[maxI] = 0;
        }
    }
    else {
        // dimsToExclude is walked from the back; assumed sorted ascending
        for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
            if(N >= 0 && dimsToExclude[N] == maxI) {
                increment[maxI] = 1;
                indices[maxI] = 0;
                --N;
            }
            else {
                increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
                indices[maxI] = indices[minI--];
            }
        }
    }
    maxI = rankMax-1;
    N = 0;
    // NOTE(review): `step` is only assigned inside the loop; on iterations where
    // increment[maxI] == 0 and maxI != rankMax-1 the previous step value is
    // reused deliberately (first iteration always starts at maxI == rankMax-1) — confirm
    int step;
    maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
    // nested loops - producing of absolute indices for max array
    while(maxI >= 0) {
        if(increment[maxI] != 0) {
            indices[maxI] += increment[maxI];
            if(indices[maxI] >= maxShapeInfo[maxI+1]) {
                indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
                step = -1; // this dim overflowed: move outward
            }
            else {
                maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
                step = rankMax - 1 - maxI; // jump back to the innermost dim
            }
        }
        else if(maxI == rankMax - 1)
            step = -1; // innermost dim is fixed: move outward immediately
        maxI += step;
    }
    return N;
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Same odometer walk as outerArrayOffsets above, but fills maxIdxs with FLAT
// INDICES (via sub2Ind) instead of strided buffer offsets; returns the count.
INLINEDEF _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
    const auto rankMin = shape::rank(minShapeInfo);
    const auto rankMax = shape::rank(maxShapeInfo);
    // if(rankMin >= rankMax)
    //    throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
    // if(rankMax > MAX_RANK/2)
    //    throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
    const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
    // single scratch buffer: first half per-dim indices, second half per-dim increments
    Nd4jLong buffer[MAX_RANK];
    Nd4jLong* indices = buffer;
    Nd4jLong* increment = buffer + MAX_RANK/2;
    int N, minI, maxI;
    // calculate min per-dim-indices which corresponds to absolute minIdx index
    if(order(minShapeInfo) == 'c')
        shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices);
    else
        shape::ind2sub(rankMin, const_cast<Nd4jLong*>(minShapeInfo) + 1, minIdx, indices);
    // transform storage indices to contain per-dim max indices, purpose - memory saving
    // fill increment array as well
    // increment == 0: max/min extents match, index fixed; == minExtent: stepped dim; == 1: excluded dim
    if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
        for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
            increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
            indices[maxI] = indices[minI];
        }
        for(maxI = 0; maxI < diff; ++maxI) {
            increment[maxI] = 1;
            indices[maxI] = 0;
        }
    }
    else {
        // dimsToExclude walked from the back; assumed sorted ascending
        for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
            if(N >= 0 && dimsToExclude[N] == maxI) {
                increment[maxI] = 1;
                indices[maxI] = 0;
                --N;
            }
            else {
                increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
                indices[maxI] = indices[minI--];
            }
        }
    }
    maxI = rankMax-1;
    N = 0;
    // NOTE(review): `step` reuses its previous value when increment[maxI] == 0
    // and maxI != rankMax-1 (first iteration always starts at rankMax-1) — confirm intended
    int step;
    maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices);
    // nested loops - producing of absolute indices for max array
    while(maxI >= 0) {
        if(increment[maxI] != 0) {
            indices[maxI] += increment[maxI];
            if(indices[maxI] >= maxShapeInfo[maxI+1]) {
                indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
                step = -1; // dim overflowed: move outward
            }
            else {
                maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices);
                step = rankMax - 1 - maxI; // jump back to the innermost dim
            }
        }
        else if(maxI == rankMax - 1)
            step = -1; // innermost dim fixed: move outward immediately
        maxI += step;
    }
    return N;
}
/**
 * Writes the legacy rank-2 scalar descriptor [2, 1, 1, 1, 1, extra, 1, order]
 * into buffer and stamps the data type into the extra/flags slot.
 *
 * Fix: buffer[5] (the extra/flags slot setDataType operates on) was never
 * initialized, unlike createScalarShapeInfo(ret) which sets slot 5 to 0.
 * It is now zeroed first, so the result is deterministic regardless of
 * whether setDataType assigns or ORs flags into that slot.
 */
INLINEDEF _CUDA_HD void shapeOldScalar(nd4j::DataType dataType, Nd4jLong* const buffer, const char order) {
    buffer[0] = 2;
    buffer[1] = 1;
    buffer[2] = 1;
    buffer[3] = 1;
    buffer[4] = 1;
    buffer[5] = 0;  // clear extra/flags before setDataType touches it
    buffer[6] = 1;
    buffer[7] = (int)order;

    nd4j::ArrayOptions::setDataType(buffer, dataType);
}
/** Element-wise conversion of `length` values from T1 to T2. */
template <typename T1, typename T2>
INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) {
    for (Nd4jLong i = 0; i < length; i++)
        to[i] = static_cast<T2>(from[i]);
};
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
/**
 * Fills subArrOffsets[0..numOfSubArrs-1] with the buffer offset of each
 * sub-array, iterating sub-arrays in row-major (c) order over `shape`.
 * Large counts (> 1024) are computed independently per index in parallel;
 * small counts use a sequential odometer walk.
 *
 * Fix: the sequential branch contained two unsequenced-modification
 * expressions — `subArrOffsets[i++] = subArrOffsets[i-1] + strides[j]`
 * and `currOffset[j--] = idx[j] = 0` — which are undefined behavior before
 * C++17 (and fragile after). Both are rewritten with explicit sequencing
 * that matches the clearly intended semantics (read the previous element /
 * clear slot j, THEN advance the counter).
 */
INLINEDEF void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets) {
    // set offset for first sub-array, it is equal to zero always
    subArrOffsets[0] = 0;

    // choose whether to parallelize or not
    if(numOfSubArrs > 1024 /*Environment::getInstance()->elementwiseThreshold()*/) {
#pragma omp parallel // PRAGMA_OMP_PARALLEL_ARGS(private(indexes))
        {
            Nd4jLong* indexes = new Nd4jLong[rank];
#pragma omp for simd schedule(guided) // PRAGMA_OMP_PARALLEL_FOR
            for (Nd4jLong i = 1; i < numOfSubArrs; ++i) {
                // each offset computed independently from its c-order coordinates
                shape::ind2subC(rank, shape, i, indexes);
                subArrOffsets[i] = 0;
                for (int j = 0; j < rank; ++j)
                    if(shape[j] != 1)
                        subArrOffsets[i] += indexes[j] * strides[j];
            }
            delete[] indexes;
        }
    }
    else {
        Nd4jLong rankMinusOne = rank - 1;
        Nd4jLong i = 1, j = rankMinusOne;
        Nd4jLong* idx = new Nd4jLong[rank];
        Nd4jLong* currOffset = new Nd4jLong[rank];
        memset(idx, 0, sizeof(Nd4jLong) * rank);
        memset(currOffset, 0, sizeof(Nd4jLong) * rank);

        // nested loops - calculation of sub-array offsets (subArrOffsets)
        while(j >= 0) {
            if(shape[j] == 1) { --j; continue; }   // ignore dimensions equal to unity
            if(j == rankMinusOne) {                // last dimension
                for(idx[j] = 1; idx[j] < shape[j]; ++idx[j]) {
                    // each offset = previous offset + innermost stride
                    subArrOffsets[i] = subArrOffsets[i - 1] + strides[j];
                    ++i;
                }
                --j;
            }
            else if(idx[j] < shape[j] - 1) {
                currOffset[j] += strides[j];
                subArrOffsets[i++] = j ? currOffset[j] + currOffset[j-1] : currOffset[j];
                ++idx[j];
                j = rankMinusOne;
            }
            else {
                // dimension j exhausted: reset its counters and move outward
                idx[j] = 0;
                currOffset[j] = 0;
                --j;
            }
        }
        delete[] idx;
        delete[] currOffset;
    }
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Evaluates and writes the element-wise stride (ews) field of shapeInfo:
//   ews = 1  -> elements are contiguous in the array's own order
//   ews = s  -> all elements equidistant with step s (common-vector case)
//   ews = 0  -> no single stride describes the layout
// shapeInfo - full shape descriptor (rank, shape, strides, ..., ews, order)
// len       - number of elements, or -1 to have it computed here
INLINEDEF void _CUDA_HD setEws(Nd4jLong* shapeInfo, Nd4jLong len) {

    const int rank          = shape::rank(shapeInfo);
    const Nd4jLong* shape   = shape::shapeOf(shapeInfo);
    const Nd4jLong* strides = shape::stride(shapeInfo);
    const char order        = shape::order(shapeInfo);
    Nd4jLong* ews           = shape::ews(shapeInfo);

    if(len == -1) // calculate array length if it is not given
        len = shape::length(shapeInfo);

    if(len <= 1) { // empty, scalar or unity-vector case
        *ews = 1;
        return;
    }

    // a "common vector" has a single non-unity dimension; its stride alone is the ews
    int nonUnityDim(0);
    if(shape::isCommonVector(shapeInfo, nonUnityDim)) {
        *ews = strides[nonUnityDim];
        return;
    }

    // check last(c)/first(f) dimension, it should be equal to 1
    // (i.e. the innermost dimension must either have extent 1 or stride 1,
    // otherwise no uniform element-wise stride exists)
    if((order == 'c' && shape[rank - 1] != 1 && strides[rank - 1] != 1) || (order == 'f' && shape[0] != 1 && strides[0] != 1)) {
        *ews = 0;
        return;
    }

    // walk outward from the innermost dimension, accumulating the stride a
    // contiguous layout would have, and compare with the actual strides
    // (dimensions of extent 1 are ignored since their stride is irrelevant)
    Nd4jLong correctStride = 1;
    if(order == 'c') {
        for (int i = rank - 2; i >= 0 ; i--) {
            correctStride *= shape[i + 1];
            if(shape[i] == 1)
                continue;
            if(correctStride != strides[i]) {
                *ews = 0;
                return;
            }
        }
    }
    else {
        for (int i = 1; i < rank; ++i) {
            correctStride *= shape[i - 1];
            if(shape[i] == 1)
                continue;
            if(correctStride != strides[i]) {
                *ews = 0;
                return;
            }
        }
    }

    // every stride matched the contiguous layout
    *ews = 1;
}
}
#endif /* SHAPE_H_ */
|
target_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd foo
void test_no_clause() {
  // Directive with no clauses: accepted on a for loop, rejected when the
  // associated statement is not a for loop.
  int i;
#pragma omp target parallel for simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp target parallel for simd' must be a for loop}}
#pragma omp target parallel for simd
  ++i;
}
void test_branch_protected_scope() {
  // Jump restrictions: no branching into or out of the OpenMP region and no
  // 'return' from inside it; jumps entirely within the region are allowed.
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
void test_invalid_clause() {
  // Unknown tokens after the directive name are diagnosed and ignored.
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
void test_non_identifiers() {
  // Stray tokens (';', ',') after the directive or a clause are diagnosed as
  // extra tokens and ignored.
  int i, x;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd;
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
void test_collapse() {
  // 'collapse' clause: parser recovery for malformed argument lists,
  // constant-expression and positivity checks, loop-nest counting, and
  // interaction with data-sharing clauses on the collapsed iteration variable.
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
#pragma omp target parallel for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target parallel for simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
  for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target parallel for simd' directive may not be firstprivate, predetermined as lastprivate}}
    // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
void test_private() {
  // 'private' clause: parser recovery for malformed lists and the
  // requirement that list items be variable names; then valid uses.
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd private(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd private(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd private()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target parallel for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
void test_lastprivate() {
  // 'lastprivate' clause: parser recovery for malformed lists and the
  // requirement that list items be variable names; then valid uses.
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target parallel for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
void test_firstprivate() {
  // 'firstprivate' clause: parser recovery for malformed lists, name
  // requirements, and valid combination with 'lastprivate' on the same items.
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target parallel for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
void test_loop_messages() {
  // The loop iteration variable must have integer or pointer type;
  // float and double induction variables are rejected.
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
void test_safelen() {
  // 'safelen' clause: parser recovery for malformed arguments and
  // constant-expression / strict-positivity requirements.
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd safelen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
void test_simdlen() {
  // 'simdlen' clause: same parser-recovery and strict-positivity checks
  // as 'safelen'.
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
void test_safelen_simdlen() {
  // simdlen must not exceed safelen, regardless of clause order.
  int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target parallel for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target parallel for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}
void test_nontemporal() {
  // 'nontemporal' clause (OpenMP 5.0): rejected under -fopenmp-version=45
  // (omp45- prefixed diagnostics), parsed and semantically checked under 5.0.
  int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp target parallel for simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp target parallel for simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp target parallel for simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp target parallel for simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp target parallel for simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp target parallel for simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp target parallel for simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
}
|
primordial.c | /** @file primordial.c Documented primordial module.
*
* Julien Lesgourgues, 24.08.2010
*
* This module computes the primordial spectra. It can be used in different modes:
* simple parametric form, evolving inflaton perturbations, etc. So far only
* the mode corresponding to a simple analytic form in terms of amplitudes, tilts
* and runnings has been developed.
*
* The following functions can be called from other modules:
*
* -# primordial_init() at the beginning (anytime after perturb_init() and before spectra_init())
* -# primordial_spectrum_at_k() at any time for computing P(k) at any k
* -# primordial_free() at the end
*/
#include "primordial.h"
/**
* Primordial spectra for arbitrary argument and for all initial conditions.
*
* This routine evaluates the primordial spectrum at a given value of k by
* interpolating in the pre-computed table.
*
* When k is not in the pre-computed range but the spectrum can be found
* analytically, it finds it. Otherwise returns an error.
*
* Can be called in two modes; linear or logarithmic:
*
* - linear: takes k, returns P(k)
*
* - logarithmic: takes ln(k), return ln(P(k))
*
* One little subtlety: in case of several correlated initial conditions,
* the cross-correlation spectrum can be negative. Then, in logarithmic mode,
* the non-diagonal elements contain the cross-correlation angle \f$ P_{12}/\sqrt{P_{11} P_{22}}\f$
* (from -1 to 1) instead of \f$\ln{P_{12}}\f$
*
* This function can be
* called from whatever module at whatever time, provided that
* primordial_init() has been called before, and primordial_free() has not
* been called yet.
*
* @param ppm Input: pointer to primordial structure containing tabulated primordial spectrum
* @param index_md Input: index of mode (scalar, tensor, ...)
* @param mode Input: linear or logarithmic
* @param input Input: wavenumber in 1/Mpc (linear mode) or its logarithm (logarithmic mode)
* @param output Output: for each pair of initial conditions, primordial spectra P(k) in \f$Mpc^3\f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode)
* @return the error status
*/
int primordial_spectrum_at_k(
                             struct primordial * ppm,
                             int index_md,
                             enum linear_or_logarithmic mode,
                             double input,
                             double * output /* array with argument output[index_ic1_ic2] (must be already allocated) */
                             ) {

  /** Summary: */

  /** - define local variables */

  int index_ic1,index_ic2,index_ic1_ic2;
  double lnk;
  int last_index;

  /** - infer ln(k) from input. In linear mode, reject negative value of input k value. */

  if (mode == linear) {
    class_test(input<=0.,
               ppm->error_message,
               "k = %e",input);
    lnk=log(input);
  }
  else {
    lnk = input;
  }

  /** - if ln(k) is not in the interpolation range, return an error, unless
      we are in the case of a analytic spectrum, for which a direct computation is possible */

  if ((lnk > ppm->lnk[ppm->lnk_size-1]) || (lnk < ppm->lnk[0])) {

    class_test(ppm->primordial_spec_type != analytic_Pk,
               ppm->error_message,
               "k=%e out of range [%e : %e]",exp(lnk),exp(ppm->lnk[0]),exp(ppm->lnk[ppm->lnk_size-1]));

    /* direct computation over the upper triangle of the (ic1,ic2) matrix */

    for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
      for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {

        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);

        if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

          class_call(primordial_analytic_spectrum(ppm,
                                                  index_md,
                                                  index_ic1_ic2,
                                                  exp(lnk),
                                                  &(output[index_ic1_ic2])),
                     ppm->error_message,
                     ppm->error_message);
        }
        else {
          output[index_ic1_ic2] = 0.;
        }
      }
    }

    /* if mode==linear, output is already in the correct format. Otherwise, apply necessary transformation. */

    if (mode == logarithmic) {

      /* off-diagonal elements first: P_12 -> cross-correlation angle
         P_12/sqrt(P_11 P_22). This MUST be done while the diagonal still
         holds P rather than ln(P); the original code took the log of the
         diagonal before this division, normalizing by sqrt(ln P) instead
         (negative for P<1, hence NaN). Note this order is the exact mirror
         of the inverse transformation applied in the interpolation branch
         below (exp of diagonal first, then multiplication). */
      for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
        for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
          index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
          if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
            output[index_ic1_ic2] /= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]*
                                          output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]);
          }
        }
      }

      /* diagonal elements: P -> ln(P) */
      for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
        output[index_ic1_ic2] = log(output[index_ic1_ic2]);
      }
    }
  }

  /** - otherwise, interpolate in the pre-computed table */

  else {

    class_call(array_interpolate_spline(
                                        ppm->lnk,
                                        ppm->lnk_size,
                                        ppm->lnpk[index_md],
                                        ppm->ddlnpk[index_md],
                                        ppm->ic_ic_size[index_md],
                                        lnk,
                                        &last_index,
                                        output,
                                        ppm->ic_ic_size[index_md],
                                        ppm->error_message),
               ppm->error_message,
               ppm->error_message);

    /* if mode==logarithmic, output is already in the correct format. Otherwise, apply necessary transformation. */

    if (mode == linear) {

      /* diagonal elements first: ln(P) -> P (needed below) */
      for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
        output[index_ic1_ic2]=exp(output[index_ic1_ic2]);
      }

      /* off-diagonal elements: cross-correlation angle -> P_12 */
      for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
        for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
          index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
          if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
            output[index_ic1_ic2] *= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]*
                                          output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]);
          }
          else {
            output[index_ic1_ic2] = 0.;
          }
        }
      }
    }
  }

  return _SUCCESS_;

}
/**
 * This routine initializes the primordial structure (in particular, it computes table of primordial spectrum values)
 *
 * @param ppr Input: pointer to precision structure (defines method and precision for all computations)
 * @param ppt Input: pointer to perturbation structure (useful for knowing k_min, k_max, etc.)
 * @param ppm Output: pointer to initialized primordial structure
 * @return the error status
 */
int primordial_init(
                    struct precision * ppr,
                    struct perturbs * ppt,
                    struct primordial * ppm
                    ) {

  /** Summary: */

  /** - define local variables */

  double k,k_min,k_max;
  int index_md,index_ic1,index_ic2,index_ic1_ic2,index_k;
  double pk,pk1,pk2;
  /* finite-difference step in ln(k), and ln(P) sampled around the pivot scale,
     used at the end to extract A_s, n_s, alpha_s, beta_s (and tensor analogues) */
  double dlnk,lnpk_pivot,lnpk_minus,lnpk_plus,lnpk_minusminus,lnpk_plusplus;

  /* uncomment if you use optional test below
     (for correlated isocurvature modes) */

  //double cos_delta_k;

  /** - check that we really need to compute the primordial spectra */

  if (ppt->has_perturbations == _FALSE_) {
    /* lnk_size=0 is the flag telling primordial_free() that nothing was allocated */
    ppm->lnk_size=0;
    if (ppm->primordial_verbose > 0)
      printf("No perturbations requested. Primordial module skipped.\n");
    return _SUCCESS_;
  }
  else {
    if (ppm->primordial_verbose > 0)
      /* no newline here: the line is completed by a branch-specific message below */
      printf("Computing primordial spectra");
  }

  /** - get kmin and kmax from perturbation structure. Test that they make sense. */

  k_min = ppt->k_min;     /* first value, inferred from perturbations structure */
  k_max = ppt->k_max;     /* last value, inferred from perturbations structure */

  class_test(k_min <= 0.,
             ppm->error_message,
             "k_min negative or null: stop to avoid segmentation fault");

  class_test(k_max <= 0.,
             ppm->error_message,
             "k_max negative or null: stop to avoid segmentation fault");

  class_test(ppm->k_pivot <= 0.,
             ppm->error_message,
             "k_pivot negative or null: stop to avoid segmentation fault");

  class_test(ppr->k_per_decade_primordial <= 0.,
             ppm->error_message,
             "k_per_decade_primordial negative or null: stop to avoid segmentation fault");

  class_test(ppr->k_per_decade_primordial <= _K_PER_DECADE_PRIMORDIAL_MIN_,
             ppm->error_message,
             "k_per_decade_primordial = %e: you ask for such a sparse sampling of the primordial spectrum that this is probably a mistake",
             ppr->k_per_decade_primordial);

  /** - allocate and fill values of \f$ \ln{k}\f$'s */

  class_call(primordial_get_lnk_list(ppm,
                                     k_min,
                                     k_max,
                                     ppr->k_per_decade_primordial
                                     ),
             ppm->error_message,
             ppm->error_message);

  /** - define indices and allocate tables in primordial structure */

  class_call(primordial_indices(ppt,
                                ppm),
             ppm->error_message,
             ppm->error_message);

  /** - deal with case of analytic primordial spectra (with amplitudes, tilts, runnings, etc.) */

  if (ppm->primordial_spec_type == analytic_Pk) {

    if (ppm->primordial_verbose > 0)
      printf(" (analytic spectrum)\n");

    /* on failure, free the tables already allocated by primordial_indices() */
    class_call_except(primordial_analytic_spectrum_init(ppt,
                                                        ppm),
                      ppm->error_message,
                      ppm->error_message,
                      primordial_free(ppm));

    for (index_k = 0; index_k < ppm->lnk_size; index_k++) {

      k=exp(ppm->lnk[index_k]);

      for (index_md = 0; index_md < ppt->md_size; index_md++) {
        for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
          for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {

            index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);

            if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

              class_call(primordial_analytic_spectrum(ppm,
                                                      index_md,
                                                      index_ic1_ic2,
                                                      k,
                                                      &pk),
                         ppm->error_message,
                         ppm->error_message);

              if (index_ic1 == index_ic2) {

                /* diagonal coefficients: ln[P(k)] */

                ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = log(pk);
              }
              else {

                /* non-diagonal coefficients: cosDelta(k) = P(k)_12/sqrt[P(k)_1 P(k)_2] */

                class_call(primordial_analytic_spectrum(ppm,
                                                        index_md,
                                                        index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]),
                                                        k,
                                                        &pk1),
                           ppm->error_message,
                           ppm->error_message);

                class_call(primordial_analytic_spectrum(ppm,
                                                        index_md,
                                                        index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]),
                                                        k,
                                                        &pk2),
                           ppm->error_message,
                           ppm->error_message);

                /* either return an error if correlation is too large... */
                /*
                  cos_delta_k = pk/sqrt(pk1*pk2);
                  class_test_except((cos_delta_k < -1.) || (cos_delta_k > 1.),
                  ppm->error_message,
                  primordial_free(ppm),
                  "correlation angle between IC's takes unphysical values");

                  ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = cos_delta_k;
                */

                /* ... or enforce definite positive correlation matrix */
                /* clip the correlation coefficient to the physical range [-1,1] */

                if (pk > sqrt(pk1*pk2))
                  ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 1.;

                else if (pk < -sqrt(pk1*pk2))
                  ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = -1.;

                else
                  ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = pk/sqrt(pk1*pk2);
              }
            }
            else {

              /* non-diagonal coefficients when ic's are uncorrelated */

              ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 0.;
            }
          }
        }
      }
    }
  }

  /** - deal with case of inflation with given \f$V(\phi)\f$ or \f$H(\phi)\f$ */

  else if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_H) || (ppm->primordial_spec_type == inflation_V_end)) {

    class_call(primordial_inflation_indices(ppm),
               ppm->error_message,
               ppm->error_message);

    if (ppm->primordial_verbose > 0)
      printf(" (simulating inflation)\n");

    class_call_except(primordial_inflation_solve_inflation(ppt,ppm,ppr),
                      ppm->error_message,
                      ppm->error_message,
                      primordial_free(ppm));
  }

  /** - deal with the case of external calculation of \f$ P_k \f$*/

  else if (ppm->primordial_spec_type == external_Pk) {

    class_test(ppt->has_scalars == _FALSE_,
               ppm->error_message,
               "external Pk module cannot work if you do not ask for scalar modes");

    class_test(ppt->has_vectors == _TRUE_,
               ppm->error_message,
               "external Pk module cannot work if you ask for vector modes");

    class_test(ppt->has_bi == _TRUE_ || ppt->has_cdi == _TRUE_ || ppt->has_nid == _TRUE_ || ppt->has_niv == _TRUE_,
               ppm->error_message,
               "external Pk module cannot work if you ask for isocurvature modes (but that could be implemented easily in the future!)");

    if (ppm->primordial_verbose > 0)
      printf(" (Pk calculated externally)\n");

    class_call_except(primordial_external_spectrum_init(ppt,ppm),
                      ppm->error_message,
                      ppm->error_message,
                      primordial_free(ppm));
  }

  else {

    /* 0==0 is always true: unconditionally report the unrecognized spectrum type */
    class_test(0==0,
               ppm->error_message,
               "primordial spectrum type not recognized");
  }

  /** - compute second derivative of each \f$ \ln{P_k} \f$ versus lnk with spline, in view of interpolation */

  for (index_md = 0; index_md < ppm->md_size; index_md++) {

    class_call(array_spline_table_lines(ppm->lnk,
                                        ppm->lnk_size,
                                        ppm->lnpk[index_md],
                                        ppm->ic_ic_size[index_md],
                                        ppm->ddlnpk[index_md],
                                        _SPLINE_EST_DERIV_,
                                        ppm->error_message),
               ppm->error_message,
               ppm->error_message);
  }

  /** - derive spectral parameters from numerically computed spectra
      (not used by the rest of the code, but useful to keep in memory for several types of investigation) */

  if (ppm->primordial_spec_type != analytic_Pk) {

    dlnk = log(10.)/ppr->k_per_decade_primordial;

    if (ppt->has_scalars == _TRUE_) {

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_scalars,
                                          logarithmic,
                                          log(ppm->k_pivot),
                                          &lnpk_pivot),
                 ppm->error_message,
                 ppm->error_message);

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_scalars,
                                          logarithmic,
                                          log(ppm->k_pivot)+dlnk,
                                          &lnpk_plus),
                 ppm->error_message,
                 ppm->error_message);

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_scalars,
                                          logarithmic,
                                          log(ppm->k_pivot)-dlnk,
                                          &lnpk_minus),
                 ppm->error_message,
                 ppm->error_message);

      /* central finite differences of ln(P) around ln(k_pivot) */
      ppm->A_s = exp(lnpk_pivot);
      ppm->n_s = (lnpk_plus-lnpk_minus)/(2.*dlnk)+1.;
      ppm->alpha_s = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2);

      /** - expression for alpha_s comes from:

          `ns_2 = (lnpk_plus-lnpk_pivot)/(dlnk)+1`

          `ns_1 = (lnpk_pivot-lnpk_minus)/(dlnk)+1`

          `alpha_s = dns/dlnk = (ns_2-ns_1)/dlnk = (lnpk_plus-lnpk_pivot-lnpk_pivot+lnpk_minus)/(dlnk)/(dlnk)`

      **/

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_scalars,
                                          logarithmic,
                                          log(ppm->k_pivot)+2.*dlnk,
                                          &lnpk_plusplus),
                 ppm->error_message,
                 ppm->error_message);

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_scalars,
                                          logarithmic,
                                          log(ppm->k_pivot)-2.*dlnk,
                                          &lnpk_minusminus),
                 ppm->error_message,
                 ppm->error_message);

      /** - expression for beta_s:

          `ppm->beta_s = (alpha_plus-alpha_minus)/dlnk = (lnpk_plusplus-2.*lnpk_plus+lnpk_pivot -
          (lnpk_pivot-2.*lnpk_minus+lnpk_minusminus))/pow(dlnk,3)`
      **/

      /* Simplification of the beta_s expression: */

      ppm->beta_s = (lnpk_plusplus-2.*lnpk_plus+2.*lnpk_minus-lnpk_minusminus)/pow(dlnk,3);

      if (ppm->primordial_verbose > 0)
        printf(" -> A_s=%g n_s=%g alpha_s=%g\n",ppm->A_s,ppm->n_s,ppm->alpha_s);

    }

    if (ppt->has_tensors == _TRUE_) {

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_tensors,
                                          logarithmic,
                                          log(ppm->k_pivot),
                                          &lnpk_pivot),
                 ppm->error_message,
                 ppm->error_message);

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_tensors,
                                          logarithmic,
                                          log(ppm->k_pivot)+dlnk,
                                          &lnpk_plus),
                 ppm->error_message,
                 ppm->error_message);

      class_call(primordial_spectrum_at_k(ppm,
                                          ppt->index_md_tensors,
                                          logarithmic,
                                          log(ppm->k_pivot)-dlnk,
                                          &lnpk_minus),
                 ppm->error_message,
                 ppm->error_message);

      /* NOTE(review): uses ppm->A_s computed in the scalar block above — this
         assumes has_scalars is _TRUE_ whenever has_tensors is; confirm upstream */
      ppm->r = exp(lnpk_pivot)/ppm->A_s;
      ppm->n_t = (lnpk_plus-lnpk_minus)/(2.*dlnk);
      ppm->alpha_t = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2);

      if (ppm->primordial_verbose > 0)
        printf(" -> r=%g n_t=%g alpha_t=%g\n",ppm->r,ppm->n_t,ppm->alpha_t);

    }

  }

  return _SUCCESS_;

}
/**
 * Releases every table allocated by primordial_init().
 *
 * To be called once at the end of each run, when the primordial structure
 * is no longer needed. If primordial_init() was skipped (lnk_size==0),
 * nothing was allocated and this routine is a no-op.
 *
 * @param ppm Input: pointer to primordial structure (which fields must be freed)
 * @return the error status
 */
int primordial_free(
                    struct primordial * ppm
                    ) {

  int md;

  /* lnk_size==0 means the module was skipped: no tables to release */
  if (ppm->lnk_size <= 0)
    return _SUCCESS_;

  switch (ppm->primordial_spec_type) {

  case analytic_Pk:
    /* condensed analytic coefficients: one table per mode */
    for (md = 0; md < ppm->md_size; md++) {
      free(ppm->amplitude[md]);
      free(ppm->tilt[md]);
      free(ppm->running[md]);
    }
    free(ppm->amplitude);
    free(ppm->tilt);
    free(ppm->running);
    break;

  case external_Pk:
    /* command line used to invoke the external spectrum code */
    free(ppm->command);
    break;

  default:
    /* other spectrum types allocate nothing beyond the shared tables below */
    break;
  }

  /* spectrum tables, common to all primordial_spec_type values */
  for (md = 0; md < ppm->md_size; md++) {
    free(ppm->lnpk[md]);
    free(ppm->ddlnpk[md]);
    free(ppm->is_non_zero[md]);
  }
  free(ppm->lnpk);
  free(ppm->ddlnpk);
  free(ppm->is_non_zero);
  free(ppm->ic_size);
  free(ppm->ic_ic_size);
  free(ppm->lnk);

  return _SUCCESS_;

}
/**
 * This routine defines indices and allocates tables in the primordial structure
 *
 * Requires ppm->lnk_size to be already set by primordial_get_lnk_list().
 *
 * @param ppt Input: pointer to perturbation structure
 * @param ppm Input/output: pointer to primordial structure
 * @return the error status
 */
int primordial_indices(
                       struct perturbs * ppt,
                       struct primordial * ppm
                       ) {

  int index_md;

  ppm->md_size = ppt->md_size;

  /* one table pointer per mode (scalars, tensors, ...) */
  class_alloc(ppm->lnpk,ppm->md_size*sizeof(double*),ppm->error_message);

  class_alloc(ppm->ddlnpk,ppm->md_size*sizeof(double*),ppm->error_message);

  /* fixed: ic_size and ic_ic_size are arrays of int, not of pointers;
     the previous sizeof(int*) over-allocated on 64-bit platforms */
  class_alloc(ppm->ic_size,ppm->md_size*sizeof(int),ppm->error_message);

  class_alloc(ppm->ic_ic_size,ppm->md_size*sizeof(int),ppm->error_message);

  class_alloc(ppm->is_non_zero,ppm->md_size*sizeof(short *),ppm->error_message);

  for (index_md = 0; index_md < ppm->md_size; index_md++) {

    ppm->ic_size[index_md] = ppt->ic_size[index_md];

    /* number of independent entries of the symmetric (ic1,ic2) matrix */
    ppm->ic_ic_size[index_md] = (ppm->ic_size[index_md]*(ppm->ic_size[index_md]+1))/2;

    /* lnpk/ddlnpk store ic_ic_size entries per wavenumber, lnk_size wavenumbers */
    class_alloc(ppm->lnpk[index_md],
                ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double),
                ppm->error_message);

    class_alloc(ppm->ddlnpk[index_md],
                ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double),
                ppm->error_message);

    class_alloc(ppm->is_non_zero[index_md],
                ppm->ic_ic_size[index_md]*sizeof(short),
                ppm->error_message);

  }

  return _SUCCESS_;

}
/**
 * Allocates and fills the list of ln(k) values, logarithmically spaced with
 * a fixed number of points per decade, starting at kmin and guaranteed to
 * encompass kmax.
 *
 * @param ppm          Input/output: pointer to primordial structure
 * @param kmin         Input: first value
 * @param kmax         Input: last value that we should encompass
 * @param k_per_decade Input: number of k per decade
 * @return the error status
 */
int primordial_get_lnk_list(
                            struct primordial * ppm,
                            double kmin,
                            double kmax,
                            double k_per_decade
                            ) {

  int index_k;

  class_test((kmin <= 0.) || (kmax <= kmin),
             ppm->error_message,
             "inconsistent values of kmin=%e, kmax=%e",kmin,kmax);

  /* the "+2" guarantees that the last sampled point lies at or beyond kmax */
  ppm->lnk_size = (int)(log(kmax/kmin)/log(10.)*k_per_decade) + 2;

  class_alloc(ppm->lnk,ppm->lnk_size*sizeof(double),ppm->error_message);

  /* uniform grid in ln(k) with step ln(10)/k_per_decade, anchored at kmin */
  for (index_k=0; index_k<ppm->lnk_size; index_k++)
    ppm->lnk[index_k]=log(kmin)+index_k*log(10.)/k_per_decade;

  return _SUCCESS_;

}
/**
 * This routine interprets and stores in a condensed form the input parameters
 * in the case of simple analytic spectra with amplitudes, tilts, runnings,
 * in such a way that later on, the spectrum can be obtained by a quick call to
 * the routine primordial_analytic_spectrum()
 *
 * Diagonal entries store (amplitude, tilt, running) per initial condition;
 * off-diagonal entries store the correlated cross-spectrum coefficients
 * derived from the diagonal ones and the input correlation parameters.
 *
 * @param ppt Input: pointer to perturbation structure
 * @param ppm Input/output: pointer to primordial structure
 * @return the error status
 */
int primordial_analytic_spectrum_init(
                                      struct perturbs * ppt,
                                      struct primordial * ppm
                                      ) {

  int index_md,index_ic1,index_ic2;
  int index_ic1_ic2,index_ic1_ic1,index_ic2_ic2;
  double one_amplitude=0.;
  double one_tilt=0.;
  double one_running=0.;
  double one_correlation=0.;

  class_alloc(ppm->amplitude,
              ppm->md_size*sizeof(double *),
              ppm->error_message);

  class_alloc(ppm->tilt,
              ppm->md_size*sizeof(double *),
              ppm->error_message);

  class_alloc(ppm->running,
              ppm->md_size*sizeof(double *),
              ppm->error_message);

  for (index_md = 0; index_md < ppm->md_size; index_md++) {

    class_alloc(ppm->amplitude[index_md],
                ppm->ic_ic_size[index_md]*sizeof(double),
                ppm->error_message);

    class_alloc(ppm->tilt[index_md],
                ppm->ic_ic_size[index_md]*sizeof(double),
                ppm->error_message);

    class_alloc(ppm->running[index_md],
                ppm->ic_ic_size[index_md]*sizeof(double),
                ppm->error_message);

  }

  for (index_md = 0; index_md < ppm->md_size; index_md++) {

    /* diagonal coefficients */

    for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {

      if (_scalars_) {

        if ((ppt->has_ad == _TRUE_) && (index_ic1 == ppt->index_ic_ad)) {
          one_amplitude = ppm->A_s;
          one_tilt = ppm->n_s;
          one_running = ppm->alpha_s;
        }

        /* isocurvature amplitudes are specified as fractions f_X of A_s */
        if ((ppt->has_bi == _TRUE_) && (index_ic1 == ppt->index_ic_bi)) {
          one_amplitude = ppm->A_s*ppm->f_bi*ppm->f_bi;
          one_tilt = ppm->n_bi;
          one_running = ppm->alpha_bi;
        }

        if ((ppt->has_cdi == _TRUE_) && (index_ic1 == ppt->index_ic_cdi)) {
          one_amplitude = ppm->A_s*ppm->f_cdi*ppm->f_cdi;
          one_tilt = ppm->n_cdi;
          one_running = ppm->alpha_cdi;
        }

        if ((ppt->has_nid == _TRUE_) && (index_ic1 == ppt->index_ic_nid)) {
          one_amplitude = ppm->A_s*ppm->f_nid*ppm->f_nid;
          one_tilt = ppm->n_nid;
          one_running = ppm->alpha_nid;
        }

        if ((ppt->has_niv == _TRUE_) && (index_ic1 == ppt->index_ic_niv)) {
          one_amplitude = ppm->A_s*ppm->f_niv*ppm->f_niv;
          one_tilt = ppm->n_niv;
          one_running = ppm->alpha_niv;
        }
      }

      if (_tensors_) {

        if (index_ic1 == ppt->index_ic_ten) {
          one_amplitude = ppm->A_s*ppm->r;
          one_tilt = ppm->n_t+1.; /* +1 to match usual definition of n_t (equivalent to n_s-1) */
          one_running = ppm->alpha_t;
        }
      }

      class_test(one_amplitude <= 0.,
                 ppm->error_message,
                 "inconsistent input for primordial amplitude: %g for index_md=%d, index_ic=%d\n",
                 one_amplitude,index_md,index_ic1);

      index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);

      ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_;
      ppm->amplitude[index_md][index_ic1_ic2] = one_amplitude;
      ppm->tilt[index_md][index_ic1_ic2] = one_tilt;
      ppm->running[index_md][index_ic1_ic2] = one_running;
    }

    /* non-diagonal coefficients */

    for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
      for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {

        if (_scalars_) {

          /* fixed: second disjunct previously tested index_ic1 twice
             ((index_ic1==index_ic_ad) && (index_ic1==index_ic_bi)), which can
             never match; it must mirror the other pairs by swapping ic1/ic2 */
          if ((ppt->has_ad == _TRUE_) && (ppt->has_bi == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_bi)) ||
               ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_bi)))) {
            one_correlation = ppm->c_ad_bi;
            one_tilt = ppm->n_ad_bi;
            one_running = ppm->alpha_ad_bi;
          }

          if ((ppt->has_ad == _TRUE_) && (ppt->has_cdi == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_cdi)) ||
               ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_cdi)))) {
            one_correlation = ppm->c_ad_cdi;
            one_tilt = ppm->n_ad_cdi;
            one_running = ppm->alpha_ad_cdi;
          }

          if ((ppt->has_ad == _TRUE_) && (ppt->has_nid == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_nid)) ||
               ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_nid)))) {
            one_correlation = ppm->c_ad_nid;
            one_tilt = ppm->n_ad_nid;
            one_running = ppm->alpha_ad_nid;
          }

          if ((ppt->has_ad == _TRUE_) && (ppt->has_niv == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_niv)) ||
               ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_niv)))) {
            one_correlation = ppm->c_ad_niv;
            one_tilt = ppm->n_ad_niv;
            one_running = ppm->alpha_ad_niv;
          }

          if ((ppt->has_bi == _TRUE_) && (ppt->has_cdi == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_cdi)) ||
               ((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_cdi)))) {
            one_correlation = ppm->c_bi_cdi;
            one_tilt = ppm->n_bi_cdi;
            one_running = ppm->alpha_bi_cdi;
          }

          if ((ppt->has_bi == _TRUE_) && (ppt->has_nid == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_nid)) ||
               ((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_nid)))) {
            one_correlation = ppm->c_bi_nid;
            one_tilt = ppm->n_bi_nid;
            one_running = ppm->alpha_bi_nid;
          }

          if ((ppt->has_bi == _TRUE_) && (ppt->has_niv == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_niv)) ||
               ((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_niv)))) {
            one_correlation = ppm->c_bi_niv;
            one_tilt = ppm->n_bi_niv;
            one_running = ppm->alpha_bi_niv;
          }

          if ((ppt->has_cdi == _TRUE_) && (ppt->has_nid == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_nid)) ||
               ((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_nid)))) {
            one_correlation = ppm->c_cdi_nid;
            one_tilt = ppm->n_cdi_nid;
            one_running = ppm->alpha_cdi_nid;
          }

          if ((ppt->has_cdi == _TRUE_) && (ppt->has_niv == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_niv)) ||
               ((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_niv)))) {
            one_correlation = ppm->c_cdi_niv;
            one_tilt = ppm->n_cdi_niv;
            one_running = ppm->alpha_cdi_niv;
          }

          if ((ppt->has_nid == _TRUE_) && (ppt->has_niv == _TRUE_) &&
              (((index_ic1 == ppt->index_ic_nid) && (index_ic2 == ppt->index_ic_niv)) ||
               ((index_ic2 == ppt->index_ic_nid) && (index_ic1 == ppt->index_ic_niv)))) {
            one_correlation = ppm->c_nid_niv;
            one_tilt = ppm->n_nid_niv;
            one_running = ppm->alpha_nid_niv;
          }

        }

        class_test((one_correlation < -1) || (one_correlation > 1),
                   ppm->error_message,
                   "inconsistent input for isocurvature cross-correlation\n");

        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
        index_ic1_ic1 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
        index_ic2_ic2 = index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]);

        if (one_correlation == 0.) {
          ppm->is_non_zero[index_md][index_ic1_ic2] = _FALSE_;
          ppm->amplitude[index_md][index_ic1_ic2] = 0.;
          ppm->tilt[index_md][index_ic1_ic2] = 0.;
          ppm->running[index_md][index_ic1_ic2] = 0.;
        }
        else {
          /* cross-amplitude: geometric mean of the diagonal amplitudes times the
             correlation; cross tilt/running: average of diagonals plus offset */
          ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_;
          ppm->amplitude[index_md][index_ic1_ic2] =
            sqrt(ppm->amplitude[index_md][index_ic1_ic1]*
                 ppm->amplitude[index_md][index_ic2_ic2])*
            one_correlation;
          ppm->tilt[index_md][index_ic1_ic2] =
            0.5*(ppm->tilt[index_md][index_ic1_ic1]
                 +ppm->tilt[index_md][index_ic2_ic2])
            + one_tilt;
          ppm->running[index_md][index_ic1_ic2] =
            0.5*(ppm->running[index_md][index_ic1_ic1]
                 +ppm->running[index_md][index_ic2_ic2])
            + one_running;
        }
      }
    }
  }

  return _SUCCESS_;

}
/**
 * This routine returns the primordial spectrum in the simple analytic case with
 * amplitudes, tilts, runnings, for each mode (scalar/tensor...),
 * pair of initial conditions, and wavenumber.
 *
 * @param ppm           Input/output: pointer to primordial structure
 * @param index_md      Input: index of mode (scalar, tensor, ...)
 * @param index_ic1_ic2 Input: pair of initial conditions (ic1, ic2)
 * @param k             Input: wavenumber in same units as pivot scale, i.e. in 1/Mpc
 * @param pk            Output: primordial power spectrum A (k/k_pivot)^(n+...)
 * @return the error status
 */
int primordial_analytic_spectrum(
                                 struct primordial * ppm,
                                 int index_md,
                                 int index_ic1_ic2,
                                 double k,
                                 double * pk
                                 ) {

  /* uncorrelated pair of initial conditions: the cross-spectrum vanishes */
  if (ppm->is_non_zero[index_md][index_ic1_ic2] != _TRUE_) {
    *pk = 0.;
    return _SUCCESS_;
  }

  /* power law with running in ln(k/k_pivot):
     A * exp[ (n-1) ln(k/k*) + (alpha/2) ln^2(k/k*) ] */
  *pk = ppm->amplitude[index_md][index_ic1_ic2]
    *exp((ppm->tilt[index_md][index_ic1_ic2]-1.)*log(k/ppm->k_pivot)
         + 0.5 * ppm->running[index_md][index_ic1_ic2] * pow(log(k/ppm->k_pivot), 2.));

  return _SUCCESS_;

}
/**
 * This routine encodes the inflaton scalar potential and its first two
 * derivatives with respect to the field, for each supported potential shape.
 *
 * @param ppm Input: pointer to primordial structure
 * @param phi Input: background inflaton field value in units of Mp
 * @param V   Output: inflaton potential in units of \f$ Mp^4\f$
 * @param dV  Output: first derivative of inflaton potential wrt the field
 * @param ddV Output: second derivative of inflaton potential wrt the field
 * @return the error status
 */
int primordial_inflation_potential(
                                   struct primordial * ppm,
                                   double phi,
                                   double * V,
                                   double * dV,
                                   double * ddV
                                   ) {

  /* intermediate quantities for the Higgs-inflation case and their first/second
     derivatives wrt phi: e=exp(...), mu=(1-e)^(1/2), l=ln(mu/V2), p=polynomial */
  double e,de,dde,mu,dmu,ddmu,l,dl,ddl,p,dp,ddp;

  switch (ppm->potential) {

    /* V(phi)=polynomial in phi: quartic Taylor expansion with coefficients V0..V4 */
  case polynomial:

    *V   = ppm->V0+phi*ppm->V1+pow(phi,2)/2.*ppm->V2+pow(phi,3)/6.*ppm->V3+pow(phi,4)/24.*ppm->V4;
    *dV  = ppm->V1+phi*ppm->V2+pow(phi,2)/2.*ppm->V3+pow(phi,3)/6.*ppm->V4;
    *ddV = ppm->V2+phi*ppm->V3+pow(phi,2)/2.*ppm->V4;
    break;

    /* V(phi) = Lambda^4(1+cos(phi/f)) = V0 (1+cos(phi/V1)) */
  case natural:

    *V   = ppm->V0*(1.+cos(phi/ppm->V1));
    *dV  = -ppm->V0/ppm->V1*sin(phi/ppm->V1);
    *ddV = -ppm->V0/ppm->V1/ppm->V1*cos(phi/ppm->V1);
    break;

    /* Higgs inflation from arXiv:1403.6078 */
  case higgs_inflation:

    // correspondence with 1403.6078:
    // V0 = b
    // V1 = ksi
    // V2 = kappa
    // V3 = delta_lambda
    // mu = bar(mu)/M_P
    // phi = -chi/M_P

    e = exp(2./sqrt(6.)*sqrt(8.*_PI_)*phi);
    de = 2./sqrt(6.)*sqrt(8.*_PI_)*e;
    dde = 2./3. * 8.*_PI_ * e;

    /* NOTE(review): assumes 1.-e > 0 here; otherwise pow(1.-e,0.5) and the
       log below return NaN. Presumably guaranteed by the sign convention
       phi = -chi/M_P (so phi<0 and e<1) — confirm against the caller. */
    mu = pow(1.-e,0.5);
    dmu = -0.5*de*pow(1.-e,-0.5);
    ddmu = -0.5*dde*pow(1.-e,-0.5)-0.25*de*de*pow(1.-e,-1.5);

    l = log(mu/ppm->V2);
    dl = dmu/mu;
    ddl = ddmu/mu - dl*dl;

    p = 1./16. + ppm->V3/ppm->V0 + l*l;
    dp = 2.*dl*l;
    ddp = 2.*ddl*l+2.*dl*dl;

    /* V and its derivatives follow from the product/chain rules on p*mu^4 */
    *V = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*p*pow(mu,4);
    *dV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(dp*pow(mu,4)+4.*p*dmu*pow(mu,3));
    *ddV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(ddp*pow(mu,4)+8.*dp*dmu*pow(mu,3)+4.*p*ddmu*pow(mu,3)+12.*p*pow(dmu*mu,2));

    //fprintf(stderr,"%e %e %e\n",*V,p,mu);

    break;

    /* code here other shapes */

  default:
    class_stop(ppm->error_message,"ppm->potential=%d different from all known cases",ppm->potential);
    break;
  }

  return _SUCCESS_;
}
/**
 * This routine encodes the function \f$ H(\phi)\f$ as a quartic Taylor
 * expansion around \f$\phi=0\f$ with coefficients H0..H4, together with
 * its first three derivatives obtained term by term.
 *
 * @param ppm  Input: pointer to primordial structure
 * @param phi  Input: background inflaton field value in units of Mp
 * @param H    Output: Hubble parameters in units of Mp
 * @param dH   Output: \f$ dH / d\phi \f$
 * @param ddH  Output: \f$ d^2H / d\phi^2 \f$
 * @param dddH Output: \f$ d^3H / d\phi^3 \f$
 * @return the error status
 */
int primordial_inflation_hubble(
                                struct primordial * ppm,
                                double phi,
                                double * H,
                                double * dH,
                                double * ddH,
                                double * dddH
                                ) {

  /* each derivative simply drops the lowest-order term and shifts coefficients */
  *H   = ppm->H0 + phi*ppm->H1 + pow(phi,2)/2.*ppm->H2 + pow(phi,3)/6.*ppm->H3 + pow(phi,4)/24.*ppm->H4;
  *dH  = ppm->H1 + phi*ppm->H2 + pow(phi,2)/2.*ppm->H3 + pow(phi,3)/6.*ppm->H4;
  *ddH = ppm->H2 + phi*ppm->H3 + pow(phi,2)/2.*ppm->H4;
  *dddH = ppm->H3 + phi*ppm->H4;

  return _SUCCESS_;

}
/**
 * This routine defines the indices used by the inflation simulator to address
 * the vector of background and perturbation quantities. The first in_bg_size
 * entries form the background sub-vector; the full vector has in_size entries.
 *
 * @param ppm Input/output: pointer to primordial structure
 * @return the error status
 */
int primordial_inflation_indices(
                                 struct primordial * ppm
                                 ) {

  int index_in = 0;

  /* indices for background quantities */

  ppm->index_in_a = index_in++;
  ppm->index_in_phi = index_in++;

  /* dphi/dtau is evolved explicitly only when the potential V(phi) is given */
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
    ppm->index_in_dphi = index_in++;

  /* size of background vector */
  ppm->in_bg_size = index_in;

  /* indices for perturbations (real/imaginary parts and their derivatives) */

  ppm->index_in_ksi_re = index_in++;
  ppm->index_in_ksi_im = index_in++;
  ppm->index_in_dksi_re = index_in++;
  ppm->index_in_dksi_im = index_in++;
  ppm->index_in_ah_re = index_in++;
  ppm->index_in_ah_im = index_in++;
  ppm->index_in_dah_re = index_in++;
  ppm->index_in_dah_im = index_in++;

  /* size of perturbation vector */
  ppm->in_size = index_in;

  return _SUCCESS_;

}
/**
 * Main routine of inflation simulator. Its goal is to check the
 * background evolution before and after the pivot value
 * phi=phi_pivot, and then, if this evolution is suitable, to call the
 * routine primordial_inflation_spectra().
 *
 * @param ppt Input: pointer to perturbation structure
 * @param ppm Input/output: pointer to primordial structure
 * @param ppr Input: pointer to precision structure
 * @return the error status
 */
int primordial_inflation_solve_inflation(
                                         struct perturbs * ppt,
                                         struct primordial * ppm,
                                         struct precision *ppr
                                         ) {

  /** Summary: */

  /** - define local variables */

  /* y: running vector of background+perturbation quantities (indexed by
     ppm->index_in_*); y_ini: snapshot at the chosen initial time; dy: derivatives */
  double * y;
  double * y_ini;
  double * dy;
  double a_pivot;
  double a_try;
  double H_pivot;
  double H_try;
  double phi_try;
  double dphidt_pivot;
  double dphidt_try;
  double aH_ini,aH_end;
  double k_max,k_min;
  int counter;
  double dH,ddH,dddH;

  /** - allocate vectors for background/perturbed quantities */

  class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
  class_alloc(y_ini,ppm->in_size*sizeof(double),ppm->error_message);
  class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);

  /** - eventually, needs first to find phi_pivot */

  if (ppm->primordial_spec_type == inflation_V_end) {

    class_call(primordial_inflation_find_phi_pivot(ppm,ppr,y,dy),
               ppm->error_message,
               ppm->error_message);

  }
  else {
    /* for inflation_V / inflation_H, the pivot field value is the origin by convention */
    ppm->phi_pivot = 0.;
  }

  // uncomment these lines if for checking, you want first-order slow-roll predictions
  /*
    if (ppm->primordial_verbose>0) {

    if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {

    double V,dV,ddV;

    class_call(primordial_inflation_check_potential(ppm,ppm->phi_pivot,&V,&dV,&ddV),
    ppm->error_message,
    ppm->error_message);

    fprintf(stdout," -> 1st-order slow-roll prediction for A_s: %g\n",128.*_PI_/3.*pow(V,3)/pow(dV,2));
    fprintf(stdout," -> 1st-order slow-roll prediction for T/S: %g\n",pow(dV/V,2)/_PI_);
    fprintf(stdout," -> 1st-order slow-roll prediction for A_T: %g\n",pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2));
    fprintf(stdout," -> 1st-order slow-roll prediction for n_s: %g\n",1.-6./16./_PI_*pow(dV/V,2)+2./8./_PI_*(ddV/V));
    fprintf(stdout," -> 1st-order slow-roll prediction for n_t: %g\n",-2./16./_PI_*pow(dV/V,2));
    }
    }
  */

  /** - compute H_pivot at phi_pivot */

  switch (ppm->primordial_spec_type) {

  case inflation_V:
  case inflation_V_end:

    /** - check positivity and negative slope of potential in field pivot
        value, and find value of phi_dot and H for field's pivot value,
        assuming slow-roll attractor solution has been reached. If no
        solution, code will stop there. */

    if (ppm->primordial_verbose > 1)
      printf(" (search attractor at pivot)\n");

    /* the free(...) argument of class_call_except avoids leaking y/y_ini/dy on error */
    class_call_except(primordial_inflation_find_attractor(ppm,
                                                          ppr,
                                                          ppm->phi_pivot,
                                                          ppr->primordial_inflation_attractor_precision_pivot,
                                                          y,
                                                          dy,
                                                          &H_pivot,
                                                          &dphidt_pivot),
                      ppm->error_message,
                      ppm->error_message,
                      free(y); free(y_ini); free(dy));

    break;

  case inflation_H:

    /** - check positivity and negative slope of \f$ H(\phi)\f$ in field pivot
        value, and get H_pivot */

    class_call_except(primordial_inflation_check_hubble(ppm,
                                                        ppm->phi_pivot,
                                                        &H_pivot,
                                                        &dH,
                                                        &ddH,
                                                        &dddH),
                      ppm->error_message,
                      ppm->error_message,
                      free(y); free(y_ini); free(dy));
    break;

  default:
    free(y);
    free(y_ini);
    free(dy);
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
    break;
  }

  /** - find a_pivot, value of scale factor when k_pivot crosses horizon while phi=phi_pivot */

  /* horizon crossing condition k = aH, solved for a at the pivot */
  a_pivot = ppm->k_pivot/H_pivot;

  /** - integrate background solution starting from phi_pivot and until
      k_max>>aH. This ensures that the inflationary model considered
      here is valid and that the primordial spectrum can be
      computed. Otherwise, if slow-roll brakes too early, model is not
      suitable and run stops. */

  if (ppm->primordial_verbose > 1)
    printf(" (check inflation duration after phi_pivot=%e)\n",ppm->phi_pivot);

  k_max = exp(ppm->lnk[ppm->lnk_size-1]);
  aH_end = k_max/ppr->primordial_inflation_ratio_max;

  y[ppm->index_in_a] = a_pivot;
  y[ppm->index_in_phi] = ppm->phi_pivot;
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
    y[ppm->index_in_dphi] = a_pivot*dphidt_pivot;

  class_call_except(primordial_inflation_evolve_background(ppm,
                                                           ppr,
                                                           y,
                                                           dy,
                                                           _aH_,
                                                           aH_end,
                                                           _TRUE_,
                                                           forward,
                                                           conformal),
                    ppm->error_message,
                    ppm->error_message,
                    free(y); free(y_ini); free(dy));

  /* we need to do the opposite: to check that there is an initial
     time such that k_min << (aH)_ini. A guess is made by integrating
     backward in time. This can be done exactly for inflation_H, or
     only approximately for inflation_V (using the first-order
     approximation to the attractor inflationary solution). However
     this approximation is irrelevant because nevertheless, later on,
     we compute the attractor solution at the initial time with high
     accuracy, and then we integrate the background equations forward
     in time. Hence the approximation made here introduces zero
     mistake on the final result. It is just a way to find quickly a
     reasonable initial phi value. In the inflation_V case, if the
     exact forward integration reveals that the guess was not good
     (i.e. does not correspond to "early enough"), we iterate over
     sequences of backward/forward integration, until a correct time is
     found. For potential such that no solution exists (no long-enough
     slow-roll period before the pivot scale), the run stops. */

  if (ppm->primordial_verbose > 1)
    printf(" (check inflation duration before pivot)\n");

  k_min = exp(ppm->lnk[0]);
  aH_ini = k_min/ppr->primordial_inflation_ratio_min;

  switch (ppm->primordial_spec_type) {

  case inflation_V:
  case inflation_V_end:

    counter = 0;

    y[ppm->index_in_a] = a_pivot;
    y[ppm->index_in_phi] = ppm->phi_pivot;

    /* iterate backward-guess + forward-check until a_try*H_try <= aH_ini */
    do {

      /* counter to avoid infinite loop */

      counter ++;
      class_test_except(counter >= ppr->primordial_inflation_phi_ini_maxit,
                        ppm->error_message,
                        free(y); free(y_ini); free(dy),
                        "when searching for an initial value of phi just before observable inflation takes place, could not converge after %d iterations. The potential does not allow eough inflationary e-folds before reaching the pivot scale",
                        counter);

      /* try to find a value phi_try such that
         aH=aH_ini*(ppr->primordial_inflation_aH_ini_target) (default:
         aH_ini*0.9). But this is using the approximate backward
         solution. So, anyway, we will check using the exact forward
         solution that at this phi_try, we really have aH < aH_ini; if
         this is not the case, we will iterate until a correct phi_try
         is found. */

      class_call_except(primordial_inflation_evolve_background(ppm,
                                                               ppr,
                                                               y,
                                                               dy,
                                                               _aH_,
                                                               aH_ini*ppr->primordial_inflation_aH_ini_target,
                                                               _TRUE_,
                                                               backward,
                                                               conformal),
                        ppm->error_message,
                        ppm->error_message,
                        free(y); free(y_ini); free(dy));

      phi_try = y[ppm->index_in_phi];

      /* in inflation_V case, find the accurate attractor solution for
         phi_ini', and then the correct value of a_ini, and finally of
         dphi/dtau_ini */

      /* find dphi/dt_ini (unlike dphi/dtau_ini, this does not depend on normalization of a) */
      class_call_except(primordial_inflation_find_attractor(ppm,
                                                            ppr,
                                                            phi_try,
                                                            ppr->primordial_inflation_attractor_precision_initial,
                                                            y,
                                                            dy,
                                                            &H_try,
                                                            &dphidt_try),
                        ppm->error_message,
                        ppm->error_message,
                        free(y); free(y_ini); free(dy));

      /* we need to normalize a properly so that a=a_pivot when
         phi=phi_pivot. To do so, we evolve starting arbitrarily from
         a_ini=1, and then we rescale a_ini appropriately. */

      y[ppm->index_in_a] = 1.;
      y[ppm->index_in_phi] = phi_try;
      y[ppm->index_in_dphi] = y[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt

      class_call_except(primordial_inflation_evolve_background(ppm,
                                                               ppr,
                                                               y,
                                                               dy,
                                                               _phi_,
                                                               ppm->phi_pivot,
                                                               _TRUE_,
                                                               forward,
                                                               conformal),
                        ppm->error_message,
                        ppm->error_message,
                        free(y); free(y_ini); free(dy));

      /* now impose the correct a_ini */
      a_try = a_pivot/y[ppm->index_in_a];

      /* in case another iteration will be needed, set a new starting point for the routine primordial_inflation_evolve_background(...,backward) */
      y[ppm->index_in_a] = a_try;
      y[ppm->index_in_phi] = phi_try;

    } while (a_try*H_try > aH_ini);

    y_ini[ppm->index_in_a] = a_try;
    y_ini[ppm->index_in_phi] = phi_try;
    y_ini[ppm->index_in_dphi] = y_ini[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt

    break;

  case inflation_H:

    /* for H(phi), a single exact backward integration suffices */
    y[ppm->index_in_a] = a_pivot;
    y[ppm->index_in_phi] = ppm->phi_pivot;

    class_call_except(primordial_inflation_evolve_background(ppm,
                                                             ppr,
                                                             y,
                                                             dy,
                                                             _aH_,
                                                             aH_ini,
                                                             _TRUE_,
                                                             backward,
                                                             conformal),
                      ppm->error_message,
                      ppm->error_message,
                      free(y); free(y_ini); free(dy));

    y_ini[ppm->index_in_a] = y[ppm->index_in_a];
    y_ini[ppm->index_in_phi] = y[ppm->index_in_phi];

    break;

  default:
    free(y);
    free(y_ini);
    free(dy);
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
    break;
  }

  /** - starting from this time, i.e. from y_ini[ ], we run the routine
      which takes care of computing the primordial spectrum. */

  if (ppm->primordial_verbose > 1)
    printf(" (compute spectrum)\n");

  if (ppm->behavior == numerical) {

    class_call_except(primordial_inflation_spectra(ppt,
                                                   ppm,
                                                   ppr,
                                                   y_ini),
                      ppm->error_message,
                      ppm->error_message,
                      free(y); free(y_ini); free(dy));
  }
  else if (ppm->behavior == analytical) {

    class_call_except(primordial_inflation_analytic_spectra(ppt,
                                                            ppm,
                                                            ppr,
                                                            y_ini),
                      ppm->error_message,
                      ppm->error_message,
                      free(y); free(y_ini); free(dy));
  }
  else {
    class_stop(ppm->error_message,"Uncomprehensible value of the flag ppm->behavior=%d",ppm->behavior);
  }

  /** - before ending, we want to compute and store the values of \f$ \phi \f$
      corresponding to k=aH for k_min and k_max */

  y[ppm->index_in_a] = y_ini[ppm->index_in_a];
  y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
    y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];

  class_call_except(primordial_inflation_evolve_background(ppm,
                                                           ppr,
                                                           y,
                                                           dy,
                                                           _aH_,
                                                           k_min,
                                                           _FALSE_,
                                                           forward,
                                                           conformal),
                    ppm->error_message,
                    ppm->error_message,
                    free(y); free(y_ini); free(dy));

  ppm->phi_min=y[ppm->index_in_phi];

  class_call_except(primordial_inflation_evolve_background(ppm,
                                                           ppr,
                                                           y,
                                                           dy,
                                                           _aH_,
                                                           k_max,
                                                           _FALSE_,
                                                           forward,
                                                           conformal),
                    ppm->error_message,
                    ppm->error_message,
                    free(y); free(y_ini); free(dy));

  ppm->phi_max=y[ppm->index_in_phi];

  if (ppm->primordial_verbose > 1)
    printf(" (observable power spectrum goes from %e to %e)\n",
           ppm->phi_min,
           ppm->phi_max);

  /** - finally, we can de-allocate */

  free(y);
  free(y_ini);
  free(dy);

  return _SUCCESS_;

}
/**
 * Routine for the computation of an analytic approximation to the
 * primordial spectrum. In general, should be used only for
 * comparing with the exact numerical computation performed by
 * primordial_inflation_spectra().
 *
 * @param ppt   Input: pointer to perturbation structure
 * @param ppm   Input/output: pointer to primordial structure
 * @param ppr   Input: pointer to precision structure
 * @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
 * @return the error status
 */
int primordial_inflation_analytic_spectra(
                                          struct perturbs * ppt,
                                          struct primordial * ppm,
                                          struct precision * ppr,
                                          double * y_ini
                                          ) {

  double * y;
  double * dy;
  int index_k;
  double k,phi_k;
  double curvature,tensors;
  double V,dV,ddV;

  /** Summary */

  /** - allocate vectors for background/perturbed quantities */
  class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
  class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);

  /** - initialize the background part of the running vector */
  y[ppm->index_in_a] = y_ini[ppm->index_in_a];
  y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
    y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];

  /** - loop over Fourier wavenumbers */
  for (index_k=0; index_k < ppm->lnk_size; index_k++) {

    k = exp(ppm->lnk[index_k]);

    /* evolve background until k=aH is reached */
    /* note: the background is NOT reset between iterations; each k resumes
       from where the previous (smaller) k left off, since lnk is increasing */
    class_call(primordial_inflation_evolve_background(ppm,
                                                      ppr,
                                                      y,
                                                      dy,
                                                      _aH_,
                                                      k,
                                                      _FALSE_,
                                                      forward,
                                                      conformal),
               ppm->error_message,
               ppm->error_message);

    /** - read value of phi at time when k=aH */
    phi_k = y[ppm->index_in_phi];

    /** - get potential (and its derivatives) at this value */
    class_call(primordial_inflation_check_potential(ppm,phi_k,&V,&dV,&ddV),
               ppm->error_message,
               ppm->error_message);

    /** - calculate the analytic slow-roll formula for the spectra */
    /* first-order slow-roll: P_R = 128 pi V^3 / (3 dV^2), P_T = r * P_R with r = dV^2/(pi V^2) */
    curvature = 128.*_PI_/3.*pow(V,3)/pow(dV,2);
    tensors = pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2);

    /** - store the obtained result for curvature and tensor perturbations */
    /* NOTE(review): indexing lnpk[...][index_k] (without a *ic_ic_size stride)
       assumes ic_ic_size==1 for each mode, i.e. a single (adiabatic) initial
       condition — presumably guaranteed for inflationary spectra; confirm */
    ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature);
    ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors);

  }

  /* same single-IC assumption: the ic index is used directly as the
     symmetric-matrix index, valid only when it equals 0 */
  ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
  ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;

  return _SUCCESS_;

}
/**
* Routine with a loop over wavenumbers for the computation of the primordial
* spectrum. For each wavenumber it calls primordial_inflation_one_wavenumber()
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @return the error status
*/
int primordial_inflation_spectra(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini
) {
int index_k;
/* number of threads (always one if no openmp) */
int number_of_threads=1;
/* index of the thread (always 0 if no openmp) */
int thread=0;
/* This code can be optionally compiled with the openmp option for parallel computation.
Inside parallel regions, the use of the command "return" is forbidden.
For error management, instead of "return _FAILURE_", we will set the variable below
to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
parallel region. */
int abort;
#ifdef _OPENMP
/* instrumentation times */
double tstart, tstop, tspent;
#endif
/* query the number of OpenMP threads once, before the main parallel region */
#ifdef _OPENMP
#pragma omp parallel
{
number_of_threads = omp_get_num_threads();
}
#endif
abort = _FALSE_;
/* NOTE: without OpenMP this pragma is ignored by the compiler, so referencing
tstart/tstop/tspent (declared only under _OPENMP) is harmless in that case */
#pragma omp parallel shared(ppt,ppm,ppr,abort,y_ini) private(index_k,thread,tspent,tstart,tstop) num_threads(number_of_threads)
{
#ifdef _OPENMP
thread = omp_get_thread_num();
tspent=0.;
#endif
#pragma omp for schedule (dynamic)
/* loop over Fourier wavenumbers; each k is fully independent, hence the
dynamic scheduling (per-k workload varies with horizon-crossing time) */
for (index_k=0; index_k < ppm->lnk_size; index_k++) {
#ifdef _OPENMP
tstart = omp_get_wtime();
#endif
/* on failure, class_call_parallel records abort=_TRUE_ instead of
returning (returns are forbidden inside a parallel region) */
class_call_parallel(primordial_inflation_one_wavenumber(ppt,ppm,ppr,y_ini,index_k),
ppm->error_message,
ppm->error_message);
#ifdef _OPENMP
tstop = omp_get_wtime();
tspent += tstop-tstart;
#endif
}
#ifdef _OPENMP
if (ppm->primordial_verbose>1)
printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
__func__,tspent,thread);
#endif
} /* end of parallel zone */
/* propagate any failure recorded inside the parallel region */
if (abort == _TRUE_) return _FAILURE_;
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;
return _SUCCESS_;
}
/**
* Routine coordinating the computation of the primordial
* spectrum for one wavenumber. It calls primordial_inflation_one_k() to
* integrate the perturbation equations, and then it stores the result
* for the scalar/tensor spectra.
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @param index_k Input: index of wavenumber to be considered
* @return the error status
*/
int primordial_inflation_one_wavenumber(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini,
int index_k
) {
double k;
double curvature,tensors;
double * y;
double * dy;
k = exp(ppm->lnk[index_k]);
/** Summary */
/** - allocate vectors for background/perturbed quantities */
class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);
/** - initialize the background part of the running vector */
y[ppm->index_in_a] = y_ini[ppm->index_in_a];
y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];
/** - evolve the background until the relevant initial time for
integrating perturbations */
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k/ppr->primordial_inflation_ratio_min,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
/** - evolve the background/perturbation equations from this time and
until some time after Horizon crossing */
class_call(primordial_inflation_one_k(ppm,
ppr,
k,
y,
dy,
&curvature,
&tensors),
ppm->error_message,
ppm->error_message);
free(y);
free(dy);
class_test(curvature<=0.,
ppm->error_message,
"negative curvature spectrum");
class_test(tensors<=0.,
ppm->error_message,
"negative tensor spectrum");
/** - store the obtained result for curvature and tensor perturbations */
ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature);
ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors);
/* uncomment if you want to print here the spectra for testing */
/* fprintf(stderr,"%e %e %e\n", */
/* ppm->lnk[index_k], */
/* ppm->lnpk[ppt->index_md_scalars][index_k], */
/* ppm->lnpk[ppt->index_md_tensors][index_k]); */
return _SUCCESS_;
}
/**
* Routine integrating the background plus perturbation equations for
* each wavenumber, and returning the scalar and tensor spectrum.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param k Input: Fourier wavenumber
* @param y Input: running vector of background/perturbations, already allocated and initialized
* @param dy Input: running vector of background/perturbation derivatives, already allocated
* @param curvature Output: curvature perturbation
* @param tensor Output: tensor perturbation
* @return the error status
*/
int primordial_inflation_one_k(
                               struct primordial * ppm,
                               struct precision * ppr,
                               double k,
                               double * y,
                               double * dy,
                               double * curvature,
                               double * tensor
                               ) {

  /** Summary: */

  /** - define local variables */
  double tau_start,tau_end,dtau;
  double z,ksi2,ah2;
  double aH;
  double curvature_old;
  double curvature_new;
  double dlnPdN;

  struct primordial_inflation_parameters_and_workspace pipaw;
  struct generic_integrator_workspace gi;

  /** - initialize the generic integrator (same integrator already used
      in background, thermodynamics and perturbation modules) */
  pipaw.ppm = ppm;
  pipaw.N = ppm->in_size;
  pipaw.integrate = forward;
  pipaw.time = conformal;
  pipaw.k = k;

  class_call(initialize_generic_integrator(pipaw.N,&gi),
             gi.error_message,
             ppm->error_message);

  /* initial conditions for the perturbations, Bunch-Davies vacuum */
  y[ppm->index_in_ksi_re]=1./sqrt(2.*k);
  y[ppm->index_in_ksi_im]=0.;
  y[ppm->index_in_dksi_re]=0.;
  y[ppm->index_in_dksi_im]=-k*y[ppm->index_in_ksi_re];

  y[ppm->index_in_ah_re]=1./sqrt(2.*k);
  y[ppm->index_in_ah_im]=0.;
  y[ppm->index_in_dah_re]=0.;
  y[ppm->index_in_dah_im]=-k*y[ppm->index_in_ah_re];

  /** - initialize variable used for deciding when to stop the calculation (= when the curvature remains stable) */
  curvature_new = _HUGE_;

  /** - initialize conformal time to arbitrary value (here, only variations
      of tau matter: the equations that we integrate do not depend
      explicitly on time) */
  tau_end = 0;

  /** - compute derivative of initial vector and infer first value of adaptive time-step.
      From here on until cleanup, any failure must also release the integrator
      workspace (it previously leaked on these error paths). */
  class_call_except(primordial_inflation_derivs(tau_end,
                                                y,
                                                dy,
                                                &pipaw,
                                                ppm->error_message),
                    ppm->error_message,
                    ppm->error_message,
                    cleanup_generic_integrator(&gi));

  dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
    /MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);

  /** - loop over time */
  do {

    /* new time interval [tau_start, tau_end] over which equations will be integrated */
    tau_start = tau_end;
    tau_end = tau_start + dtau;

    /* on the very first step tau_start=0, so dtau/tau_start is infinite and
       this test cannot trigger; it guards against vanishing relative steps later */
    class_test_except(dtau/tau_start < ppr->smallest_allowed_variation,
                      ppm->error_message,
                      cleanup_generic_integrator(&gi),
                      "integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);

    /* evolve the system */
    class_call_except(generic_integrator(primordial_inflation_derivs,
                                         tau_start,
                                         tau_end,
                                         y,
                                         &pipaw,
                                         ppr->primordial_inflation_tol_integration,
                                         ppr->smallest_allowed_variation,
                                         &gi),
                      gi.error_message,
                      ppm->error_message,
                      cleanup_generic_integrator(&gi));

    /* compute derivatives at tau_end, useful to infer new time step and spectra */
    class_call_except(primordial_inflation_derivs(tau_end,
                                                  y,
                                                  dy,
                                                  &pipaw,
                                                  ppm->error_message),
                      ppm->error_message,
                      ppm->error_message,
                      cleanup_generic_integrator(&gi));

    /* new time step */
    dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
      /MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);

    /* new aH */
    aH = dy[ppm->index_in_a]/y[ppm->index_in_a];

    /* store previous value of curvature (at tau_start) */
    curvature_old = curvature_new;

    /* new curvature */
    z = y[ppm->index_in_a]*dy[ppm->index_in_phi]/aH;
    ksi2 = y[ppm->index_in_ksi_re]*y[ppm->index_in_ksi_re]+y[ppm->index_in_ksi_im]*y[ppm->index_in_ksi_im];
    curvature_new = k*k*k/2./_PI_/_PI_*ksi2/z/z;

    /* variation of curvature with time (dimensionless) */
    dlnPdN = (curvature_new-curvature_old)/dtau*y[ppm->index_in_a]/dy[ppm->index_in_a]/curvature_new;

    /* stop when (k >> aH) AND curvature is stable */
  } while ((k/aH >= ppr->primordial_inflation_ratio_max) || (fabs(dlnPdN) > ppr->primordial_inflation_tol_curvature));

  /** - clean the generic integrator */
  class_call(cleanup_generic_integrator(&gi),
             gi.error_message,
             ppm->error_message);

  /** - store final value of curvature for this wavenumber */
  *curvature = curvature_new;

  /** - store final value of tensor perturbation for this wavenumber */
  ah2 = y[ppm->index_in_ah_re]*y[ppm->index_in_ah_re]+y[ppm->index_in_ah_im]*y[ppm->index_in_ah_im];
  *tensor = 32.*k*k*k/_PI_*ah2/y[ppm->index_in_a]/y[ppm->index_in_a];

  //fprintf(stdout,"%g %g %g %g %g\n",k,*curvature,*tensor,*tensor/(*curvature),dlnPdN);

  return _SUCCESS_;
}
/**
* Routine searching for the inflationary attractor solution at a
* given phi_0, by iterations, with a given tolerance. If no solution
* found within tolerance, returns error message. The principle is the
* following. The code starts integrating the background equations
* from various values of phi, corresponding to earlier and earlier
* value before phi_0, and separated by a small arbitrary step size,
* corresponding roughly to 1 e-fold of inflation. Each time, the
* integration starts with the initial condition \f$ \phi=-V'/3H\f$ (slow-roll
* prediction). If the found value of \f$\phi'\f$ in phi_0 is stable (up to
* the parameter "precision"), the code considers that there is an
* attractor, and stops iterating. If this process does not converge,
* it returns an error message.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param phi_0 Input: field value at which we wish to find the solution
* @param precision Input: tolerance on output values (if too large, an attractor will always be considered to be found)
* @param y Input: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param H_0 Output: Hubble value at phi_0 for attractor solution
* @param dphidt_0 Output: field derivative value at phi_0 for attractor solution
* @return the error status
*/
int primordial_inflation_find_attractor(
                                        struct primordial * ppm,
                                        struct precision * ppr,
                                        double phi_0,
                                        double precision,
                                        double * y,
                                        double * dy,
                                        double * H_0,
                                        double * dphidt_0
                                        ) {

  double V_0,dV_0,ddV_0;          /* potential and derivatives at phi_0 */
  double V=0.,dV=0.,ddV=0.;       /* potential and derivatives at the trial starting point */
  double a_ini;                   /* arbitrary initial scale factor */
  double phi;                     /* trial starting value of the field */
  double dphidt_sr;               /* slow-roll guess for phi^dot at the trial point */
  double dphidt_new,dphidt_prev;  /* successive estimates of phi^dot at phi_0 */
  int iteration;

  /* The first estimate in the series of phi^dot(phi_0) values is the
     slow-roll prediction phi^dot = -V'/(3H), with H from the Friedmann
     equation. The lines below compute it and initialize the iteration. */
  class_call(primordial_inflation_check_potential(ppm,phi_0,&V_0,&dV_0,&ddV_0),
             ppm->error_message,
             ppm->error_message);

  dphidt_new = -dV_0/3./sqrt((8.*_PI_/3.)*V_0);
  phi = phi_0;
  iteration = 0;

  /* seed the previous estimate with a value guaranteed to differ from
     dphidt_new by more than the tolerance, so the loop body executes
     at least once */
  dphidt_prev = dphidt_new/(precision+2.);

  /* start the background integration from earlier and earlier field
     values, until the resulting phi^dot at phi_0 stabilizes within
     the requested precision */
  while (fabs(dphidt_new/dphidt_prev-1.) >= precision) {

    iteration++;
    class_test(iteration >= ppr->primordial_inflation_attractor_maxit,
               ppm->error_message,
               "could not converge after %d iterations: there exists no attractor solution near phi=%g. Potential probably too steep in this region, or precision parameter primordial_inflation_attractor_precision=%g too small",
               iteration,
               phi_0,
               precision);

    dphidt_prev = dphidt_new;

    /* step the trial starting point by roughly one extra e-fold of inflation */
    phi += dV_0/V_0/16./_PI_;

    /* impose the slow-roll value of phi' at the trial point and
       initialize the rest of the background vector */
    class_call(primordial_inflation_check_potential(ppm,phi,&V,&dV,&ddV),
               ppm->error_message,
               ppm->error_message);

    a_ini = 1.;
    dphidt_sr = -dV/3./sqrt((8.*_PI_/3.)*V);
    y[ppm->index_in_a]=a_ini;
    y[ppm->index_in_phi]=phi;
    y[ppm->index_in_dphi]=a_ini*dphidt_sr;

    /* integrate the background equations forward until phi_0 is reached */
    class_call(primordial_inflation_evolve_background(ppm,
                                                      ppr,
                                                      y,
                                                      dy,
                                                      _phi_,
                                                      phi_0,
                                                      _TRUE_,
                                                      forward,
                                                      conformal),
               ppm->error_message,
               ppm->error_message);

    /* updated estimate of phi^dot at phi_0 (phi' is conformal, hence /a) */
    dphidt_new = y[ppm->index_in_dphi]/y[ppm->index_in_a];
  }

  /* converged: adopt the last estimate as the attractor solution */
  *dphidt_0 = dphidt_new;
  *H_0 = sqrt((8.*_PI_/3.)*(0.5*dphidt_new*dphidt_new+V_0));

  if (ppm->primordial_verbose > 1) {
    printf(" (attractor found in phi=%g with phi'=%g, H=%g)\n",phi_0,*dphidt_0,*H_0);
  }

  return _SUCCESS_;
}
/**
* Routine integrating background equations only, from initial values
* stored in y, to a final value (if target = _aH_, until aH =
* aH_stop; if target = _phi_, till phi = phi_stop; if target =
* _end_inflation_, until \f$ d^2a/dt^2 = 0\f$ (here t = proper time)). In
* output, y contains the final background values. In addition, if
* check_epsilon is true, the routine controls at each step that the
* expansion is accelerated and that inflation holds (epsilon&lt;1),
* otherwise it returns an error. Thanks to the last argument, it is
* also possible to specify whether the integration should be carried
* forward or backward in time. For the inflation_H case, only a 1st
* order differential equation is involved, so the forward and
* backward case can be done exactly without problems. For the
* inflation_V case, the equation of motion is 2nd order. What the
* module will do in the backward case is to search for an approximate
* solution, corresponding to the (first-order) attractor inflationary
* solution. This approximate backward solution is used in order to
* estimate some initial times, but the approximation made here will
* never impact the final result: the module is written in such a way
* that after using this approximation, the code always computes (and
* relies on) the exact forward solution.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y Input/output: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param target Input: whether the goal is to reach a given aH or \f$ \phi \f$
* @param stop Input: the target value of either aH or \f$ \phi \f$
* @param check_epsilon Input: whether we should impose inflation (epsilon>1) at each step
* @param direction Input: whether we should integrate forward or backward in time
* @param time Input: definition of time (proper or conformal)
* @return the error status
*/
int primordial_inflation_evolve_background(
struct primordial * ppm,
struct precision * ppr,
double * y,
double * dy,
enum target_quantity target,
double stop,
short check_epsilon,
enum integration_direction direction,
enum time_definition time
) {
/* workspace handed to primordial_inflation_derivs(), and state of the generic ODE integrator */
struct primordial_inflation_parameters_and_workspace pipaw;
struct generic_integrator_workspace gi;
double tau_start,tau_end,dtau=0.;
double H,dH,ddH,dddH;
double epsilon,epsilon_old;
/* predicted value of the target quantity (aH, phi, a, or -d2a/dt2 /a) after the next step */
double quantity=0.;
double V,dV,ddV;
/* sign of the time step: +1. for forward integration, -1. for backward */
double sign_dtau=0.;
pipaw.ppm = ppm;
pipaw.N = ppm->in_bg_size;
if ((direction == backward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
// -1 to remove the differential equation for phi', since we stick to the attractor
pipaw.N -= 1;
}
pipaw.integrate = direction;
pipaw.time = time;
switch (direction) {
case forward:
sign_dtau = 1.;
break;
case backward:
sign_dtau = -1.;
break;
}
class_call(initialize_generic_integrator(pipaw.N,&gi),
gi.error_message,
ppm->error_message);
/* at starting point, compute eventually epsilon */
if (check_epsilon == _TRUE_) {
class_call(primordial_inflation_get_epsilon(ppm,
y[ppm->index_in_phi],
&epsilon),
ppm->error_message,
ppm->error_message);
}
/* at starting point, compute the stepsize dtau */
tau_end = 0;
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// compute timestep (if time = conformal, dtau is the conformal time step,
// if time = proper, dtau is in fact dt, the proper time step)
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
/* forward inflation_V case: limit the step by both the expansion rate and the field velocity change */
dtau = ppr->primordial_inflation_bg_stepsize
*MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
}
else {
// minus sign for backward in time
dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
}
/* expected value of target quantity after the next step */
switch (target) {
case _aH_:
// next (approximate) value of aH after next step
// (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
// where dtau can be conformal or proper time
quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
if (time == conformal) quantity /= y[ppm->index_in_a];
break;
case _phi_:
// next (approximate) value of phi after next step
quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
break;
case _end_inflation_:
// in this case, the goal is to reach d2a/dt2 = 0 (end of accelerated expansion)
stop = 0.;
// current value of quantity = - d2a/dt2 /a = [- (a'/a)^2 + 3/2 8pi/3 phi'^2]/a^2
quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
// check that we are in the right case
class_test(ppm->primordial_spec_type != inflation_V_end,
ppm->error_message,
"the target _end_inflation_ is only coded to work with inflation_V_end (but could be generalized if needed)");
break;
case _a_:
// next (approximate) value of a after next step
quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
break;
}
/* loop over time steps, checking that there will be no overshooting:
the loop keeps stepping while the PREDICTED value of the target
has not crossed 'stop' (sign_dtau flips the inequality for backward
integration) */
while (sign_dtau*(quantity - stop) < 0.) {
/* check that V(phi) or H(phi) do not take forbidden values
(negative or positive derivative) */
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
class_call(primordial_inflation_check_potential(ppm,
y[ppm->index_in_phi],
&V,
&dV,
&ddV),
ppm->error_message,
ppm->error_message);
}
else {
class_call(primordial_inflation_check_hubble(ppm,
y[ppm->index_in_phi],
&H,
&dH,
&ddH,
&dddH),
ppm->error_message,
ppm->error_message);
}
/* take one time step */
tau_start = tau_end;
tau_end = tau_start + dtau;
// mind the fabs(...) below (works for both forward and backward integration)
/* on the very first step tau_start=0, so the ratio is infinite and the test cannot trigger */
class_test(fabs(dtau/tau_start) < ppr->smallest_allowed_variation,
ppm->error_message,
"integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);
class_call(generic_integrator(primordial_inflation_derivs,
tau_start,
tau_end,
y,
&pipaw,
ppr->primordial_inflation_tol_integration,
ppr->smallest_allowed_variation,
&gi),
gi.error_message,
ppm->error_message);
/* eventually, check that epsilon is not becoming greater than one */
if (check_epsilon == _TRUE_) {
epsilon_old = epsilon;
class_call_except(primordial_inflation_get_epsilon(ppm,
y[ppm->index_in_phi],
&epsilon),
ppm->error_message,
ppm->error_message,
cleanup_generic_integrator(&gi));
/* error out only on the crossing epsilon<=1 -> epsilon>1 (i.e. inflation ending mid-evolution) */
class_test_except((epsilon > 1) && (epsilon_old <= 1),
ppm->error_message,
cleanup_generic_integrator(&gi),
"Inflaton evolution crosses the border from epsilon<1 to epsilon>1 at phi=%g. Inflation disrupted during the observable e-folds",
y[ppm->index_in_phi]);
}
/* recompute new value of next conformal time step */
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// compute timestep (if time = conformal, dtau is the conformal time step,
// if time = proper, dtau is in fact dt, the proper time step)
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
dtau = ppr->primordial_inflation_bg_stepsize
*MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
}
else {
// minus sign for backward in time
dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
}
/* expected value of target quantity after the next step */
switch (target) {
case _aH_:
// next (approximate) value of aH after next step
// (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
// where dtau can be conformal or proper time
quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
if (time == conformal) quantity /= y[ppm->index_in_a];
break;
case _phi_:
// next (approximate) value of phi after next step
quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
break;
case _end_inflation_:
// current value of quantity = - d2a/dt2 /a = [- (a'/a)^2 + 3/2 8pi/3 phi'^2]/a^2
quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
break;
case _a_:
// next (approximate) value of a after next step
quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
break;
}
}
/* won't use the integrator anymore */
class_call(cleanup_generic_integrator(&gi),
gi.error_message,
ppm->error_message);
/* Perform one last step with a simple trapezoidal integral. This
will bring exactly phi or a forward to phi_stop or a_stop, or
approximately aH forward to aH_stop, or approximately [-d2a/dt2
/a] backward to zero. */
switch (target) {
case _aH_:
switch (time) {
case proper:
dtau = (stop/dy[ppm->index_in_a]-1.)/dy[ppm->index_in_a];
break;
case conformal:
dtau = (stop/(dy[ppm->index_in_a]/y[ppm->index_in_a])-1.)/(dy[ppm->index_in_a]/y[ppm->index_in_a]);
break;
}
break;
case _phi_:
/* exact linear extrapolation: dtau such that phi + phi' dtau = stop */
dtau = (stop-y[ppm->index_in_phi])/dy[ppm->index_in_phi];
break;
case _end_inflation_:
class_call(primordial_inflation_check_potential(ppm,y[ppm->index_in_phi],&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
// We can easily pull back quantity=-d2a/dt2 /a by noticing that
// d(quantity)/dtau = 8piG phi' phi'' / a^2 (exact relation!)
// or
// d(quantity)/dtau = 8piG phi^dot (a phi^dot)^dot = 8piG phi^dot (a^dot phi^dot+ a phi^dotdot)
// By taking the step dtau = - quantity / [d(quantity)/dtau] we nearly reach quantity=0 (end of inflation), up to very good approximation
switch (time) {
case proper:
dtau = -quantity/(8.*_PI_*dy[ppm->index_in_phi]*(dy[ppm->index_in_a]*dy[ppm->index_in_phi]+y[ppm->index_in_a]*dy[ppm->index_in_dphi]));
break;
case conformal:
dtau = -quantity/(8.*_PI_/y[ppm->index_in_a]/y[ppm->index_in_a]*dy[ppm->index_in_phi]*dy[ppm->index_in_dphi]);
break;
}
break;
case _a_:
/* exact linear extrapolation: dtau such that a + a' dtau = stop */
dtau = (stop-y[ppm->index_in_a])/dy[ppm->index_in_a];
break;
}
/* apply the trapezoidal step to the background variables */
y[ppm->index_in_a] += dy[ppm->index_in_a]*dtau;
y[ppm->index_in_phi] += dy[ppm->index_in_phi]*dtau;
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V)||(ppm->primordial_spec_type == inflation_V_end)))
y[ppm->index_in_dphi] += dy[ppm->index_in_dphi]*dtau;
// this last step updates also the dy[]
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// uncomment if you want to test that the routine really reached the point at which d2a/dt2=0
/*
if (target == _end_inflation_) {
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
aH = dy[ppm->index_in_a]/y[ppm->index_in_a];
quantity = (-aH*aH + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi])/y[ppm->index_in_a]/y[ppm->index_in_a];
if (ppm->primordial_verbose>1)
printf(" (-d2a/dt2 /a = %e)\n",quantity);
}
*/
return _SUCCESS_;
}
/**
* Routine checking positivity and negative slope of potential. The
* negative slope is an arbitrary choice. Currently the code can only
* deal with monotonic variations of the inflaton during inflation. So
* the slope had to be always negative or always positive... we took
* the first option.
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to perform the check
* @param V Output: inflaton potential in units of \f$ Mp^4\f$
* @param dV Output: first derivative of inflaton potential wrt the field
* @param ddV Output: second derivative of inflaton potential wrt the field
* @return the error status
*/
int primordial_inflation_check_potential(
struct primordial * ppm,
double phi,
double * V,
double * dV,
double * ddV
) {
/* evaluate V(phi) and its first two derivatives at the requested field value */
class_call(primordial_inflation_potential(ppm,phi,V,dV,ddV),
ppm->error_message,
ppm->error_message);
/* a vanishing or negative potential cannot be handled by this module */
class_test(*V <= 0.,
ppm->error_message,
"This potential becomes negative at phi=%g, before the end of observable inflation. It cannot be treated by this code",
phi);
/* the module's sign convention requires a strictly decreasing potential (dV/dphi<0) */
class_test(*dV >= 0.,
ppm->error_message,
"All the code is written for the case dV/dphi<0. Here, in phi=%g, we have dV/dphi=%g. This potential cannot be treated by this code",
phi,*dV);
return _SUCCESS_;
}
/**
* Routine checking positivity and negative slope of \f$ H(\phi)\f$. The
* negative slope is an arbitrary choice. Currently the code can only
* deal with monotonic variations of the inflaton during
* inflation. And H can only decrease with time. So the slope \f$ dH/d\phi\f$
* has to be always negative or always positive... we took the first
* option: phi increases, H decreases.
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to perform the check
* @param H Output: Hubble parameters in units of Mp
* @param dH Output: \f$ dH / d\phi \f$
* @param ddH Output: \f$ d^2H / d\phi^2 \f$
* @param dddH Output: \f$ d^3H / d\phi^3 \f$
* @return the error status
*/
int primordial_inflation_check_hubble(
struct primordial * ppm,
double phi,
double * H,
double * dH,
double * ddH,
double * dddH
) {
/* evaluate H(phi) and its first three derivatives at the requested field value */
class_call(primordial_inflation_hubble(ppm,
phi,
H,dH,ddH,dddH),
ppm->error_message,
ppm->error_message);
/* a negative Hubble rate is unphysical */
class_test(*H < 0.,
ppm->error_message,
"this H(phi) is not physical. H = %e",
*H);
/* the module's sign convention requires H decreasing with growing phi (dH/dphi<=0) */
class_test(*dH > 0.,
ppm->error_message,
"this H(phi) is not decreasing with growing phi. dH/dphi = %e",
*dH);
return _SUCCESS_;
}
/**
* Routine computing the first slow-roll parameter epsilon
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to compute epsilon
* @param epsilon Output: result
* @return the error status
*/
int primordial_inflation_get_epsilon(
                                     struct primordial * ppm,
                                     double phi,
                                     double * epsilon
                                     ) {

  double V,dV,ddV;
  double H,dH,ddH,dddH;

  if ((ppm->primordial_spec_type == inflation_V) ||
      (ppm->primordial_spec_type == inflation_V_end)) {

    /* potential-based definition: epsilon = (1/16pi) (V'/V)^2 */
    class_call(primordial_inflation_potential(ppm,
                                              phi,
                                              &V,&dV,&ddV),
               ppm->error_message,
               ppm->error_message);

    *epsilon = 1./16./_PI_*pow(dV/V,2);

    //*eta = 1./8./pi*(ddV/V)
  }
  else if (ppm->primordial_spec_type == inflation_H) {

    /* Hubble-based definition: epsilon = (1/4pi) (H'/H)^2 */
    class_call(primordial_inflation_hubble(ppm,
                                           phi,
                                           &H,&dH,&ddH,&dddH),
               ppm->error_message,
               ppm->error_message);

    *epsilon = 1./4./_PI_*pow(dH/H,2);
  }
  else {
    /* any other spectrum type is not supported by this routine */
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
  }

  return _SUCCESS_;
}
/**
* Routine searching phi_pivot when a given amount of inflation is requested.
*
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y Input: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @return the error status
*/
int primordial_inflation_find_phi_pivot(
struct primordial * ppm,
struct precision * ppr,
double * y,
double * dy
) {
/** Summary: */
/** - define local variables */
double epsilon,dphi;
double phi_try,H_try,dphidt_try,ratio_try=0.;
double phi_left,phi_right,phi_mid;
double phi_small_epsilon,phi_stop;
double dphidt_small_epsilon;
double H_small_epsilon;
double aH_ratio_after_small_epsilon=0.;
double a_ratio_after_small_epsilon=0.;
double target=0.;
double a_pivot,aH_pivot;
double rho_end;
double h;
double H0;
double rho_c0;
double sigma_B;
double Omega_g0;
double Omega_r0;
/** - check whether in vicinity of phi_end, inflation is still ongoing */
class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-ppr->primordial_inflation_end_dphi,&epsilon),
ppm->error_message,
ppm->error_message);
/** - case in which epsilon>1: hence we must find the value phi_stop <
phi_end where inflation ends up naturally */
if (epsilon > 1.) {
// assume that inflation ends up naturally
/** - --> find latest value of the field such that epsilon = primordial_inflation_small_epsilon (default: 0.1) */
/** - --> bracketing right-hand value is phi_end (but the potential will not be evaluated exactly there, only closeby */
phi_right = ppm->phi_end;
/** - --> bracketing left-hand value is found by iterating with logarithmic step until epsilon < primordial_inflation_small_epsilon */
dphi = ppr->primordial_inflation_end_dphi;
do {
dphi *= ppr->primordial_inflation_end_logstep;
class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-dphi,&epsilon),
ppm->error_message,
ppm->error_message);
} while (epsilon > ppr->primordial_inflation_small_epsilon);
phi_left = ppm->phi_end-dphi;
/** - --> find value such that epsilon = primordial_inflation_small_epsilon by bisection */
do {
phi_mid = 0.5*(phi_left+phi_right);
class_call(primordial_inflation_get_epsilon(ppm,phi_mid,&epsilon),
ppm->error_message,
ppm->error_message);
if (epsilon < ppr->primordial_inflation_small_epsilon) phi_left=phi_mid;
else phi_right=phi_mid;
} while (fabs(epsilon-ppr->primordial_inflation_small_epsilon) > ppr->primordial_inflation_small_epsilon_tol);
/** - --> value found and stored as phi_small_epsilon */
phi_small_epsilon = phi_mid;
/** - --> find inflationary attractor in phi_small_epsilon (should exist since epsilon<<1 there) */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_small_epsilon,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_small_epsilon,
&dphidt_small_epsilon),
ppm->error_message,
ppm->error_message);
/** - --> compute amount of inflation between this phi_small_epsilon and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_small_epsilon;
y[ppm->index_in_dphi]=y[ppm->index_in_a]*dphidt_small_epsilon;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
// we have used here conformal time, so aH = dy[a]/y[a]
aH_ratio_after_small_epsilon = dy[ppm->index_in_a]/y[ppm->index_in_a]/H_small_epsilon;
a_ratio_after_small_epsilon = y[ppm->index_in_a];
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
/* get the target value of ln_aH_ratio */
rho_end = 2./8./_PI_*pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2);
rho_end = 8*_PI_/3.*rho_end/(_G_*_h_P_/pow(_c_,3))*pow(_Mpc_over_m_,2);
h = 0.7;
H0 = h * 1.e5 / _c_;
rho_c0 = pow(H0,2);
sigma_B = 2. * pow(_PI_,5) * pow(_k_B_,4) / 15. / pow(_h_P_,3) / pow(_c_,2);
Omega_g0 = (4.*sigma_B/_c_*pow(2.726,4.)) / (3.*_c_*_c_*1.e10*h*h/_Mpc_over_m_/_Mpc_over_m_/8./_PI_/_G_);
Omega_r0 = 3.046*7./8.*pow(4./11.,4./3.)*Omega_g0;
target = log(H0/0.05*pow(Omega_r0,0.5)*pow(2./100.,1./12.)*pow(rho_end/rho_c0,0.25));
//fprintf(stderr,"auto: log(aH_end/aH_*)=%e\n",target);
break;
case ln_aH_ratio:
target = ppm->phi_pivot_target;
//fprintf(stderr,"fixed: log(aH_end/aH_*)=%e\n",target);
break;
case N_star:
target = ppm->phi_pivot_target;
//fprintf(stderr,"fixed: log(a_end/a_*)=%e\n",target);
break;
}
/** - --> by starting from phi_small_epsilon and integrating an approximate
solution backward in time, try to estimate roughly a value close
to phi_pivot but a bit smaller. This is done by trying to reach
an amount of inflation equal to the requested one, minus the
amount after phi_small_epsilon, and plus
primordial_inflation_extra_efolds efolds (default: two). Note
that it is not aggressive to require two extra e-folds of
inflation before the pivot, since the calculation of the spectrum
in the observable range will require even more. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_small_epsilon;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
}
/* we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
phi_try = y[ppm->index_in_phi];
/** - --> find attractor in phi_try */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message);
/** - --> check the total amount of inflation between phi_try and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
// aH_ratio (we have used here proper time, so aH = dy[a])
ratio_try = dy[ppm->index_in_a]/H_try;
break;
case N_star:
// a_ratio
ratio_try = y[ppm->index_in_a];
break;
}
class_test(log(ratio_try) < target,
ppm->error_message,
"phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
log(ratio_try),
target);
phi_stop = y[1];
if (ppm->primordial_verbose > 1)
printf(" (inflation stops in phi_stop = %e)\n",phi_stop);
/** - --> go back to phi_try, and now find phi_pivot such that the amount
of inflation between phi_pivot and the end of inflation is
exactly the one requested. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_try*ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
}
ppm->phi_pivot = y[1];
if (ppm->primordial_verbose > 1) {
printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);
/* - --> In verbose mode, check that phi_pivot is correct. Done by
restarting from phi_pivot and going again till the end of
inflation. */
aH_pivot = dy[0];
a_pivot = y[0];
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
}
}
/** - case in which epsilon<1: */
else {
/** - --> find inflationary attractor in phi_small_epsilon (should exist since epsilon<1 there) */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
ppm->phi_end,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_small_epsilon,
&dphidt_small_epsilon),
ppm->error_message,
ppm->error_message);
/** - --> by starting from phi_end and integrating an approximate
solution backward in time, try to estimate roughly a value close
to phi_pivot but a bit smaller. This is done by trying to reach
an amount of inflation equal to the requested one, minus the
amount after phi_small_epsilon, and plus
primordial_inflation_extra_efolds efolds (default: two). Note
that it is not aggressive to require two extra e-folds of
inflation before the pivot, since the calculation of the spectrum
in the observable range will require even more. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= ppm->phi_end;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
}
/** - --> we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
phi_try = y[ppm->index_in_phi];
/** - --> find attractor in phi_try */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message);
/** - --> check the total amount of inflation between phi_try and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_end,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
// aH_ratio (we have used here proper time, so aH = dy[a])
ratio_try = dy[ppm->index_in_a]/H_try;
break;
case N_star:
// a_ratio
ratio_try = y[ppm->index_in_a];
break;
}
class_test(log(ratio_try) < target,
ppm->error_message,
"phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
log(ratio_try),
target);
phi_stop = y[1];
if (ppm->primordial_verbose > 1)
printf(" (inflation stops in phi_stop = %e)\n",phi_stop);
/** - --> go back to phi_try, and now find phi_pivot such that the amount
of inflation between phi_pivot and the end of inflation is
exactly the one requested. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_try*ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
}
ppm->phi_pivot = y[1];
if (ppm->primordial_verbose > 1) {
printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);
/** - --> In verbose mode, check that phi_pivot is correct. Done by
restarting from phi_pivot and going again till the end of
inflation. */
aH_pivot = dy[0];
a_pivot = y[0];
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_end,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
}
}
return _SUCCESS_;
}
/**
* Routine returning derivative of system of background/perturbation
* variables. Like other routines used by the generic integrator
* (background_derivs, thermodynamics_derivs, perturb_derivs), this
* routine has a generic list of arguments, and a slightly different
* error management, with the error message returned directly in an
* ErrMsg field.
*
* @param tau Input: time (not used explicitly inside the routine, but requested by the generic integrator)
* @param y Input/output: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param parameters_and_workspace Input: all necessary input variables apart from y
* @param error_message Output: error message
* @return the error status
*/
int primordial_inflation_derivs(
                                double tau,
                                double * y,
                                double * dy,
                                void * parameters_and_workspace,
                                ErrorMsg error_message
                                ) {

  /* Fill dy with the derivatives of the background (and, when the
     vector is long enough, perturbation) variables y. 'tau' and
     'error_message' are required by the generic integrator interface
     but are not used explicitly here; errors are propagated through
     ppm->error_message. */
  struct primordial_inflation_parameters_and_workspace * ppipaw;
  struct primordial * ppm;

  /* recover the workspace passed as an opaque pointer */
  ppipaw = parameters_and_workspace;
  ppm = ppipaw->ppm;

  // a2 = a^2, cached in the workspace for reuse below
  ppipaw->a2=y[ppm->index_in_a]*y[ppm->index_in_a];

  // BACKGROUND
  switch (ppm->primordial_spec_type) {

  case inflation_V:
  case inflation_V_end:
    /* potential-based formulation: evaluate V(phi) and its first two
       derivatives at the current field value */
    class_call(primordial_inflation_potential(ppm,
                                              y[ppm->index_in_phi],
                                              &(ppipaw->V),
                                              &(ppipaw->dV),
                                              &(ppipaw->ddV)),
               ppm->error_message,
               ppm->error_message);

    switch (ppipaw->integrate) {

    case forward:
      /* exact background equations (Friedmann + Klein-Gordon) */
      switch (ppipaw->time) {

      case conformal:
        // a H = a'/a
        ppipaw->aH = sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->a2*ppipaw->V));
        // 1: a
        dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
        // 2: phi
        dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
        // 3: dphi/dtau (Klein-Gordon in conformal time)
        dy[ppm->index_in_dphi]=-2.*ppipaw->aH*y[ppm->index_in_dphi]-ppipaw->a2*ppipaw->dV;
        break;

      case proper:
        // a H = adot
        ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->V));
        // 1: a
        dy[ppm->index_in_a]=ppipaw->aH;
        // 2: phi
        dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
        // 3: dphi/dt (Klein-Gordon in proper time)
        dy[ppm->index_in_dphi]=-3.*ppipaw->aH/y[ppm->index_in_a]*y[ppm->index_in_dphi]-ppipaw->dV;
        break;
      }

      // z''/z (assumes that conformal time is requested)
      ppipaw->zpp_over_z=
        2*ppipaw->aH*ppipaw->aH
        - ppipaw->a2*ppipaw->ddV
        - 4.*_PI_*(7.*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]
                   +4.*y[ppm->index_in_dphi]/ppipaw->aH*ppipaw->a2*ppipaw->dV)
        +32.*_PI_*_PI_*pow(y[ppm->index_in_dphi],4)/pow(ppipaw->aH,2);
      // a''/a (assumes that conformal time is requested)
      ppipaw->app_over_a=2.*ppipaw->aH*ppipaw->aH - 4.*_PI_*y[ppm->index_in_dphi]*y[ppm->index_in_dphi];
      break;

      // For backward integration of approximate slow-roll solution:
      // Neglect kinetic energy of the field phi'^2/(2a^2) w.r.t. potential energy V
      // Neglect phi'' w.r.t 2aHphi', reducing 2nd order Klein-Gordon to approximate 1st-order
    case backward:
      switch (ppipaw->time) {

      case conformal:
        // a H = a'/a
        ppipaw->aH = sqrt((8*_PI_/3.)*ppipaw->a2*ppipaw->V);
        // 1: a
        dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
        // 2: phi (first-order slow-roll; the dphi equation is dropped)
        dy[ppm->index_in_phi]= -ppipaw->a2*ppipaw->dV/3./ppipaw->aH;
        break;

      case proper:
        // a H = da/dt
        ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*ppipaw->V);
        // 1: a
        dy[ppm->index_in_a]=ppipaw->aH;
        // 2: phi (first-order slow-roll; the dphi equation is dropped)
        dy[ppm->index_in_phi]= -ppipaw->dV/3./ppipaw->aH*y[ppm->index_in_a];
        break;
      }
      break;
    }
    break;

  case inflation_H:
    /* Hubble-flow formulation: evaluate H(phi) and its derivatives */
    class_call(primordial_inflation_hubble(ppm,
                                           y[ppm->index_in_phi],
                                           &(ppipaw->H),
                                           &(ppipaw->dH),
                                           &(ppipaw->ddH),
                                           &(ppipaw->dddH)),
               ppm->error_message,
               ppm->error_message);

    switch (ppipaw->time) {

    case conformal:
      // 1: a
      dy[ppm->index_in_a]=ppipaw->a2*ppipaw->H;
      // 2: phi
      dy[ppm->index_in_phi]=-1./4./_PI_*y[ppm->index_in_a]*ppipaw->dH;
      break;

    case proper:
      // 1: a
      dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->H;
      // 2: phi
      dy[ppm->index_in_phi]=-1./4./_PI_*ppipaw->dH;
      break;
    }

    // z''/z (assumes that conformal time is requested)
    ppipaw->zpp_over_z =
      2.              *ppipaw->a2*ppipaw->H*ppipaw->H
      -3./4./_PI_     *ppipaw->a2*ppipaw->H*ppipaw->ddH
      +1./16./_PI_/_PI_*ppipaw->a2*ppipaw->ddH*ppipaw->ddH
      +1./16./_PI_/_PI_*ppipaw->a2*ppipaw->dH*ppipaw->dddH
      -1./4./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->ddH/ppipaw->H
      +1./2./_PI_      *ppipaw->a2*ppipaw->dH*ppipaw->dH
      +1./8./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->dH*ppipaw->dH/ppipaw->H/ppipaw->H;
    // a''/a (assumes that conformal time is requested)
    ppipaw->app_over_a = 2.*ppipaw->a2*ppipaw->H*ppipaw->H
      -4.*_PI_*dy[ppm->index_in_phi]*dy[ppm->index_in_phi];
    break;

  default:
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
    break;
  }

  /* stop here when only the background is being integrated */
  if (ppipaw->N <= ppm->in_bg_size) // mind the <= instead of ==, necessary because for backward integration 1 equation is removed
    return _SUCCESS_;

  // PERTURBATIONS
  class_test(ppipaw->time == proper,
             ppm->error_message,
             "For inflaton perturbations, only conformal time is coded.");

  // SCALARS: Mukhanov-Sasaki-type equation ksi'' = -(k^2 - z''/z) ksi
  // 4: ksi_re
  dy[ppm->index_in_ksi_re]=y[ppm->index_in_dksi_re];
  // 5: ksi_im
  dy[ppm->index_in_ksi_im]=y[ppm->index_in_dksi_im];
  // 6: d ksi_re / dtau
  dy[ppm->index_in_dksi_re]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_re];
  // 7: d ksi_im / dtau
  dy[ppm->index_in_dksi_im]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_im];

  // TENSORS: same structure with a''/a instead of z''/z
  // 8: ah_re
  dy[ppm->index_in_ah_re]=y[ppm->index_in_dah_re];
  // 9: ah_im
  dy[ppm->index_in_ah_im]=y[ppm->index_in_dah_im];
  // 10: d ah_re / dtau
  dy[ppm->index_in_dah_re]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_re];
  // 11: d ah_im / dtau
  dy[ppm->index_in_dah_im]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_im];

  return _SUCCESS_;

}
/**
* This routine reads the primordial spectrum from an external command,
* and stores the tabulated values.
* The sampling of the k's given by the external command is preserved.
*
* Author: Jesus Torrado (torradocacho@lorentz.leidenuniv.nl)
* Date: 2013-12-20
*
* @param ppt Input/output: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_external_spectrum_init(
                                      struct perturbs * ppt,
                                      struct primordial * ppm
                                      ) {

  /** Summary: */

  char arguments[_ARGUMENT_LENGTH_MAX_];
  char line[_LINE_LENGTH_MAX_];
  char command_with_arguments[2*_ARGUMENT_LENGTH_MAX_];
  FILE *process;
  int n_data_guess, n_data = 0;
  double *k = NULL, *pks = NULL, *pkt = NULL, *tmp = NULL;
  double this_k, this_pks, this_pkt;
  int n_fields;
  int status;
  int index_k;

  /** - Initialization */
  /* Prepare the data (with some initial size); fix: check the initial
     allocations instead of assuming they succeed */
  n_data_guess = 100;
  k   = (double *)malloc(n_data_guess*sizeof(double));
  class_test(k == NULL,
             ppm->error_message,
             "Error allocating memory to read the external spectrum.\n");
  pks = (double *)malloc(n_data_guess*sizeof(double));
  class_test(pks == NULL,
             ppm->error_message,
             "Error allocating memory to read the external spectrum.\n");
  if (ppt->has_tensors == _TRUE_) {
    pkt = (double *)malloc(n_data_guess*sizeof(double));
    class_test(pkt == NULL,
               ppm->error_message,
               "Error allocating memory to read the external spectrum.\n");
  }

  /* Prepare the command */
  /* If the command is just a "cat", no arguments need to be passed */
  if(strncmp("cat ", ppm->command, 4) == 0) {
    sprintf(arguments, " ");
  }
  /* otherwise pass the list of arguments */
  else {
    sprintf(arguments, " %g %g %g %g %g %g %g %g %g %g",
            ppm->custom1, ppm->custom2, ppm->custom3, ppm->custom4, ppm->custom5,
            ppm->custom6, ppm->custom7, ppm->custom8, ppm->custom9, ppm->custom10);
  }

  /* write the actual command in a string */
  sprintf(command_with_arguments, "%s %s", ppm->command, arguments);
  if (ppm->primordial_verbose > 0)
    printf(" -> running: %s\n",command_with_arguments);

  /** - Launch the command and retrieve the output */
  /* Launch the process */
  process = popen(command_with_arguments, "r");
  class_test(process == NULL,
             ppm->error_message,
             "The program failed to set the environment for the external command. Maybe you ran out of memory.");

  /* Read output and store it */
  while (fgets(line, sizeof(line)-1, process) != NULL) {
    /* fix: check that each line actually parses instead of silently
       storing stale/uninitialized values */
    if (ppt->has_tensors == _TRUE_) {
      n_fields = sscanf(line, "%lf %lf %lf", &this_k, &this_pks, &this_pkt);
      class_test(n_fields != 3,
                 ppm->error_message,
                 "Could not parse line '%s' of the external spectrum (expected 'k pks pkt').", line);
    }
    else {
      n_fields = sscanf(line, "%lf %lf", &this_k, &this_pks);
      class_test(n_fields != 2,
                 ppm->error_message,
                 "Could not parse line '%s' of the external spectrum (expected 'k pks').", line);
    }
    /* Standard technique in C: if too many data, double the size of the vectors */
    /* (it is faster and safer that reallocating every new line) */
    if((n_data+1) > n_data_guess) {
      n_data_guess *= 2;
      tmp = (double *)realloc(k, n_data_guess*sizeof(double));
      class_test(tmp == NULL,
                 ppm->error_message,
                 "Error allocating memory to read the external spectrum.\n");
      k = tmp;
      tmp = (double *)realloc(pks, n_data_guess*sizeof(double));
      class_test(tmp == NULL,
                 ppm->error_message,
                 "Error allocating memory to read the external spectrum.\n");
      pks = tmp;
      if (ppt->has_tensors == _TRUE_) {
        tmp = (double *)realloc(pkt, n_data_guess*sizeof(double));
        class_test(tmp == NULL,
                   ppm->error_message,
                   "Error allocating memory to read the external spectrum.\n");
        pkt = tmp;
      };
    };
    /* Store */
    k  [n_data] = this_k;
    pks[n_data] = this_pks;
    if (ppt->has_tensors == _TRUE_) {
      pkt[n_data] = this_pkt;
    }
    n_data++;
    /* Check ascending order of the k's */
    if(n_data>1) {
      class_test(k[n_data-1] <= k[n_data-2],
                 ppm->error_message,
                 "The k's are not strictly sorted in ascending order, "
                 "as it is required for the calculation of the splines.\n");
    }
  }

  /* Close the process; fix: compare the integer status with an integer
     literal, not with 0. */
  status = pclose(process);
  class_test(status != 0,
             ppm->error_message,
             "The attempt to launch the external command was unsuccessful. "
             "Try doing it by hand to check for errors.");

  /* fix: guard the boundary tests below against reading k[1] and
     k[n_data-2] out of bounds when fewer than 2 lines were returned */
  class_test(n_data < 2,
             ppm->error_message,
             "The external command returned less than 2 points; "
             "the splines interpolation would not be safe.");

  /* Test limits of the k's */
  class_test(k[1] > ppt->k_min,
             ppm->error_message,
             "Your table for the primordial spectrum does not have "
             "at least 2 points before the minimum value of k: %e . "
             "The splines interpolation would not be safe.",ppt->k_min);
  class_test(k[n_data-2] < ppt->k_max,
             ppm->error_message,
             "Your table for the primordial spectrum does not have "
             "at least 2 points after the maximum value of k: %e . "
             "The splines interpolation would not be safe.",ppt->k_max);

  /** - Store the read results into CLASS structures */
  ppm->lnk_size = n_data;

  /** - Make room */
  class_realloc(ppm->lnk,
                ppm->lnk,
                ppm->lnk_size*sizeof(double),
                ppm->error_message);
  class_realloc(ppm->lnpk[ppt->index_md_scalars],
                ppm->lnpk[ppt->index_md_scalars],
                ppm->lnk_size*sizeof(double),
                ppm->error_message);
  class_realloc(ppm->ddlnpk[ppt->index_md_scalars],
                ppm->ddlnpk[ppt->index_md_scalars],
                ppm->lnk_size*sizeof(double),
                ppm->error_message);
  if (ppt->has_tensors == _TRUE_) {
    class_realloc(ppm->lnpk[ppt->index_md_tensors],
                  ppm->lnpk[ppt->index_md_tensors],
                  ppm->lnk_size*sizeof(double),
                  ppm->error_message);
    class_realloc(ppm->ddlnpk[ppt->index_md_tensors],
                  ppm->ddlnpk[ppt->index_md_tensors],
                  ppm->lnk_size*sizeof(double),
                  ppm->error_message);
  };

  /** - Store values (kept as logs, as everywhere else in this module) */
  for (index_k=0; index_k<ppm->lnk_size; index_k++) {
    ppm->lnk[index_k] = log(k[index_k]);
    ppm->lnpk[ppt->index_md_scalars][index_k] = log(pks[index_k]);
    if (ppt->has_tensors == _TRUE_)
      ppm->lnpk[ppt->index_md_tensors][index_k] = log(pkt[index_k]);
  };

  /** - Release the memory used locally */
  free(k);
  free(pks);
  if (ppt->has_tensors == _TRUE_)
    free(pkt);

  /** - Tell CLASS that there are scalar (and tensor) modes */
  ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
  if (ppt->has_tensors == _TRUE_)
    ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;

  return _SUCCESS_;

}
int primordial_output_titles(struct perturbs * ppt,
                             struct primordial * ppm,
                             char titles[_MAXTITLESTRINGLENGTH_]
                             ) {

  /* Build the column-title string for primordial spectrum output:
     the wavenumber, the scalar spectrum, and (only when tensors are
     requested) the tensor spectrum. 'ppm' is unused here but kept for
     a uniform output API. */
  class_store_columntitle(titles,"k [1/Mpc]",_TRUE_);
  class_store_columntitle(titles,"P_scalar(k)",_TRUE_);
  class_store_columntitle(titles,"P_tensor(k)",ppt->has_tensors);

  return _SUCCESS_;

}
int primordial_output_data(struct perturbs * ppt,
                           struct primordial * ppm,
                           int number_of_titles,
                           double *data) {

  /* Fill one output row per wavenumber: k, P_s(k), and P_t(k) when
     tensors are switched on. Spectra are stored internally as logs,
     hence the exp() when writing them out. */
  int ik;
  int col;
  double *row;

  for (ik = 0; ik < ppm->lnk_size; ik++) {

    row = data + ik*number_of_titles;
    col = 0;

    class_store_double(row, exp(ppm->lnk[ik]), _TRUE_, col);
    class_store_double(row, exp(ppm->lnpk[ppt->index_md_scalars][ik]), _TRUE_, col);
    class_store_double(row, exp(ppm->lnpk[ppt->index_md_tensors][ik]), ppt->has_tensors, col);
  }

  return _SUCCESS_;

}
#include "emf_mie_ms.h"
/* read one double field from the sphere datafile; on failure, print the
   same diagnostic as before (with the field name) and exit */
static void read_ms_double(FILE *fp,double *val,const char *name)
{
  if(fscanf(fp,"%lf",val)!=1){
    printf("emf_mie_ms.c, read_data_ms(), failed to read the %s. exit...\n",name);
    exit(1);
  }
}

/* read one int field from the sphere datafile; exit with a diagnostic
   on failure */
static void read_ms_int(FILE *fp,int *val,const char *name)
{
  if(fscanf(fp,"%d",val)!=1){
    printf("emf_mie_ms.c, read_data_ms(), failed to read the %s. exit...\n",name);
    exit(1);
  }
}

/* skip one (header/comment) line of the sphere datafile; exit with a
   diagnostic on failure */
static void read_ms_skip_line(FILE *fp)
{
  char buf[256]="";
  if(fgets(buf,256,fp)==NULL){
    printf("emf_mie_ms.c, read_data_ms(), failed to read the line. exit...\n");
    exit(1);
  }
}

/* Read the sphere definition file fn_sphr into msp: number of spheres,
   then per sphere its radius, complex refractive index, center
   coordinates, sampling parameters and multipole order limit.
   Allocates msp->sp; also initializes and reads the multi_fbeam data.
   Exits on any I/O or format error. */
void read_data_ms(MSPD *msp)
{
  FILE *fp;
  double tmpd,tmpd2;
  int tmpi,num,nc;

  if((fp=fopen(fn_sphr,"rt"))==NULL){ printf("Can not open the '%s' file. Exit...\n",fn_sphr); exit(1); }

  read_ms_skip_line(fp); // two header lines before the sphere count
  read_ms_skip_line(fp);
  /* keep the original "%d\n" format here: the trailing whitespace
     directive skips blank space so that the next fgets consumes the
     following header line, not the remainder of this one */
  if(fscanf(fp,"%d\n",&tmpi)!=1){
    printf("emf_mie_ms.c, read_data_ms(), failed to read the num. exit...\n");
    exit(1);
  }
  num=tmpi;
  read_ms_skip_line(fp);

  if(num==0){ printf("No sphere defined. Exit...\n"); exit(1); }
  msp->n_sphr=num;
  msp->sp=(SPD *)m_alloc2(num,sizeof(SPD),"read_data_ms(),msp->sp");

  for(nc=0;nc<num;nc++){
    read_ms_double(fp,&tmpd,"a");
    msp->sp[nc].a=tmpd;                  // radius
    read_ms_double(fp,&tmpd,"real(ns)");
    read_ms_double(fp,&tmpd2,"imag(ns)");
    msp->sp[nc].ns=tmpd+I*tmpd2;         // complex refractive index
    read_ms_double(fp,&tmpd,"xs");
    msp->sp[nc].xs=tmpd;                 // center coordinates
    read_ms_double(fp,&tmpd,"ys");
    msp->sp[nc].ys=tmpd;
    read_ms_double(fp,&tmpd,"zs");
    msp->sp[nc].zs=tmpd;
    read_ms_int(fp,&tmpi,"bsn");
    msp->sp[nc].bsn=tmpi;                // basic sampling number
    read_ms_int(fp,&tmpi,"bdv");
    msp->sp[nc].bdv=tmpi;                // surface division number
    read_ms_int(fp,&tmpi,"l_limit");
    msp->sp[nc].l_limit=tmpi;            // multipole order limit
  }
  fclose(fp);

  // multi fbeam
  init_mfb(&(msp->bm));       // initialize
  read_data_mfb(&(msp->bm));  // search and read beam datafile
}
/* Print the beam data followed by every sphere definition, in the
   program's internal (dimensionless) units. */
void print_data_ms(MSPD *msp)
{
  int i;

  print_data_mfb(&(msp->bm)); // beam data first

  // sphere data
  printf("---- sphere data ( %s ) ----\n",fn_sphr);
  printf("number of spheres : %16d\n",msp->n_sphr);
  for(i=0;i<msp->n_sphr;i++){
    SPD *s=&(msp->sp[i]);
    printf(" Sphere ID %d\n",i);
    printf("radius of sphere : %16.15g\n",s->a);
    printf("refractive index of sphere : %7.6g+%7.6gi\n",creal(s->ns),cimag(s->ns));
    printf("x-coordinate of sphere center : %16.15g\n",s->xs);
    printf("y-coordinate of sphere center : %16.15g\n",s->ys);
    printf("z-coordinate of sphere center : %16.15g\n",s->zs);
    printf("basic sampling number on sphere surface : %16d\n",s->bsn);
    printf("division number for sphere surface (per PI): %16d\n",s->bdv);
    printf("limit of order number l : %16d\n",s->l_limit);
  }
  printf("\n");
}
/* Print the beam data followed by every sphere definition, with
   lengths converted from the program's internal units to meters. */
void print_data_ms_mksa(MSPD *msp)
{
  int i;

  print_data_mfb_mksa(&(msp->bm)); // beam data first

  // sphere data, lengths in MKSA
  printf("---- sphere data ( %s ), MKSA system ----\n",fn_sphr);
  printf("number of spheres : %16d\n",msp->n_sphr);
  for(i=0;i<msp->n_sphr;i++){
    SPD *s=&(msp->sp[i]);
    printf(" Sphere ID %d\n",i);
    printf("radius of sphere [m]: %16.15g\n",OSUtoMKSA_length(s->a));
    printf("refractive index of sphere : %7.6g+%7.6gi\n",creal(s->ns),cimag(s->ns));
    printf("x-coordinate of sphere center [m]: %16.15g\n",OSUtoMKSA_length(s->xs));
    printf("y-coordinate of sphere center [m]: %16.15g\n",OSUtoMKSA_length(s->ys));
    printf("z-coordinate of sphere center [m]: %16.15g\n",OSUtoMKSA_length(s->zs));
    printf("basic sampling number on sphere surface : %16d\n",s->bsn);
    printf("division number for sphere surface (per PI): %16d\n",s->bdv);
    printf("limit of order number l : %16d\n",s->l_limit);
  }
  printf("\n");
}
/* Prepare the whole multi-sphere problem: validate the geometry, set
   up the beam, then build per-sphere quadrature tables and Mie
   coefficients, and finally verify the chosen l limits. */
void setup_ms(MSPD *msp)
{
  // local prototypes (definitions appear later in this file)
  void check_position(MSPD *msp);
  void setup_sp(SPD *sp);
  void setup_coefficient_dispd(SPD *sp,Bobj *bm);
  void initialize_eh_r(SPD *sp,Bobj *bm);
  void coefficient(SPD *sp);
  void check_l_limit_ms(MSPD *msp);

  int n;

  check_position(msp);    // abort if any two spheres overlap

  setup_mfb(&(msp->bm));  // multi_fbeam

  for(n=0;n<msp->n_sphr;n++){
    SPD *s=&(msp->sp[n]);
    setup_sp(s);                          // quadrature and work arrays
    setup_coefficient_dispd(s,&(msp->bm)); // Mie coefficients
    initialize_eh_r(s,&(msp->bm));        // incident surface fields
    coefficient(s);                       // expansion coefficients
  }

  check_l_limit_ms(msp);
}
/* Release everything allocated for the multi-sphere problem: each
   sphere's work arrays, the sphere array itself, and the beam data. */
void free_ms(MSPD *msp)
{
  void free_sp(SPD *sp);
  int n;

  for(n=0;n<msp->n_sphr;n++)
    free_sp(&(msp->sp[n]));   // per-sphere buffers

  free(msp->sp);
  msp->n_sphr=0;

  free_mfb(&(msp->bm));       // multi_fbeam data
}
/* Iteratively account for multiple scattering between spheres: in each
   pass, add every sphere's scattered field to every other sphere,
   recompute the expansion coefficients, and monitor the relative change
   of the squared radiation-force norm on each sphere. A sphere counts
   as converged once its change stays below ito_eps for ito_breakcount
   consecutive passes; the loop stops when all spheres have converged or
   ito_max passes were done. No-op for fewer than two spheres. */
void iterative_ops_ms(MSPD *msp)
{
  void field_s_ehr(int src,int obj,MSPD *msp);
  void coefficient(SPD *sp);

  int i,j,t,nn,sbc,num=msp->n_sphr;
  double vf[3],f1,*f0;
  int *bc;

  if(num<2) return; // single sphere: no inter-sphere scattering

  /* fix: the allocation tag previously read "*mc" although it names bc */
  bc=(int *)m_alloc2(num,sizeof(int),"iterative_ops_ms(),*bc");
  f0=(double *)m_alloc2(num,sizeof(double),"iterative_ops_ms(),*f0");

  // reference squared-force norm per sphere before the first pass
  for(t=0;t<num;t++){
    force_ms(t,vf,msp);
    f0[t]=vf[0]*vf[0]+vf[1]*vf[1]+vf[2]*vf[2];
    bc[t]=ito_breakcount;
  }

  printf("iterative operation start (convergence criterion : cv < %g)\n",ito_eps);
  for(nn=0;nn<ito_max;nn++){
    // scatter each sphere's field onto every other sphere
    for(i=0;i<num;i++)
      for(j=0;j<num;j++) if(i!=j) field_s_ehr(i,j,msp);
    for(i=0;i<num;i++) coefficient(&(msp->sp[i]));

    // report the convergence metric of each sphere for this pass
    printf("%3d, cv : ",nn);
    for(t=0;t<num;t++){
      force_ms(t,vf,msp);
      f1=vf[0]*vf[0]+vf[1]*vf[1]+vf[2]*vf[2];
      if(fabs(f1/f0[t]-1.0)<ito_eps) bc[t]--;
      printf("%g\t",fabs(f1/f0[t]-1.0));
      f0[t]=f1;
    }
    printf("\n");

    // stop once every sphere has been converged ito_breakcount times
    sbc=0;
    for(t=0;t<num;t++) if(bc[t]<=0) sbc++;
    if(sbc==num) break;
  }
  if(nn==ito_max){
    printf("The maximum number of iterations has been reached (The result has not converged).\n");
  }

  free(bc); free(f0);
}
/* Write the surface quadrature nodes of every sphere to a
   ".particles" file (one "x y z object_id" line per node). The output
   name is fname with its extension replaced by ".particles". Exits if
   the name is too long or the file cannot be opened. */
void output_node_particles(char *fname,MSPD *msp)
{
  FILE *fp;
  double rad,sin_t,cos_t,sin_p,cos_p,px,py,pz;
  int len,oid,it,ip;
  char *dot,fo[256]={},base[200]={};

  len=strlen(fname);
  if(len>200){
    printf("emf_mie_ms.c, output_node_particles(), file name is too long. exit...\n");
    exit(1);
  }

  // replace the extension (if any) with ".particles"
  sprintf(fo,"%s",fname);
  dot=strrchr(fo,'.');
  if(dot!=NULL){
    strncpy(base,fname,len-strlen(dot));
    sprintf(fo,"%s.particles",base);
  }

  if((fp=fopen(fo,"wt"))==NULL){ printf("Can not open the %s file.\n",fo); exit(1); }
  fprintf(fp,"# x y z object_id\n");

  for(oid=0;oid<msp->n_sphr;oid++){
    rad=msp->sp[oid].a;
    for(it=0;it<msp->sp[oid].ddt.nt;it++){
      sin_t=sin(msp->sp[oid].ddt.xt[it]);
      cos_t=cos(msp->sp[oid].ddt.xt[it]);
      for(ip=0;ip<msp->sp[oid].ddt.np;ip++){
        sin_p=sin(msp->sp[oid].ddt.xp[ip]);
        cos_p=cos(msp->sp[oid].ddt.xp[ip]);
        // spherical to cartesian, shifted to the sphere center
        px=rad*sin_t*cos_p+msp->sp[oid].xs;
        py=rad*sin_t*sin_p+msp->sp[oid].ys;
        pz=rad*cos_t      +msp->sp[oid].zs;
        fprintf(fp,"%15.14e %15.14e %15.14e %d\n",px,py,pz,oid);
      }
    }
  }
  fclose(fp);
}
///////////////////////////////////////////////////////////////////////
/* Verify that no two spheres overlap: for every pair, the distance
   between the centers must be at least the sum of the radii (touching
   spheres are allowed). Exits with an error message on the first
   overlapping pair found. */
void check_position(MSPD *msp)
{
  double sum_r,dist,dx,dy,dz;
  int i,j;

  for(i=0;i<msp->n_sphr;i++){
    for(j=i+1;j<msp->n_sphr;j++){
      sum_r=msp->sp[i].a+msp->sp[j].a;
      dx=msp->sp[j].xs-msp->sp[i].xs;
      dy=msp->sp[j].ys-msp->sp[i].ys;
      dz=msp->sp[j].zs-msp->sp[i].zs;
      dist=sqrt(dx*dx+dy*dy+dz*dz);
      if(dist<sum_r){
        /* fix: error message previously read "is overlaped" */
        printf("Sphere Position Check Error! sphere_id[%d] and sphere_id[%d] are overlapped. Exit...\n",i,j);
        exit(1);
      }
    }
  }
}
/* Allocate and initialize one sphere's per-sphere data: the composite
   Gauss-Legendre quadrature over theta in [0,pi] and phi in [0,2pi],
   the surface field buffers, and the coefficient tables up to the
   multipole limit l_limit. */
void setup_sp(SPD *sp)
{
  void gauleg_dv(double a,double b,double *x,double *w,int bn,int dv);

  int lmax=sp->l_limit;
  int nt= sp->bsn*sp->bdv;   // theta samples
  int np=2*sp->bsn*sp->bdv;  // phi samples (twice as many)

  sp->ddt.nt=nt;
  sp->ddt.np=np;

  // quadrature nodes and weights
  sp->ddt.xp=(double *)m_alloc2(np,sizeof(double),"setup_sp(),sp->ddt.xp");
  sp->ddt.wp=(double *)m_alloc2(np,sizeof(double),"setup_sp(),sp->ddt.wp");
  sp->ddt.xt=(double *)m_alloc2(nt,sizeof(double),"setup_sp(),sp->ddt.xt");
  sp->ddt.wt=(double *)m_alloc2(nt,sizeof(double),"setup_sp(),sp->ddt.wt");
  gauleg_dv(0.0, M_PI,sp->ddt.xt,sp->ddt.wt,sp->bsn, sp->bdv);
  gauleg_dv(0.0,2.0*M_PI,sp->ddt.xp,sp->ddt.wp,sp->bsn,2*sp->bdv);

  // complex field samples on the sphere surface
  sp->ddt.eri=(double complex *)m_alloc2(np*nt,sizeof(double complex),"setup_sp(),sp->ddt.eri");
  sp->ddt.hri=(double complex *)m_alloc2(np*nt,sizeof(double complex),"setup_sp(),sp->ddt.hri");
  sp->ddt.ers=(double complex *)m_alloc2(np*nt,sizeof(double complex),"setup_sp(),sp->ddt.ers");
  sp->ddt.hrs=(double complex *)m_alloc2(np*nt,sizeof(double complex),"setup_sp(),sp->ddt.hrs");

  // coefficient tables indexed by multipole order l = 0..lmax
  sp->ddt.cab=(double *)m_alloc2(lmax+1,sizeof(double),"setup_sp(),sp->ddt.cab");
  sp->ddt.ca=(double complex *)m_alloc2(lmax+1,sizeof(double complex),"setup_sp(),sp->ddt.ca");
  sp->ddt.cb=(double complex *)m_alloc2(lmax+1,sizeof(double complex),"setup_sp(),sp->ddt.cb");
  sp->ddt.cc=(double complex *)m_alloc2(lmax+1,sizeof(double complex),"setup_sp(),sp->ddt.cc");
  sp->ddt.cd=(double complex *)m_alloc2(lmax+1,sizeof(double complex),"setup_sp(),sp->ddt.cd");
  // (l,m) tables, lmax*(lmax+2) entries
  sp->ddt.Alm=(double complex *)m_alloc2(lmax*(lmax+2),sizeof(double complex),"setup_sp(),sp->ddt.Alm");
  sp->ddt.Blm=(double complex *)m_alloc2(lmax*(lmax+2),sizeof(double complex),"setup_sp(),sp->ddt.Blm");
}
void gauleg_dv(double a,double b,double *x,double *w,int bn,int dv)
{
  /* Composite Gauss-Legendre rule: split [a,b] into dv equal panels and
     map a bn-point rule on [-1,1] onto each panel. Outputs dv*bn nodes
     x[] and weights w[]. */
  double base_x[bn], base_w[bn];
  gauleg(-1.0, 1.0, base_x, base_w, bn);

  double panel = (b - a) / (double)dv;
  double hi = a;
  int d, k, idx = 0;
  for (d = 0; d < dv; d++) {
    /* panel [lo,hi]; hi is accumulated to keep panels contiguous */
    double lo = hi;
    hi = lo + panel;
    double half = 0.5 * (hi - lo);  /* Jacobian of the affine map */
    double mid  = 0.5 * (hi + lo);
    for (k = 0; k < bn; k++) {
      x[idx] = half * base_x[k] + mid;
      w[idx] = half * base_w[k];
      idx++;
    }
  }
}
void free_sp(SPD *sp)
{
  /* Release every buffer allocated by setup_sp(). */
  free(sp->ddt.xp);
  free(sp->ddt.wp);
  free(sp->ddt.xt);
  free(sp->ddt.wt);
  free(sp->ddt.eri);
  free(sp->ddt.hri);
  free(sp->ddt.ers);
  free(sp->ddt.hrs);
  free(sp->ddt.cab);
  free(sp->ddt.ca);
  free(sp->ddt.cb);
  free(sp->ddt.cc);
  free(sp->ddt.cd);
  free(sp->ddt.Alm);
  free(sp->ddt.Blm);
}
void setup_coefficient_dispd(SPD *sp,Bobj *bm)
{
/* Precomputes the per-order coefficient tables (cab, ca..cd) for sphere sp
   from Riccati-Bessel functions evaluated at the surface size parameters.
   ddt.l_max is clamped to the highest order both function evaluations
   could deliver. bm supplies the background medium (n_0, lambda_0);
   sp->ns is the sphere's refractive index. */
double complex z,nr,b_ac,b_bd,b_t;
double x,a2;
int mn,nn,i;
double complex *xi,*dxi;
double complex *psic,*dpsic;
mn=sp->l_limit;
x=2.0*M_PI*bm->n_0*sp->a/bm->lambda_0; // size parameter in the surrounding medium
xi =(double complex *)m_alloc2(mn+1,sizeof(double complex),"setup_coefficient_dispd(),xi");
dxi=(double complex *)m_alloc2(mn+1,sizeof(double complex),"setup_coefficient_dispd(),dxi");
rcth1d(mn,x,&nn,xi,dxi);
sp->ddt.l_max=nn;
z=2.0*M_PI*sp->ns*sp->a/bm->lambda_0; // size parameter inside the sphere (complex ns allowed)
psic =(double complex *)m_alloc2(mn+1,sizeof(double complex),"setup_coefficient_dispd(),psic");
/* fixed garbled allocation tag (was "setpp_coefficient?dispd(),dpsic") */
dpsic=(double complex *)m_alloc2(mn+1,sizeof(double complex),"setup_coefficient_dispd(),dpsic");
rctjc(mn,z,&nn,psic,dpsic);
if(nn<sp->ddt.l_max) sp->ddt.l_max=nn; // keep the more restrictive usable order
a2=pow(sp->a,2);
for(i=1;i<=mn;i++){
sp->ddt.cab[i]=a2/((double)(i*(i+1))*creal(xi[i]));
}
nr=sp->ns/bm->n_0; // relative refractive index
for(i=1;i<=mn;i++){
b_ac=1.0/(nr*psic[i]*dxi[i]-dpsic[i]*xi[i]);
b_bd=1.0/(psic[i]*dxi[i]-nr*dpsic[i]*xi[i]);
b_t =dxi[i]*creal(xi[i])-xi[i]*creal(dxi[i]);
sp->ddt.ca[i]=(dpsic[i]*creal(xi[i])-nr*psic[i]*creal(dxi[i]))*b_ac;
sp->ddt.cb[i]=(nr*dpsic[i]*creal(xi[i])-psic[i]*creal(dxi[i]))*b_bd;
sp->ddt.cc[i]=b_t*b_ac/nr;
sp->ddt.cd[i]=b_t*b_bd;
}
free(xi); free(dxi);
free(psic); free(dpsic);
}
/* Samples the incident beam's radial E and H components on the sphere's
   surface quadrature grid (ddt.eri / ddt.hri). The phi loop is
   parallelized; each (i,j) writes a distinct slot of eri/hri. */
void initialize_eh_r(SPD *sp,Bobj *bm)
{
double complex e[3],h[3];
double r,theta,phi,x[3],sin_t,cos_t,sin_p,cos_p;
int i,j,nt,np;
nt=sp->ddt.nt;
np=sp->ddt.np;
r=sp->a; // sample on the sphere surface
for(i=0;i<nt;i++){
theta=sp->ddt.xt[i];
sin_t=sin(theta); cos_t=cos(theta);
#pragma omp parallel for schedule(dynamic) private(phi,sin_p,cos_p,x,e,h) // OpenMP parallel for over phi nodes
for(j=0;j<np;j++){
phi=sp->ddt.xp[j];
sin_p=sin(phi); cos_p=cos(phi);
// Cartesian surface point (sphere center at xs,ys,zs)
x[0]=r*sin_t*cos_p+sp->xs;
x[1]=r*sin_t*sin_p+sp->ys;
x[2]=r*cos_t +sp->zs;
calc_mfb_EH(e,h,x,bm); // beam field at the surface point
// project onto the outward radial unit vector
sp->ddt.eri[i*np+j]=e[0]*sin_t*cos_p+e[1]*sin_t*sin_p+e[2]*cos_t;
sp->ddt.hri[i*np+j]=h[0]*sin_t*cos_p+h[1]*sin_t*sin_p+h[2]*cos_t;
}
}
}
/* Projects the total radial fields (incident eri/hri + scattered ers/hrs)
   onto spherical harmonics over the surface quadrature grid, accumulating
   the expansion coefficients Alm and Blm, then scales them by cab[l] and
   clears the scattered-field buffers for the next coupling iteration.
   Coefficient layout: slots 0..lm-1 hold m=0 for l=1..lm; the rest hold
   (+m,-m) pairs for m=1..lm, l=m..lm. */
void coefficient(SPD *sp)
{
int ti,lm,np,nt,tt,l,m,i,j,t;
size_t ms,lmax;
lmax=(size_t)sp->l_limit;
ms=gsl_sf_legendre_array_n(lmax); // work-array size required by GSL
lm=(int)lmax;
np=sp->ddt.np;
nt=sp->ddt.nt;
// reset accumulators
for(ti=0;ti<lm*(lm+2);ti++){
sp->ddt.Alm[ti]=0.0; sp->ddt.Blm[ti]=0.0;
}
#pragma omp parallel private(i,j,l,m,t,tt,ti) // each thread owns private partial sums
{
double complex Yp,Ym;
double theta,sin_t,cos_t,flag;
double *sphPlm=(double *)m_alloc2(ms,sizeof(double),"coefficient(),*sphPlm");
double complex *e_phim=(double complex *)m_alloc2(np,sizeof(double complex),"coefficient(),*e_phim");
double complex *tmpAlm=(double complex *)m_alloc2(lm*(lm+2),sizeof(double complex),"coefficient(),*tmpAlm");
double complex *tmpBlm=(double complex *)m_alloc2(lm*(lm+2),sizeof(double complex),"coefficient(),*tmpBlm");
#pragma omp for schedule(dynamic) // parallel over theta nodes
for(i=0;i<nt;i++){
theta=sp->ddt.xt[i];
sin_t=sin(theta); cos_t=cos(theta);
tt=0;
m=0;
// normalized associated Legendre values at this theta
gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_SPHARM,lmax,cos_t,-1,sphPlm);
// m = 0: Y_l0 is real; integrate over phi with weights wp
for(l=1;l<=lm;l++){
Yp=sphPlm[gsl_sf_legendre_array_index(l,m)];
for(j=0;j<np;j++){
tmpAlm[tt]+=(sp->ddt.eri[i*np+j]+sp->ddt.ers[i*np+j])*conj(Yp)*sp->ddt.wp[j];
tmpBlm[tt]+=(sp->ddt.hri[i*np+j]+sp->ddt.hrs[i*np+j])*conj(Yp)*sp->ddt.wp[j];
}
tt++;
}
// m > 0: build exp(i*m*phi) table, use Y_{l,-m} = flag*conj(Y_{l,m})
for(m=1;m<=lm;m++){
for(t=0;t<np;t++) e_phim[t]=cos((double)m*sp->ddt.xp[t])+I*sin((double)m*sp->ddt.xp[t]);
if(m%2==0) flag=1.0;
else flag=-1.0;
for(l=m;l<=lm;l++){
for(j=0;j<np;j++){
Yp=sphPlm[gsl_sf_legendre_array_index(l,m)]*e_phim[j];
Ym=flag*conj(Yp);
tmpAlm[tt ]+=(sp->ddt.eri[i*np+j]+sp->ddt.ers[i*np+j])*conj(Yp)*sp->ddt.wp[j];
tmpBlm[tt ]+=(sp->ddt.hri[i*np+j]+sp->ddt.hrs[i*np+j])*conj(Yp)*sp->ddt.wp[j];
tmpAlm[tt+1]+=(sp->ddt.eri[i*np+j]+sp->ddt.ers[i*np+j])*conj(Ym)*sp->ddt.wp[j];
tmpBlm[tt+1]+=(sp->ddt.hri[i*np+j]+sp->ddt.hrs[i*np+j])*conj(Ym)*sp->ddt.wp[j];
}
tt+=2;
}
}
// merge this theta node into the shared accumulators; each unnamed
// critical guards the single following statement
for(ti=0;ti<lm*(lm+2);ti++){
#pragma omp critical // serialize Alm update
sp->ddt.Alm[ti]+=tmpAlm[ti]*sin_t*sp->ddt.wt[i];
#pragma omp critical // serialize Blm update
sp->ddt.Blm[ti]+=tmpBlm[ti]*sin_t*sp->ddt.wt[i];
tmpAlm[ti]=0.0; tmpBlm[ti]=0.0;
}
}
free(sphPlm);
free(e_phim);
free(tmpAlm); free(tmpBlm);
} // pragma parallel end
// scale by cab[l], following the same (l,m) slot layout as above
tt=0; m=0;
for(l=1;l<=lm;l++){
sp->ddt.Alm[tt]*=sp->ddt.cab[l];
sp->ddt.Blm[tt]*=sp->ddt.cab[l];
tt++;
}
for(m=1;m<=lm;m++){
for(l=m;l<=lm;l++){
sp->ddt.Alm[tt ]*=sp->ddt.cab[l]; sp->ddt.Alm[tt+1]*=sp->ddt.cab[l];
sp->ddt.Blm[tt ]*=sp->ddt.cab[l]; sp->ddt.Blm[tt+1]*=sp->ddt.cab[l];
tt+=2;
}
}
// reset scattered-field buffers for the next accumulation pass
for(i=0;i<nt;i++){
for(j=0;j<np;j++){
sp->ddt.ers[i*np+j]=0.0;
sp->ddt.hrs[i*np+j]=0.0;
}
}
}
void check_l_limit_ms(MSPD *msp)
{
  /* Warn (without aborting) when a sphere's requested expansion order
     exceeds the maximum order the Riccati-Bessel evaluation delivered. */
  int sid;
  for (sid = 0; sid < msp->n_sphr; sid++) {
    if (msp->sp[sid].l_limit <= msp->sp[sid].ddt.l_max) continue;
    printf("Overflow and underflow problem of Riccati-Bessel function occurred sphere id %d. Check the data precision.\n",sid);
    printf("Available order number is less than %d.\n",msp->sp[sid].ddt.l_max);
  }
}
/* Multi-sphere coupling step: evaluates the field scattered by sphere
   'src' on the surface quadrature grid of sphere 'obj' and adds its
   radial component into obj's ers/hrs buffers. Theta loop is
   parallelized; each (i,j) accumulates into a distinct slot. */
void field_s_ehr(int src,int obj,MSPD *msp)
{
void scattered_EH(double complex *e,double complex *h,double *xb,SPD *sp,Bobj *bm);
double complex es[3],hs[3];
double cos_t,sin_t,cos_p,sin_p;
double r[3];
double a=msp->sp[obj].a; // radius of the observation sphere
int np=msp->sp[obj].ddt.np;
int nt=msp->sp[obj].ddt.nt;
int i,j;
#pragma omp parallel for schedule(dynamic) private(j,cos_t,sin_t,cos_p,sin_p,r,es,hs)
for(i=0;i<nt;i++){
cos_t=cos(msp->sp[obj].ddt.xt[i]); sin_t=sin(msp->sp[obj].ddt.xt[i]);
for(j=0;j<np;j++){
cos_p=cos(msp->sp[obj].ddt.xp[j]); sin_p=sin(msp->sp[obj].ddt.xp[j]);
// surface point of sphere 'obj' in global coordinates
r[0]=a*sin_t*cos_p+msp->sp[obj].xs;
r[1]=a*sin_t*sin_p+msp->sp[obj].ys;
r[2]=a*cos_t +msp->sp[obj].zs;
scattered_EH(es,hs,r,&(msp->sp[src]),&(msp->bm));
// accumulate radial projection (+= : contributions from all sources add up)
msp->sp[obj].ddt.ers[i*np+j]+=es[0]*sin_t*cos_p+es[1]*sin_t*sin_p+es[2]*cos_t;
msp->sp[obj].ddt.hrs[i*np+j]+=hs[0]*sin_t*cos_p+hs[1]*sin_t*sin_p+hs[2]*cos_t;
}
}
}
/* Evaluates the scattered E and H fields of sphere sp at point xb
   (global Cartesian coordinates) by summing the multipole expansion
   with coefficients Alm/Blm and outgoing Riccati-Bessel functions
   xi_l = ker*(j_l + i*y_l). Results are returned as Cartesian
   components in e[3], h[3]. Allocates temporaries per call. */
void scattered_EH(double complex *e,double complex *h,double *xb,SPD *sp,Bobj *bm)
{
double complex er,et,ep,hr,ht,hp,Yp,Ym,dYp,dYm,dep,expi;
double r,rxy,r2,cos_t,sin_t,cos_p,sin_p,ker,ke,flag,i_ne,ne,x,y,z,i_sin_t,djl,dyl;
int l,m,tt,lm,ai;
size_t ms,lmax=(size_t)sp->l_limit;
ms=gsl_sf_legendre_array_n(lmax); // GSL Legendre work-array size
lm=(int)lmax;
double *sphPlm =(double *)m_alloc2(ms,sizeof(double),"scattered_EH(),*sphPlm");
double *dsphPlm=(double *)m_alloc2(ms,sizeof(double),"scattered_EH(),*dsphPlm");
double complex *xi =(double complex *)m_alloc2(lm+1,sizeof(double complex),"scattered_EH(),*xi");
double complex *dxi=(double complex *)m_alloc2(lm+1,sizeof(double complex),"scattered_EH(),*dxi");
double *jl=(double *)m_alloc2(lm+2,sizeof(double),"scattered_EH(),*jl");
double *yl=(double *)m_alloc2(lm+2,sizeof(double),"scattered_EH(),*yl");
x=xb[0]-sp->xs; y=xb[1]-sp->ys; z=xb[2]-sp->zs; // position relative to sphere center
r2=x*x+y*y+z*z; r=sqrt(r2);
rxy=sqrt(x*x+y*y);
if(rxy==0.0){ // x==0,y==0: nudge off the z-axis to avoid the 1/sin(theta) singularity
x=z*0.7e-7;
y=z*0.7e-7;
r2=x*x+y*y+z*z; r=sqrt(r2);
rxy=sqrt(x*x+y*y);
}
cos_t=z/r; sin_t=rxy/r; i_sin_t=r/rxy;
cos_p=x/rxy; sin_p=y/rxy;
ke =2.0*M_PI*bm->n_0/bm->lambda_0; // wavenumber in the background medium
ker=ke*r;
ne=bm->n_0;
i_ne=1.0/(bm->n_0);
gsl_sf_bessel_jl_array(lm+1,ker,jl);
gsl_sf_bessel_yl_array(lm+1,ker,yl);
for(l=1;l<=lm;l++){
// derivatives via the recurrence f_l' = f_{l-1} - (l+1)/x * f_l
djl=jl[l-1]-(double)(l+1)/ker*jl[l];
dyl=yl[l-1]-(double)(l+1)/ker*yl[l];
xi[l]=(jl[l]+yl[l]*I)*ker; // xi_l(ker) and its derivative
dxi[l]=jl[l]+ker*djl+(yl[l]+ker*dyl)*I;
}
dep=cos_p+I*sin_p; expi=1.0; // exp(i*phi); expi accumulates exp(i*m*phi)
er=0.0; et=0.0; ep=0.0;
hr=0.0; ht=0.0; hp=0.0;
tt=0; m=0;
gsl_sf_legendre_deriv_alt_array_e(GSL_SF_LEGENDRE_SPHARM,lmax,cos_t,-1,sphPlm,dsphPlm);
// m = 0 terms (coefficient slots 0..lm-1)
for(l=1;l<=lm;l++){
ai=gsl_sf_legendre_array_index(l,m);
Yp = sphPlm[ai];
dYp=dsphPlm[ai];
er+=(double)(l*(l+1))*sp->ddt.ca[l]*sp->ddt.Alm[tt]*xi[l]*Yp;
hr+=(double)(l*(l+1))*sp->ddt.cb[l]*sp->ddt.Blm[tt]*xi[l]*Yp;
et+=sp->ddt.ca[l]*sp->ddt.Alm[tt]*dxi[l]*dYp-(double)m*i_ne*sp->ddt.cb[l]*sp->ddt.Blm[tt]*xi[l]*Yp*i_sin_t;
ht+=sp->ddt.cb[l]*sp->ddt.Blm[tt]*dxi[l]*dYp+(double)m* ne*sp->ddt.ca[l]*sp->ddt.Alm[tt]*xi[l]*Yp*i_sin_t;
ep+=(double)m*sp->ddt.ca[l]*sp->ddt.Alm[tt]*dxi[l]*Yp*i_sin_t-i_ne*sp->ddt.cb[l]*sp->ddt.Blm[tt]*xi[l]*dYp;
hp+=(double)m*sp->ddt.cb[l]*sp->ddt.Blm[tt]*dxi[l]*Yp*i_sin_t+ ne*sp->ddt.ca[l]*sp->ddt.Alm[tt]*xi[l]*dYp;
tt++;
}
// m > 0: slots hold (+m,-m) pairs; Y_{l,-m} = flag*conj(Y_{l,m})
for(m=1;m<=lm;m++){
expi*=dep;
if(m%2==0)flag= 1.0;
else flag=-1.0;
for(l=m;l<=lm;l++){
ai=gsl_sf_legendre_array_index(l,m);
Yp = sphPlm[ai]*expi;
dYp=dsphPlm[ai]*expi;
Ym =flag*conj( Yp);
dYm=flag*conj(dYp);
er+=(double)(l*(l+1))*sp->ddt.ca[l]*xi[l]*(sp->ddt.Alm[tt ]*Yp+sp->ddt.Alm[tt+1]*Ym);
et+=dxi[l]*sp->ddt.ca[l]*(sp->ddt.Alm[tt ]*dYp+sp->ddt.Alm[tt+1]*dYm)
-(double)m*i_ne*sp->ddt.cb[l]*xi[l]*i_sin_t*(sp->ddt.Blm[tt ]*Yp-sp->ddt.Blm[tt+1]*Ym);
ep+=(double)m*sp->ddt.ca[l]*dxi[l]*i_sin_t*(sp->ddt.Alm[tt ]*Yp-sp->ddt.Alm[tt+1]*Ym)
-i_ne*sp->ddt.cb[l]*xi[l]*(sp->ddt.Blm[tt ]*dYp+sp->ddt.Blm[tt+1]*dYm);
hr+=(double)(l*(l+1))*sp->ddt.cb[l]*xi[l]*(sp->ddt.Blm[tt ]*Yp+sp->ddt.Blm[tt+1]*Ym);
ht+=sp->ddt.cb[l]*dxi[l]*(sp->ddt.Blm[tt ]*dYp+sp->ddt.Blm[tt+1]*dYm)
+(double)m*ne*sp->ddt.ca[l]*xi[l]*i_sin_t*(sp->ddt.Alm[tt ]*Yp-sp->ddt.Alm[tt+1]*Ym);
hp+=(double)m*sp->ddt.cb[l]*dxi[l]*i_sin_t*(sp->ddt.Blm[tt ]*Yp-sp->ddt.Blm[tt+1]*Ym)
+ne*sp->ddt.ca[l]*xi[l]*(sp->ddt.Alm[tt ]*dYp+sp->ddt.Alm[tt+1]*dYm);
tt+=2;
}
}
// apply radial scaling factors
er/=r2;
et*=ke/r;
ep*=I*ke/r;
hr/=r2;
ht*=ke/r;
hp*=I*ke/r;
// spherical -> Cartesian components
e[0]=er*sin_t*cos_p+et*cos_t*cos_p-ep*sin_p;
e[1]=er*sin_t*sin_p+et*cos_t*sin_p+ep*cos_p;
e[2]=er*cos_t-et*sin_t;
h[0]=hr*sin_t*cos_p+ht*cos_t*cos_p-hp*sin_p;
h[1]=hr*sin_t*sin_p+ht*cos_t*sin_p+hp*cos_p;
h[2]=hr*cos_t-ht*sin_t;
free(sphPlm); free(dsphPlm);
free(xi); free(dxi);
free(jl); free(yl);
}
/* Evaluates the internal E and H fields of sphere sp at point xb (global
   Cartesian coordinates, expected inside the sphere) from the multipole
   expansion with regular Riccati-Bessel functions psi_l. The exact
   sphere center is handled by the dedicated limit routine
   internal_EH_r0(). Results are Cartesian components in e[3], h[3]. */
void internal_EH(double complex *e,double complex *h,double *xb,SPD *sp,Bobj *bm)
{
void internal_EH_r0(double complex *e,double complex *h,SPD *sp,Bobj *bm);
double complex er,et,ep,hr,ht,hp,Yp,Ym,dYp,dYm,dep,expi,ke,ker,i_ne,ne;
double r,rxy,r2,cos_t,sin_t,cos_p,sin_p,flag,x,y,z,i_sin_t;
int l,m,tt,nn,lm,ai;
size_t ms,lmax=(size_t)sp->ddt.l_max;
ms=gsl_sf_legendre_array_n(lmax); // GSL Legendre work-array size
lm=(int)lmax;
double *sphPlm =(double *)m_alloc2(ms,sizeof(double),"internal_EH(),*sphPlm");
double *dsphPlm=(double *)m_alloc2(ms,sizeof(double),"internal_EH(),*dsphPlm");
double complex *psi =(double complex *)m_alloc2(lm+1,sizeof(double complex),"internal_EH(),*psi");
double complex *dpsi=(double complex *)m_alloc2(lm+1,sizeof(double complex),"internal_EH(),*dpsi");
x=xb[0]-sp->xs; y=xb[1]-sp->ys; z=xb[2]-sp->zs; // position relative to sphere center
r2=x*x+y*y+z*z; r=sqrt(r2);
rxy=sqrt(x*x+y*y);
if(rxy==0.0){ // x==0,y==0: on the z axis
if(z==0.0){
// exact center: use the analytic r->0 limit
internal_EH_r0(e,h,sp,bm);
return;
}
// nudge off-axis to avoid the 1/sin(theta) singularity
x=z*0.7e-7;
y=z*0.7e-7;
r2=x*x+y*y+z*z; r=sqrt(r2);
rxy=sqrt(x*x+y*y);
}
cos_t=z/r; sin_t=rxy/r; i_sin_t=r/rxy;
cos_p=x/rxy; sin_p=y/rxy;
ke =2.0*M_PI*sp->ns/bm->lambda_0; // (complex) wavenumber inside the sphere
ker=ke*r;
ne=sp->ns;
i_ne=1.0/(sp->ns);
rctjc(lm,ker,&nn,psi,dpsi);
if(nn<sp->ddt.l_max) sp->ddt.l_max=nn; // clamp to orders actually computed
dep=cos_p+I*sin_p; expi=1.0; // exp(i*phi); expi accumulates exp(i*m*phi)
er=0.0; et=0.0; ep=0.0;
hr=0.0; ht=0.0; hp=0.0;
tt=0; m=0;
gsl_sf_legendre_deriv_alt_array_e(GSL_SF_LEGENDRE_SPHARM,lmax,cos_t,-1,sphPlm,dsphPlm);
// m = 0 terms (coefficient slots 0..lm-1)
for(l=1;l<=lm;l++){
ai=gsl_sf_legendre_array_index(l,m);
Yp = sphPlm[ai];
dYp=dsphPlm[ai];
er+=(double)(l*(l+1))*sp->ddt.cc[l]*sp->ddt.Alm[tt]*psi[l]*Yp;
et+=sp->ddt.cc[l]*sp->ddt.Alm[tt]*dpsi[l]*dYp-(double)m*i_ne*sp->ddt.cd[l]*sp->ddt.Blm[tt]*psi[l]*Yp*i_sin_t;
ep+=(double)m*sp->ddt.cc[l]*sp->ddt.Alm[tt]*dpsi[l]*Yp*i_sin_t-i_ne*sp->ddt.cd[l]*sp->ddt.Blm[tt]*psi[l]*dYp;
hr+=(double)(l*(l+1))*sp->ddt.cd[l]*sp->ddt.Blm[tt]*psi[l]*Yp;
ht+=sp->ddt.cd[l]*sp->ddt.Blm[tt]*dpsi[l]*dYp+(double)m*ne*sp->ddt.cc[l]*sp->ddt.Alm[tt]*psi[l]*Yp*i_sin_t;
hp+=(double)m*sp->ddt.cd[l]*sp->ddt.Blm[tt]*dpsi[l]*Yp*i_sin_t+ne*sp->ddt.cc[l]*sp->ddt.Alm[tt]*psi[l]*dYp;
tt++;
}
// m > 0: slots hold (+m,-m) pairs; Y_{l,-m} = flag*conj(Y_{l,m})
for(m=1;m<=lm;m++){
expi*=dep;
if(m%2==0)flag= 1.0;
else flag=-1.0;
for(l=m;l<=lm;l++){
ai=gsl_sf_legendre_array_index(l,m);
Yp = sphPlm[ai]*expi;
dYp=dsphPlm[ai]*expi;
Ym =flag*conj( Yp);
dYm=flag*conj(dYp);
er+=(double)(l*(l+1))*sp->ddt.cc[l]*psi[l]*(sp->ddt.Alm[tt ]*Yp +sp->ddt.Alm[tt+1]*Ym);
et+=sp->ddt.cc[l]*dpsi[l]*(sp->ddt.Alm[tt ]*dYp +sp->ddt.Alm[tt+1]*dYm)
-(double)m*i_ne*sp->ddt.cd[l]*psi[l]*i_sin_t*(sp->ddt.Blm[tt ]*Yp-sp->ddt.Blm[tt+1]*Ym);
ep+=(double)m*sp->ddt.cc[l]*dpsi[l]*i_sin_t*(sp->ddt.Alm[tt ]*Yp-sp->ddt.Alm[tt+1]*Ym)
-i_ne*sp->ddt.cd[l]*psi[l]*(sp->ddt.Blm[tt ]*dYp+sp->ddt.Blm[tt+1]*dYm);
hr+=(double)(l*(l+1))*sp->ddt.cd[l]*psi[l]*(sp->ddt.Blm[tt ]*Yp+sp->ddt.Blm[tt+1]*Ym);
ht+=sp->ddt.cd[l]*dpsi[l]*(sp->ddt.Blm[tt ]*dYp+sp->ddt.Blm[tt+1]*dYm)
+(double)m*ne*sp->ddt.cc[l]*psi[l]*i_sin_t*(sp->ddt.Alm[tt ]*Yp-sp->ddt.Alm[tt+1]*Ym);
hp+=(double)m*sp->ddt.cd[l]*dpsi[l]*i_sin_t*(sp->ddt.Blm[tt ]*Yp-sp->ddt.Blm[tt+1]*Ym)
+ne*sp->ddt.cc[l]*psi[l]*(sp->ddt.Alm[tt ]*dYp+sp->ddt.Alm[tt+1]*dYm);
tt+=2;
}
}
// apply radial scaling factors
er/=r2;
et*=ke/r;
ep*=I*ke/r;
hr/=r2;
ht*=ke/r;
hp*=I*ke/r;
// spherical -> Cartesian components
e[0]=er*sin_t*cos_p+et*cos_t*cos_p-ep*sin_p;
e[1]=er*sin_t*sin_p+et*cos_t*sin_p+ep*cos_p;
e[2]=er*cos_t-et*sin_t;
h[0]=hr*sin_t*cos_p+ht*cos_t*cos_p-hp*sin_p;
h[1]=hr*sin_t*sin_p+ht*cos_t*sin_p+hp*cos_p;
h[2]=hr*cos_t-ht*sin_t;
free(sphPlm); free(dsphPlm);
free(psi); free(dpsi);
}
/* Analytic r->0 limit of the internal field at the sphere center.
   Only the l=1 multipoles (m = -1, 0, +1) survive; the direction is
   taken as the limit along the +x axis (theta=pi/2, phi=0). */
void internal_EH_r0(double complex *e,double complex *h,SPD *sp,Bobj *bm)
{
double complex pickup_Alm(int l,int m,SPD *sp);
double complex pickup_Blm(int l,int m,SPD *sp);
double complex ke,er,et,ep,hr,ht,hp,c1m1,c1p1,c10,d1m1,d1p1,d10,Y10,Y1p1,Y1m1,dY10,dY1p1,dY1m1;
double cos_t,sin_t,i_sin_t,cos_p,sin_p;
ke =2.0*M_PI*sp->ns/bm->lambda_0; // (complex) wavenumber inside the sphere
// assume r=(x,0,0) then x to 0
cos_t=0.0; sin_t=1.0; i_sin_t=1.0;
cos_p=1.0; sin_p=0.0;
// l=1 expansion coefficients weighted by the internal-field factors
c10 =sp->ddt.cc[1]*pickup_Alm(1,0,sp);
c1p1=sp->ddt.cc[1]*pickup_Alm(1,1,sp);
c1m1=sp->ddt.cc[1]*pickup_Alm(1,-1,sp);
d10 =sp->ddt.cd[1]*pickup_Blm(1,0,sp);
d1p1=sp->ddt.cd[1]*pickup_Blm(1,1,sp);
d1m1=sp->ddt.cd[1]*pickup_Blm(1,-1,sp);
// spherical harmonics Y_1m and their theta derivatives at (theta,phi)
Y10 = 0.5*sqrt(3.0/M_PI)*cos_t;
Y1p1=-0.5*sqrt(3.0/(2.0*M_PI))*sin_t*(cos_p+I*sin_p);
Y1m1= 0.5*sqrt(3.0/(2.0*M_PI))*sin_t/(cos_p+I*sin_p);
dY10 =-0.5*sqrt(3.0/M_PI)*sin_t;
dY1p1=-0.5*sqrt(3.0/(2.0*M_PI))*cos_t*(cos_p+I*sin_p);
dY1m1= 0.5*sqrt(3.0/(2.0*M_PI))*cos_t/(cos_p+I*sin_p);
// limiting spherical components
er=2.0*ke*ke/(3.0)*(c10*Y10 +c1p1*Y1p1 +c1m1*Y1m1);
et=2.0*ke*ke/(3.0)*(c10*dY10+c1p1*dY1p1+c1m1*dY1m1);
ep=2.0*I*ke*ke*i_sin_t/(3.0)*(c1p1*Y1p1-c1m1*Y1m1);
hr=2.0*ke*ke/(3.0)*(d10*Y10 +d1p1*Y1p1 +d1m1*Y1m1);
ht=2.0*ke*ke/(3.0)*(d10*dY10+d1p1*dY1p1+d1m1*dY1m1);
hp=2.0*I*ke*ke*i_sin_t/(3.0)*(d1p1*Y1p1-d1m1*Y1m1);
// spherical -> Cartesian components
e[0]=er*sin_t*cos_p+et*cos_t*cos_p-ep*sin_p;
e[1]=er*sin_t*sin_p+et*cos_t*sin_p+ep*cos_p;
e[2]=er*cos_t-et*sin_t;
h[0]=hr*sin_t*cos_p+ht*cos_t*cos_p-hp*sin_p;
h[1]=hr*sin_t*sin_p+ht*cos_t*sin_p+hp*cos_p;
h[2]=hr*cos_t-ht*sin_t;
}
double complex pickup_Alm(int l,int m,SPD *sp)
{
  /* Fetch coefficient A_{l,m} from the packed Alm array.
     Layout: slots 0..lm-1 hold m=0 for l=1..lm; the remainder holds
     (+m,-m) pairs for m=1..lm, l=m..lm. Returns 0 when |m|>l. */
  const int lm = sp->l_limit;
  const int am = abs(m);
  if (l < am) return 0.0;              /* no such harmonic */
  if (m == 0) return sp->ddt.Alm[l-1];
  const int base = 2*lm*am - am*am + am - 2 - lm + 2*l;  /* start of the (+m,-m) pair */
  return (m > 0) ? sp->ddt.Alm[base] : sp->ddt.Alm[base+1];
}
double complex pickup_Blm(int l,int m,SPD *sp)
{
  /* Fetch coefficient B_{l,m} from the packed Blm array; same layout
     as pickup_Alm. Returns 0 when |m|>l. */
  const int lm = sp->l_limit;
  const int am = abs(m);
  if (l < am) return 0.0;              /* no such harmonic */
  if (m == 0) return sp->ddt.Blm[l-1];
  const int base = 2*lm*am - am*am + am - 2 - lm + 2*l;  /* start of the (+m,-m) pair */
  return (m > 0) ? sp->ddt.Blm[base] : sp->ddt.Blm[base+1];
}
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixCreateParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   /* Create the underlying ParCSRMatrix from the IJ matrix's row/column
      partitionings, shifted so that the starts are zero-based relative
      to the global first row/column. (Subtracting a zero offset is a
      no-op, so the shift is applied unconditionally.) */
   MPI_Comm            comm             = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt       *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt       *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_BigInt        first_row        = hypre_IJMatrixGlobalFirstRow(matrix);
   HYPRE_BigInt        first_col        = hypre_IJMatrixGlobalFirstCol(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt        row_starts[2];
   HYPRE_BigInt        col_starts[2];
   HYPRE_Int           i;

   for (i = 0; i < 2; i++)
   {
      row_starts[i] = row_partitioning[i] - first_row;
      col_starts[i] = col_partitioning[i] - first_col;
   }

   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);

   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetRowSizesParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   /* Record the user's per-row size estimates in the auxiliary matrix,
      creating the row-space array and/or the auxiliary matrix on first
      use. */
   HYPRE_BigInt          *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt          *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_Int              local_num_rows   = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
   HYPRE_Int              local_num_cols   = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
   hypre_AuxParCSRMatrix *aux_matrix =
      (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   HYPRE_Int             *row_space =
      aux_matrix ? hypre_AuxParCSRMatrixRowSpace(aux_matrix) : NULL;
   HYPRE_Int              row;

   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   for (row = 0; row < local_num_rows; row++)
   {
      row_space[row] = sizes[row];
   }
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* GPU builds also track the total user-declared on-process count. */
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
   for (row = 0; row < local_num_rows; row++)
   {
      hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[row];
   }
#endif

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetDiagOffdSizesParCSR
* sets diag_i inside the diag part of the ParCSRMatrix
* and offd_i inside the offd part,
* requires exact row sizes for diag and offd
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offd_sizes)
{
   /* Store exact per-row entry counts for the diag and offd parts in
      the auxiliary matrix; with exact sizes the assembly needs no aux
      workspace (NeedAux = 0). */
   HYPRE_BigInt          *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt          *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_Int              local_num_rows   = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
   HYPRE_Int              local_num_cols   = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
   hypre_AuxParCSRMatrix *aux_matrix =
      (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixDiagSizes(aux_matrix) =
         hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixOffdSizes(aux_matrix) =
         hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }

   hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOnProcElmtsParCSR
*
*****************************************************************************/
/* Records the user's cap on on-process elements in the auxiliary matrix.
   Only meaningful (and only compiled) for GPU builds; on other builds
   this is a no-op that returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int local_num_rows, local_num_cols, my_id;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Comm_rank(comm, &my_id); /* my_id currently unused beyond this call */
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
/* create the auxiliary matrix lazily on first use */
local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOffProcElmtsParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int max_off_proc_elmts)
{
   /* Record the user's estimate of off-processor elements, creating the
      auxiliary matrix first when necessary. */
   MPI_Comm               comm             = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt          *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt          *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int              my_id;

   hypre_MPI_Comm_rank(comm, &my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      HYPRE_Int local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      HYPRE_Int local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                  local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* GPU builds mirror the value in the user-tracked counter. */
   hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   /* Initialize in the memory location selected by the global hypre handle. */
   HYPRE_MemoryLocation memory_location = hypre_HandleMemoryLocation(hypre_handle());
   return hypre_IJMatrixInitializeParCSR_v2(matrix, memory_location);
}
/* Initializes the ParCSRMatrix and its auxiliary matrix in the requested
   memory location, creating either object if it does not exist yet.
   On a first (unassembled) initialization with host aux storage, the
   user-provided diag/offd row sizes are turned into CSR row pointers.
   After an assembly, only the (destroyed) aux matrix is recreated. */
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
/* aux data lives on host when the execution policy for the requested
   location is host, otherwise on device */
HYPRE_MemoryLocation memory_location_aux =
hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
/* not assembled yet: make sure both objects exist */
if (!par_matrix)
{
hypre_IJMatrixCreateParCSR(matrix);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
}
HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
HYPRE_Int i;
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_ParCSRMatrixNumCols(par_matrix),
NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);
if (memory_location_aux == HYPRE_MEMORY_HOST)
{
/* exact sizes known: prefix-sum them into the CSR row pointers and
   size the value arrays accordingly */
if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(diag)[i + 1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(
aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
hypre_CSRMatrixInitialize(diag);
}
if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(offd)[i + 1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(
aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
hypre_CSRMatrixInitialize(offd);
}
}
if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
/* no aux workspace: seed the per-row insertion cursors from the row pointers */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
}
}
}
else if ( memory_location_aux == HYPRE_MEMORY_HOST )
{
/* AB 4/06 - the assemble routine destroys the aux matrix - so we need
to recreate if initialize is called again
*/
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
hypre_ParCSRMatrixNumCols(par_matrix), NULL);
hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
/* For each global row id in rows[], stores in ncols[] the number of
   stored entries (diag + offd) of that row. Rows not owned by this
   process get ncols = 0 (optionally with a warning). Requires an
   assembled matrix (reads CSR row pointers). */
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                            HYPRE_Int nrows,
                                            HYPRE_BigInt *rows,
                                            HYPRE_Int *ncols)
{
   HYPRE_BigInt row_index;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int i, my_id, index;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_rank(comm, &my_id);

   /* BUG FIX: 'index' is assigned in every iteration, so it must be
      thread-private; it was missing from the private clause, letting
      threads race on it and compute ncols[i] from another thread's row. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index, index) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[0] &&
          row_index < row_partitioning[1])
      {
         /* compute local row number */
         index = (HYPRE_Int)(row_index - row_partitioning[0]);
         ncols[i] = diag_i[index + 1] - diag_i[index] + offd_i[index + 1] - offd_i[index];
      }
      else
      {
         ncols[i] = 0;
         if (print_level)
         {
            hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
                          row_index, my_id);
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
/* Gets values of an assembled IJMatrix.
   Two modes:
   - nrows < 0: "get whole rows" — for |nrows| rows, copies every stored
     (col, value) pair of each locally owned row into cols[]/values[],
     using ncols[] as capacity per row; ncols[] is rewritten with the
     actual counts if any differ (with a warning).
   - nrows >= 0: "get specific entries" — for each requested (row, col),
     looks the entry up in diag or offd and writes its value (0.0 when
     the entry is not stored).
   Rows not owned by this process are skipped with an optional warning. */
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int nrows,
                               HYPRE_Int *ncols,
                               HYPRE_BigInt *rows,
                               HYPRE_BigInt *cols,
                               HYPRE_Complex *values)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   /* BUG FIX: these three are only assigned when num_procs > 1 but are
      referenced in the lookup loops below; initialize to NULL so a
      single-process run never reads indeterminate pointers (the offd
      loops are empty in that case, but the reads were still UB-prone). */
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_BigInt *col_map_offd = NULL;
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_Int i, j, n, ii, indx;
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, row, col_indx, first;
   HYPRE_Int row_local, row_size;
   HYPRE_Int warning = 0;
   HYPRE_Int *counter;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (assemble_flag == 0)
   {
      hypre_error_in_arg(1);
      if (print_level)
      {
         hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
      }
   }

   col_0 = col_starts[0];           /* first and last local column (global ids) */
   col_n = col_starts[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   diag_data = hypre_CSRMatrixData(diag);

   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   if (num_procs > 1)
   {
      offd_j = hypre_CSRMatrixJ(offd);
      offd_data = hypre_CSRMatrixData(offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   }

   if (nrows < 0)
   {
      /* whole-row mode: |nrows| rows, ncols[] gives per-row capacity */
      nrows = -nrows;

      counter = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
      counter[0] = 0;
      for (i = 0; i < nrows; i++)
      {
         counter[i + 1] = counter[i] + ncols[i];
      }

      indx = 0;
      for (i = 0; i < nrows; i++)
      {
         row = rows[i];
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            row_size = diag_i[row_local + 1] - diag_i[row_local] +
                       offd_i[row_local + 1] - offd_i[row_local];
            if (counter[i] + row_size > counter[nrows])
            {
               /* output arrays are too small for this row's entries */
               hypre_error_in_arg(1);
               if (print_level)
               {
                  hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
               }
            }
            if (ncols[i] < row_size)
            {
               warning = 1;
            }
            for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++)
            {
               cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
               values[indx++] = diag_data[j];
            }
            for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++)
            {
               cols[indx] = col_map_offd[offd_j[j]];
               values[indx++] = offd_data[j];
            }
            counter[i + 1] = indx;
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
      if (warning)
      {
         /* report the counts actually written */
         for (i = 0; i < nrows; i++)
         {
            ncols[i] = counter[i + 1] - counter[i];
         }
         if (print_level)
         {
            hypre_printf ("Warning! ncols has been changed!\n");
         }
      }
      hypre_TFree(counter, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* specific-entry mode: look up each (row, col) pair */
      indx = 0;
      for (ii = 0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }

         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            for (i = 0; i < n; i++)
            {
               col_indx = cols[indx] - first;
               values[indx] = 0.0;   /* default when the entry is not stored */
               if (col_indx < col_0 || col_indx > col_n)
                  /* search in offd */
               {
                  for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++)
                  {
                     if (col_map_offd[offd_j[j]] == col_indx)
                     {
                        values[indx] = offd_data[j];
                        break;
                     }
                  }
               }
               else /* search in diag */
               {
                  col_indx = col_indx - col_0;
                  for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)col_indx)
                     {
                        values[indx] = diag_data[j];
                        break;
                     }
                  }
               }
               indx++;
            }
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_Int row_local;
//HYPRE_Int row_len;
HYPRE_BigInt col_0, col_n, row;
HYPRE_Int i, ii, j, n, not_found;
//HYPRE_Int col_indx, cnt1;
HYPRE_BigInt **aux_j;
HYPRE_BigInt *local_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex **aux_data;
HYPRE_Complex *local_data;
HYPRE_Complex *tmp_data;
HYPRE_Int diag_space, offd_space;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int tmp_indx, indx;
HYPRE_Int space, size, old_size;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_BigInt first;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* [col_0, col_n] is the inclusive range of global columns owned locally;
   anything outside it belongs in the offd part */
col_0 = col_partitioning[0];
col_n = col_partitioning[1] - 1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
/* input check -- note that execution continues after flagging the error */
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
}
/* Case 1: matrix already assembled -- values may only overwrite
   existing nonzeros; the sparsity pattern cannot change, and any
   reference to a non-existent entry is an error */
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
HYPRE_Int j_offd;
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
/* a NULL ncols means one value per row */
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
/* total number of stored entries in this row (diag + offd) */
size = diag_i[row_local + 1] - diag_i[row_local] +
offd_i[row_local + 1] - offd_i[row_local];
if (n > size) /* Should we change this and allow this?
This could be same column index, i.e. only last
value is set, previous ones overwritten. */
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
return hypre_error_flag;
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local + 1];
len_offd = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* offd stores compressed column numbers; locate the
   global column in the (sorted) col_map_offd */
j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
for (j = pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* the diagonal entry, when present, is expected at the first
   position of the diag row */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
/* return -1;*/
return hypre_error_flag;
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j = pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
/* return -1; */
return hypre_error_flag;
}
}
indx++;
}
}
/* rows not owned by this process are silently ignored in the
   assembled case */
}
}
/* Case 2: matrix not yet assembled -- stage values either in the
   auxiliary matrix (need_aux) or directly in the preallocated
   ParCSR diag/offd arrays */
else
{
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
/* not enough free space left in this row: stage the overflow in
   temporaries, appended after a realloc below */
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
else
{
tmp_j = NULL;
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i = 0; i < n; i++)
{
/* overwrite the value if the column is already present */
for (j = 0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
/* otherwise append in place if room, or stage in temporaries */
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size + tmp_indx;
/* grow the row to hold the staged overflow entries, then append */
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
size + tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size + tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i = 0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
/* tmp_j and tmp_data are always allocated together, so freeing
   both can be guarded by tmp_j alone */
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_BigInt *big_offd_j;
HYPRE_Int col_j;
/* current fill positions for this row's offd and diag parts */
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
/* big_offd_j holds global column indices until assembly
   compresses them into the local offd numbering */
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local + 1];
offd_space = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j = offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
else /* insert into diag */
{
col_j = (HYPRE_Int)(cols[indx] - col_0);
for (j = diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == col_j)
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = col_j;
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
indx++;
}
/* record the updated fill positions for this row */
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetConstantValuesParCSR
*
* sets all values in an already assembled IJMatrix to a constant value.
*
*****************************************************************************/
/* Host-side worker for hypre_IJMatrixSetConstantValuesParCSR:
 * overwrites every stored coefficient of the diag and offd parts
 * of an assembled ParCSR matrix with the given constant value. */
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex value )
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *diag_a     = hypre_CSRMatrixData(diag);
   HYPRE_Complex      *offd_a     = hypre_CSRMatrixData(offd);
   HYPRE_Int           diag_nnz   = hypre_CSRMatrixNumNonzeros(diag);
   HYPRE_Int           offd_nnz   = hypre_CSRMatrixNumNonzeros(offd);
   HYPRE_Int           k;

   /* fill the local (diag) coefficients */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < diag_nnz; k++)
   {
      diag_a[k] = value;
   }

   /* fill the off-processor (offd) coefficients */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < offd_nnz; k++)
   {
      offd_a[k] = value;
   }
}
/* Sets every stored coefficient of an assembled IJMatrix (ParCSR type)
 * to a constant value, dispatching to the device or host worker
 * depending on the matrix memory location.
 * Flags a generic error if the matrix is not yet assembled. */
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex value )
{
   /* constant fill is only defined for an assembled matrix */
   if (!hypre_IJMatrixAssembleFlag(matrix))
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
      return hypre_error_flag;
   }
#endif

   hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_BigInt col_0, col_n;
HYPRE_Int i, ii, j, n, not_found;
HYPRE_BigInt **aux_j;
HYPRE_BigInt *local_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex **aux_data;
HYPRE_Complex *local_data;
HYPRE_Complex *tmp_data;
HYPRE_Int diag_space, offd_space;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int tmp_indx, indx;
HYPRE_Int space, size, old_size;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int offd_indx, diag_indx;
HYPRE_BigInt first;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* [col_0, col_n] is the inclusive range of global columns owned locally;
   anything outside it belongs in the offd part */
col_0 = col_partitioning[0];
col_n = col_partitioning[1] - 1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
/* Case 1: matrix already assembled -- values are added into existing
   nonzeros; references to non-existent entries are errors.  Rows owned
   by other processes are queued and communicated at assembly. */
if (hypre_IJMatrixAssembleFlag(matrix))
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
HYPRE_Int j_offd;
/* AB - 4/06 - need to get this object*/
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
/* a NULL ncols means one value per row */
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
/* total number of stored entries in this row (diag + offd) */
size = diag_i[row_local + 1] - diag_i[row_local] +
offd_i[row_local + 1] - offd_i[row_local];
if (n > size) /* Should we change this and allow this?
This could be same column index, i.e. only last
value is set, previous ones overwritten. */
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
return hypre_error_flag;
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local + 1];
len_offd = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* offd stores compressed column numbers; locate the
   global column in the (sorted) col_map_offd */
j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
/* return -1; */
}
for (j = pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* the diagonal entry, when present, is expected at the first
   position of the diag row */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j = pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
}
indx++;
}
}
/* not my row */
else
{
/* queue the entries in the off-proc buffers of the auxiliary
   matrix; they are shipped to the owning process at assembly */
if (!aux_matrix)
{
size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
/* first use: allocate the off-proc buffers (off_proc_i stores
   row/count pairs, hence twice the size) */
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n, 1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
/* grow the buffers when the new entries would not fit */
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3 * n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
/* AB - 4/6 - the row should be negative to indicate an add */
/* UMY - 12/28/09 - now positive since we eliminated the feature of
setting on other processors */
/* off_proc_i[off_proc_i_indx++] = row; */
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i = 0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
= current_num_elmts;
}
}
}
/* not assembled */
/* Case 2: stage values either in the auxiliary matrix (need_aux) or
   directly in the preallocated ParCSR diag/offd arrays */
else
{
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
/* not enough free space left in this row: stage the overflow in
   temporaries, appended after a realloc below */
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
else
{
tmp_j = NULL;
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i = 0; i < n; i++)
{
/* accumulate into the value if the column is already present */
for (j = 0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
/* otherwise append in place if room, or stage in temporaries */
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size + tmp_indx;
/* grow the row to hold the staged overflow entries, then append */
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
size + tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size + tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i = 0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
/* tmp_j and tmp_data are always allocated together, so freeing
   both can be guarded by tmp_j alone */
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_BigInt *big_offd_j;
/* current fill positions for this row's offd and diag parts */
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
/* big_offd_j holds global column indices until assembly
   compresses them into the local offd numbering */
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local + 1];
offd_space = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j = offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
/* return 1;*/
return hypre_error_flag;
}
}
not_found = 1;
}
else /* insert into diag */
{
HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
for (j = diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == col_j)
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = col_j;
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
indx++;
}
/* record the updated fill positions for this row */
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
/* queue the entries in the off-proc buffers of the auxiliary
   matrix; they are shipped to the owning process at assembly */
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
/* first use: allocate the off-proc buffers (off_proc_i stores
   row/count pairs, hence twice the size) */
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n, 1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
/* grow the buffers when the new entries would not fit */
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3 * n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i = 0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
= current_num_elmts;
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
/* Destroys an IJMatrix of type ParCSR: frees both the underlying
 * ParCSR matrix object and the auxiliary translator structure. */
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrix    *par_matrix = (hypre_ParCSRMatrix *)    hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par_matrix);
   hypre_AuxParCSRMatrixDestroy(aux_matrix);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixTransposeParCSR
*
* Transposes an IJMatrix of type ParCSRMatrix
*
*****************************************************************************/
/* Computes the transpose of matrix_A and stores it as the object of
 * matrix_AT, releasing any ParCSR object matrix_AT previously held.
 * Also sets the nonzero counts and builds the communication package
 * for the result. */
HYPRE_Int
hypre_IJMatrixTransposeParCSR( hypre_IJMatrix *matrix_A,
                               hypre_IJMatrix *matrix_AT )
{
   hypre_ParCSRMatrix *par_A  = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_AT = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_AT);

   /* discard any object matrix_AT currently holds */
   if (par_AT)
   {
      hypre_ParCSRMatrixDestroy(par_AT);
      hypre_IJMatrixObject(matrix_AT) = NULL;
   }

   hypre_ParCSRMatrixTranspose(par_A, &par_AT, 1);
   hypre_ParCSRMatrixSetNumNonzeros(par_AT);
   hypre_ParCSRMatrixSetDNumNonzeros(par_AT);
   hypre_MatvecCommPkgCreate(par_AT);
   hypre_IJMatrixObject(matrix_AT) = (void *) par_AT;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixNormParCSR
*
* Computes the Infinity norm of an IJMatrix of type ParCSRMatrix
*
* TODO: Add other norms
*
*****************************************************************************/
/* Computes the infinity norm of an IJMatrix of type ParCSR and
 * returns it through "norm". Other norms are not yet supported. */
HYPRE_Int
hypre_IJMatrixNormParCSR( hypre_IJMatrix *matrix,
                          HYPRE_Real *norm )
{
   hypre_ParCSRMatrix *par = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);

   hypre_ParCSRMatrixInfNorm(par, norm);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddParCSR
*
* Performs C = alpha*A + beta*B, where A, B and C are IJMatrices of
* type ParCSRMatrix.
*
*****************************************************************************/
/* Computes C = alpha*A + beta*B for IJMatrices of type ParCSR and
 * stores the result as the object of matrix_C, releasing any ParCSR
 * object matrix_C previously held. Nonzero counts are set on the
 * result, and a communication package is created if the sum did not
 * already provide one. */
HYPRE_Int
hypre_IJMatrixAddParCSR( HYPRE_Complex alpha,
                         hypre_IJMatrix *matrix_A,
                         HYPRE_Complex beta,
                         hypre_IJMatrix *matrix_B,
                         hypre_IJMatrix *matrix_C )
{
   hypre_ParCSRMatrix *par_A = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_B = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_B);
   hypre_ParCSRMatrix *par_C = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_C);

   /* discard any object matrix_C currently holds */
   if (par_C)
   {
      hypre_ParCSRMatrixDestroy(par_C);
      hypre_IJMatrixObject(matrix_C) = NULL;
   }

   hypre_ParCSRMatrixAdd(alpha, par_A, beta, par_B, &par_C);
   hypre_ParCSRMatrixSetNumNonzeros(par_C);
   hypre_ParCSRMatrixSetDNumNonzeros(par_C);

   /* build a comm package only if the sum did not inherit one */
   if (hypre_ParCSRMatrixCommPkg(par_C) == NULL)
   {
      hypre_MatvecCommPkgCreate(par_C);
   }

   hypre_IJMatrixObject(matrix_C) = (void *) par_C;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_MemoryLocation memory_location,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int i, j, k, in_i;
HYPRE_Int myid;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt global_first_col;
HYPRE_BigInt global_first_row;
HYPRE_Int ex_num_contacts = 0, num_rows = 0;
HYPRE_BigInt range_start, range_end;
HYPRE_Int num_elements;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_BigInt row;
HYPRE_Int num_ranges, row_index = 0;
HYPRE_Int num_recvs;
HYPRE_BigInt upper_bound;
HYPRE_Int counter;
HYPRE_Int num_real_procs;
HYPRE_Int /*current_proc,*/ original_proc_indx;
HYPRE_BigInt *row_list = NULL;
HYPRE_Int *row_list_num_elements = NULL;
HYPRE_Int *a_proc_id = NULL, *orig_order = NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_BigInt *ex_contact_buf = NULL;
HYPRE_Int *recv_starts = NULL;
HYPRE_BigInt *response_buf = NULL;
HYPRE_Int *response_buf_starts = NULL;
HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
HYPRE_Int *argsort_contact_procs = NULL;
HYPRE_Int obj_size_bytes, complex_size;
HYPRE_BigInt big_int_size;
HYPRE_Int tmp_int;
HYPRE_BigInt tmp_big_int;
HYPRE_BigInt *col_ptr;
HYPRE_BigInt *big_int_data = NULL;
HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Complex *col_data_ptr;
HYPRE_Complex *complex_data = NULL;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
if (memory_location == HYPRE_MEMORY_DEVICE)
{
HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts,
HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2 * current_num_elmts,
HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts,
HYPRE_MEMORY_HOST);
HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts,
HYPRE_MEMORY_HOST);
hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
for (i = 0; i < current_num_elmts; i++)
{
#if defined(HYPRE_DEBUG)
hypre_assert(tmp[i] < hypre_IJMatrixRowPartitioning(matrix)[0] ||
tmp[i] >= hypre_IJMatrixRowPartitioning(matrix)[1]);
hypre_assert(tmp[i] >= global_first_row &&
tmp[i] < global_first_row + hypre_IJMatrixGlobalNumRows(matrix));
hypre_assert(off_proc_j_h[i] >= global_first_col &&
off_proc_j_h[i] < global_first_col + global_num_cols);
#endif
off_proc_i_h[2 * i] = tmp[i];
off_proc_i_h[2 * i + 1] = 1;
}
off_proc_i_indx = current_num_elmts * 2;
off_proc_i = off_proc_i_h;
off_proc_j = off_proc_j_h;
off_proc_data = off_proc_data_h;
hypre_TFree(tmp, HYPRE_MEMORY_HOST);
}
/* call hypre_IJMatrixAddToValuesParCSR directly inside this function
* with one chunk of data */
HYPRE_Int off_proc_nelm_recv_cur = 0;
HYPRE_Int off_proc_nelm_recv_max = 0;
HYPRE_BigInt *off_proc_i_recv = NULL;
HYPRE_BigInt *off_proc_j_recv = NULL;
HYPRE_Complex *off_proc_data_recv = NULL;
HYPRE_BigInt *off_proc_i_recv_d = NULL;
HYPRE_BigInt *off_proc_j_recv_d = NULL;
HYPRE_Complex *off_proc_data_recv_d = NULL;
num_rows = off_proc_i_indx / 2;
/* verify that we have created the assumed partition */
if (hypre_IJMatrixAssumedPart(matrix) == NULL)
{
hypre_IJMatrixCreateAssumedPartition(matrix);
}
apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
/*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
}
apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
/* get the assumed processor id for each row */
if (num_rows > 0 )
{
for (i = 0; i < num_rows; i++)
{
row = off_proc_i[i * 2];
//if (row < 0) row = -row - 1;
row_list[i] = row;
row_list_num_elements[i] = off_proc_i[i * 2 + 1];
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_cols, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows - 1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i = 1; i < num_rows; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through and create a contact list - need to contact assumed
   processors and find out who the actual row owner is - we will contact with
   a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts + 1, HYPRE_MEMORY_HOST);
ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts * 2, HYPRE_MEMORY_HOST);
counter = 0;
range_end = -1;
for (i = 0; i < num_rows; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0)
{
ex_contact_buf[counter * 2 - 1] = row_list[i - 1];
}
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter * 2;
ex_contact_buf[counter * 2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
&range_start, &range_end);
}
}
/* finish the starts */
ex_contact_vec_starts[counter] = counter * 2;
/* finish the last range */
if (counter > 0)
{
ex_contact_buf[counter * 2 - 1] = row_list[num_rows - 1];
}
/* don't allocate space for responses */
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by a range upper bound */
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges / 2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i = 0; i < num_ranges; i++)
{
upper_bound = response_buf[i * 2 + 1];
counter = 0;
tmp_id = response_buf[i * 2];
/* loop through row_list entries - counting how many are in the range */
while (j < num_rows && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real processor ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int data and HYPRE_Complex data. that we will need to pack
together */
/* first find out how many rows and elements we need to send per proc - so we
can do storage */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
num_elements_total[0] = row_list_num_elements[orig_order[0]];
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i = 1; i < num_rows; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
num_elements_total[counter] += row_list_num_elements[orig_order[i]];
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
num_elements_total[counter] = row_list_num_elements[orig_order[i]];
}
}
}
/* to pack together, we need to use the largest obj. size of
(HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
wasting some storage, but I do not think that it will be a
large amount since this function should not be used on really
large amounts of data anyway*/
big_int_size = sizeof(HYPRE_BigInt);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(big_int_size, complex_size);
/* set up data to be sent to send procs */
/* for each proc, ex_contact_buf contains #rows, row #,
   no. elements, col indices, col data, row #, no. elements, col
   indices, col data, etc. */
/* first calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
ex_contact_vec_starts[0] = -1;
for (i = 0; i < num_real_procs; i++)
{
storage += 1 + 2 * num_rows_per_proc[i] + 2 * num_elements_total[i];
ex_contact_vec_starts[i + 1] = -storage - 1; /* need negative for next loop */
}
hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CTAlloc(char, storage * obj_size_bytes, HYPRE_MEMORY_HOST);
index_ptr = void_contact_buf; /* step through with this index */
/* for each proc: #rows, row #, no. elements,
   col indices, col data, row #, no. elements, col indices, col data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order, so
cheaper to do this*/
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
for (i = 0; i < num_rows; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
counter = 0; /* index into data arrays */
prev_id = -1;
for (i = 0; i < num_rows; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row_list[i] - you lose the negative signs that differentiate
   add/set values */
row = off_proc_i[i * 2];
num_elements = row_list_num_elements[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in_i = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in_i < 0)
{
in_i = -in_i - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* add row # */
hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* add number of elements */
hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* now add col indices */
for (j = 0; j < num_elements; j++)
{
tmp_big_int = off_proc_j[counter + j]; /* col number */
hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i ++;
}
/* now add data */
for (j = 0; j < num_elements; j++)
{
tmp_complex = off_proc_data[counter++]; /* value */
hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* increment the indexes to keep track of where we are - we
* adjust below to be actual starts*/
ex_contact_vec_starts[indx] = in_i;
}
/* some clean up */
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
hypre_TFree(row_list, HYPRE_MEMORY_HOST);
hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
for (i = num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i - 1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* first get the integer info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/* estimate initial storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_TAlloc(char, obj_size_bytes * send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 2,
comm, (void **) &response_buf, &response_buf_starts);
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
/* Now we can unpack the send_proc_objects and call set
and add to values functions. We unpack messages in a
deterministic order, using processor rank */
num_recvs = send_proc_obj.length;
argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
for (i = 0; i < num_recvs; i++)
{
argsort_contact_procs[i] = i;
}
/* This sorts the id array, but the original indices are stored in
 * argsort_contact_procs */
hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs - 1 );
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
for (i = 0; i < num_recvs; i++)
{
/* Find the current processor in order, and reset recv_data_ptr to that processor's message */
original_proc_indx = argsort_contact_procs[i];
/*current_proc = send_proc_obj.id[i];*/
indx = recv_starts[original_proc_indx];
recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx * obj_size_bytes);
/* get the number of rows for this recv */
hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j = 0; j < num_rows; j++) /* for each row: unpack info */
{
/* row # */
hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* num elements for this row */
hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* col indices */ /* Need to check this again !!!! */
if (big_int_size == obj_size_bytes)
{
col_ptr = (HYPRE_BigInt *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
}
else /* copy data */
{
if (big_int_data_size < num_elements)
{
big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k = 0; k < num_elements; k++)
{
hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_ptr = big_int_data;
}
/* col data */
if (complex_size == obj_size_bytes)
{
col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
}
else /* copy data */
{
if (complex_data_size < num_elements)
{
complex_data =
hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k = 0; k < num_elements; k++)
{
hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_data_ptr = complex_data;
}
if (memory_location == HYPRE_MEMORY_HOST)
{
hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr);
}
else
{
HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
if (nelm_new > off_proc_nelm_recv_max)
{
off_proc_nelm_recv_max = nelm_new * 2;
off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max,
HYPRE_MEMORY_HOST);
off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max,
HYPRE_MEMORY_HOST);
off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max,
HYPRE_MEMORY_HOST);
}
HYPRE_Int i;
for (i = 0; i < num_elements; i++)
{
off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
}
hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex,
num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
off_proc_nelm_recv_cur = nelm_new;
}
indx += (num_elements * 2);
}
}
if (memory_location == HYPRE_MEMORY_DEVICE)
{
off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d,
NULL, off_proc_j_recv_d,
off_proc_data_recv_d, "add");
#endif
}
hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
if (big_int_data)
{
hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
}
if (complex_data)
{
hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
}
if (memory_location == HYPRE_MEMORY_DEVICE)
{
hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);
return hypre_error_flag;
}
/*--------------------------------------------------------------------
 * hypre_FillResponseIJOffProcVals
 *
 * Fill-response callback for the second data exchange in
 * hypre_IJMatrixAssembleOffProcValsParCSR: appends the raw bytes of one
 * incoming contact message to the send_proc_obj accumulator (growing
 * its bookkeeping and element storage as needed) and returns an empty
 * response (confirmation only).
 *--------------------------------------------------------------------*/

HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
                                HYPRE_Int contact_size,
                                HYPRE_Int contact_proc,
                                void *ro,
                                MPI_Comm comm,
                                void **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements     *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

   /* messages are packed in units of the larger of the two element types */
   HYPRE_Int object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));
   HYPRE_Int myid;
   HYPRE_Int num_contacts;   /* number of contacts stored so far */
   HYPRE_Int offset;         /* element count already stored (start of this message) */
   HYPRE_Int new_length;
   void     *dest;

   hypre_MPI_Comm_rank(comm, &myid);

   /* grow the per-contact bookkeeping (vec_starts and id) when full */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 20; /* room for 20 more contacts */
      send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                                                 send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if (send_proc_obj->id != NULL)
      {
         send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                            send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   /* record which processor this contact came from */
   num_contacts = send_proc_obj->length;
   offset = send_proc_obj->vec_starts[num_contacts];
   if (send_proc_obj->id != NULL)
   {
      send_proc_obj->id[num_contacts] = contact_proc;
   }

   /* grow element storage when this message does not fit */
   if (send_proc_obj->element_storage_length < offset + contact_size)
   {
      new_length = offset + hypre_max(contact_size, 100);
      send_proc_obj->v_elements = hypre_TReAlloc((char*)send_proc_obj->v_elements,
                                                 char, new_length * object_size, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = new_length;
   }

   /* append the raw contact bytes after the data already stored */
   dest = (void *) ((char *) send_proc_obj->v_elements + offset * object_size);
   hypre_TMemcpy(dest, p_recv_contact_buf, char, object_size * contact_size, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_HOST);

   send_proc_obj->vec_starts[num_contacts + 1] = offset + contact_size;
   send_proc_obj->length++;

   /* output - no message to return (confirmation only) */
   *response_message_size = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------
 * hypre_FindProc
 *
 * Binary search over a partition array: returns the index p such that
 * list[p] <= value < list[p+1], or -1 when value lies outside
 * [list[0], list[list_length]).  The code reads list[list_length], so
 * list must have list_length + 1 entries.
 *--------------------------------------------------------------------*/

HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int lo = 0;
   HYPRE_Int hi = list_length;
   HYPRE_Int mid;

   /* value outside the range covered by the list */
   if (value < list[lo] || value >= list[hi])
   {
      return -1;
   }

   /* invariant: list[lo] <= value < list[hi] */
   while (hi - lo > 1)
   {
      mid = (lo + hi) / 2;
      if (value < list[mid])
      {
         hi = mid;
      }
      else
      {
         lo = mid;
      }
   }

   return lo;
}
/******************************************************************************
 *
 * hypre_IJMatrixAssembleParCSR
 *
 * Assembles an IJMatrix from its AuxParCSRMatrix auxiliary structure.
 * Collective over the matrix communicator.  Steps:
 *   1. Agree globally on whether any rank holds an aux matrix; ranks
 *      without one create an empty placeholder so the collectives below
 *      are matched on every rank.
 *   2. If any rank buffered off-processor entries, ship them to their
 *      owner ranks via hypre_IJMatrixAssembleOffProcValsParCSR.
 *   3. If not yet assembled, move the aux row-wise buffers into the
 *      final diag/offd CSR parts (threaded), place the diagonal entry
 *      first in each diag row, and build col_map_offd with local offd
 *      column indices.
 * The auxiliary structure is destroyed on exit.
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *diag_j;
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *diag_data;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_Int i, j, j0;
   HYPRE_Int num_cols_offd;
   HYPRE_Int *diag_pos;          /* per nonzero row: position of diagonal entry, -1 if none */
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int *rownnz;
   HYPRE_Int *row_length;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_rows;
   HYPRE_Int num_rownnz;
   HYPRE_Int i_diag, i_offd;
   HYPRE_BigInt col_0, col_n;    /* first/last global column owned locally */
   HYPRE_Int nnz_offd;
   HYPRE_BigInt *big_offd_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex temp;
   HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int current_num_elmts;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int offd_proc_elmts;
   //HYPRE_Int new_off_proc_i_indx;
   //HYPRE_Int cancel_indx;
   //HYPRE_Int col_indx;
   //HYPRE_Int current_indx;
   //HYPRE_Int current_i;
   //HYPRE_Int row_len;
   HYPRE_Int max_num_threads;
   HYPRE_Int aux_flag, aux_flag_global;

   HYPRE_ANNOTATE_FUNC_BEGIN;

   max_num_threads = hypre_NumThreads();

   /* first find out if anyone has an aux_matrix, and create one if you don't
    * have one, but other procs do */
   aux_flag = 0;
   aux_flag_global = 0;
   if (aux_matrix)
   {
      aux_flag = 1;
   }
   hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   if (aux_flag_global && (!aux_flag))
   {
      /* another rank has an aux matrix: create an empty one here so the
         collective off-proc exchange below is matched on every rank */
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   if (aux_matrix)
   {
      /* first delete all cancelled elements */
      /*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      if (cancel_indx)
      {
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         col_indx = 0;
         current_i = 0;
         current_indx = 0;
         new_off_proc_i_indx = off_proc_i_indx;
         for (i=0; i < off_proc_i_indx; i= i+2)
         {
            row_len = off_proc_i[i+1];
            for (j=0; j < off_proc_i[i+1]; j++)
            {
               if (off_proc_j[col_indx] == -1)
               {
                  col_indx++;
                  row_len--;
                  current_num_elmts--;
               }
               else
               {
                  off_proc_j[current_indx] = off_proc_j[col_indx];
                  off_proc_data[current_indx++] = off_proc_data[col_indx++];
               }
            }
            if (row_len)
            {
               off_proc_i[current_i] = off_proc_i[i];
               off_proc_i[current_i+1] = row_len;
               current_i += 2;
            }
            else
            {
               new_off_proc_i_indx -= 2;
            }
         }
         hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
         hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
      }*/
      off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);

      /* collective: does ANY rank hold off-processor entries? */
      hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (offd_proc_elmts)
      {
         max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         /* ship buffered off-processor entries to their owner ranks
            (collective - entered on every rank when any rank has data) */
         hypre_IJMatrixAssembleOffProcValsParCSR(
            matrix, off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
            HYPRE_MEMORY_HOST,
            off_proc_i, off_proc_j, off_proc_data);
      }
   }

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      hypre_MPI_Comm_size(comm, &num_procs);
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      col_0 = col_partitioning[0];
      col_n = col_partitioning[1] - 1;

      /* move data into ParCSRMatrix if not there already */
      if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         HYPRE_Int *diag_array;   /* per-thread diag entry counts / prefix sums */
         HYPRE_Int *offd_array;   /* per-thread offd entry counts / prefix sums */

         /* Update nonzero rows of aux_matrix */
         hypre_AuxParCSRMatrixSetRownnz(aux_matrix);

         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
         row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
         num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(aux_matrix);
         rownnz = hypre_AuxParCSRMatrixRownnz(aux_matrix);

         diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         diag_pos = hypre_TAlloc(HYPRE_Int, num_rownnz, HYPRE_MEMORY_HOST);
         i_diag = i_offd = 0;

#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel private(i, j, i_diag, i_offd)
#endif
         {
            HYPRE_BigInt *local_j;
            HYPRE_Complex *local_data;
            HYPRE_Int ii, rest, size, ns, ne;
            HYPRE_Int num_threads, my_thread_num;

            num_threads = hypre_NumActiveThreads();
            my_thread_num = hypre_GetThreadNum();

            /* split the num_rownnz nonzero rows into contiguous [ns, ne)
               chunks, distributing the remainder over the first threads */
            size = num_rownnz / num_threads;
            rest = num_rownnz - size * num_threads;
            if (my_thread_num < rest)
            {
               ns = my_thread_num * (size + 1);
               ne = (my_thread_num + 1) * (size + 1);
            }
            else
            {
               ns = my_thread_num * size + rest;
               ne = (my_thread_num + 1) * size + rest;
            }

            /* pass 1: count diag/offd entries in this thread's rows and
               remember where the diagonal entry sits in each row */
            i_diag = i_offd = 0;
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               diag_pos[i] = -1;
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     i_offd++;
                  }
                  else
                  {
                     i_diag++;
                     if ((HYPRE_Int)(local_j[j] - col_0) == i)
                     {
                        diag_pos[i] = j;
                     }
                  }
               }
            }
            diag_array[my_thread_num] = i_diag;
            offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* thread 0: turn per-thread counts into inclusive prefix sums
               and allocate the final CSR arrays */
            if (my_thread_num == 0)
            {
               i_diag = 0;
               i_offd = 0;
               for (i = 0; i < num_threads; i++)
               {
                  i_diag += diag_array[i];
                  i_offd += offd_array[i];
                  diag_array[i] = i_diag;
                  offd_array[i] = i_offd;
               }
               diag_i[num_rows] = i_diag;
               offd_i[num_rows] = i_offd;

               hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));

               diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
            }
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* each thread starts filling where the previous thread's range ends */
            if (my_thread_num)
            {
               i_diag = diag_array[my_thread_num - 1];
               i_offd = offd_array[my_thread_num - 1];
            }
            else
            {
               i_diag = 0;
               i_offd = 0;
            }

            /* pass 2: fill diag/offd; the diagonal entry (if present) is
               written first in its diag row */
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               diag_i[ii] = i_diag;
               offd_i[ii] = i_offd;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               if (diag_pos[i] > -1)
               {
                  diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
                  diag_data[i_diag++] = local_data[diag_pos[i]];
               }
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     big_offd_j[i_offd] = local_j[j];
                     offd_data[i_offd++] = local_data[j];
                  }
                  else if (j != diag_pos[i])
                  {
                     diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
                     diag_data[i_diag++] = local_data[j];
                  }
               }
            }

            /* Correct diag_i and offd_i: rows not listed in rownnz are empty
               and inherit the row pointer of the next nonzero row, so the
               arrays form a valid CSR row-pointer array */
            if (rownnz != NULL)
            {
#ifdef HYPRE_USING_OPENMP
               #pragma omp barrier
#endif
               for (i = ns; i < (ne - 1); i++)
               {
                  for (ii = rownnz[i] + 1; ii < rownnz[i + 1]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[i + 1]];
                     offd_i[ii] = offd_i[rownnz[i + 1]];
                  }
               }

               if (my_thread_num < (num_threads - 1))
               {
                  for (ii = rownnz[ne - 1] + 1; ii < rownnz[ne]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[ne]];
                     offd_i[ii] = offd_i[rownnz[ne]];
                  }
               }
               else
               {
                  /* last thread: fill through the end of the local rows */
                  for (ii = rownnz[ne - 1] + 1; ii < num_rows; ii++)
                  {
                     diag_i[ii] = diag_i[num_rows];
                     offd_i[ii] = offd_i[num_rows];
                  }
               }
            }
         } /* end parallel region */

         hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
         hypre_TFree(offd_array, HYPRE_MEMORY_HOST);

         hypre_CSRMatrixJ(diag) = diag_j;
         hypre_CSRMatrixData(diag) = diag_data;
         hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
         if (offd_i[num_rows] > 0)
         {
            hypre_CSRMatrixJ(offd) = offd_j;
            hypre_CSRMatrixBigJ(offd) = big_offd_j;
            hypre_CSRMatrixData(offd) = offd_data;
         }
         hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
         hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* data is already in the CSR arrays;
            move diagonal element into first space of each diag row */
         big_offd_j = hypre_CSRMatrixBigJ(offd);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private (i,j,j0,temp)
#endif
         for (i = 0; i < num_rows; i++)
         {
            j0 = diag_i[i];
            for (j = j0; j < diag_i[i + 1]; j++)
            {
               if (diag_j[j] == i)
               {
                  /* swap the diagonal entry into leading position j0 */
                  temp = diag_data[j0];
                  diag_data[j0] = diag_data[j];
                  diag_data[j] = temp;
                  diag_j[j] = diag_j[j0];
                  diag_j[j0] = i;
                  break;
               }
            }
         }
         offd_j = hypre_CSRMatrixJ(offd);
         if (!offd_j && offd_i[num_rows])
         {
            offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
            hypre_CSRMatrixJ(offd) = offd_j;
         }
      }

      /* generate the nonzero rows inside offd and diag by calling */
      hypre_CSRMatrixSetRownnz(diag);
      hypre_CSRMatrixSetRownnz(offd);

      /* generate col_map_offd */
      nnz_offd = offd_i[num_rows];
      if (nnz_offd)
      {
         /* sort a copy of the global offd column indices and compress out
            duplicates to obtain the distinct columns */
         tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < nnz_offd; i++)
         {
            tmp_j[i] = big_offd_j[i];
         }
         hypre_BigQsort0(tmp_j, 0, nnz_offd - 1);
         num_cols_offd = 1;
         for (i = 0; i < nnz_offd - 1; i++)
         {
            if (tmp_j[i + 1] > tmp_j[i])
            {
               tmp_j[num_cols_offd++] = tmp_j[i + 1];
            }
         }
         col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < num_cols_offd; i++)
         {
            col_map_offd[i] = tmp_j[i];
         }

         /* translate each global offd column index to its local index */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(i)
#endif
         for (i = 0; i < nnz_offd; i++)
         {
            offd_j[i] = hypre_BigBinarySearch(col_map_offd, big_offd_j[i], num_cols_offd);
         }

         /* shift the map by the global first column when it is nonzero */
         if (base)
         {
            for (i = 0; i < num_cols_offd; i++)
            {
               col_map_offd[i] -= base;
            }
         }
         hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
         hypre_CSRMatrixNumCols(offd) = num_cols_offd;
         hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
         hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
         hypre_CSRMatrixBigJ(offd) = NULL;
      }
      hypre_IJMatrixAssembleFlag(matrix) = 1;
   }

   /* the auxiliary structure is no longer needed */
   hypre_AuxParCSRMatrixDestroy(aux_matrix);
   hypre_IJMatrixTranslator(matrix) = NULL;
   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
/*
 * hypre_IJMatrixSetValuesOMPParCSR
 *
 * Sets (overwrites) coefficients of an IJMatrix before assembly, splitting
 * the nrows input rows contiguously across OpenMP threads.  Correct
 * threading REQUIRES rows[i] != rows[j] for i != j, so no two threads ever
 * touch the same row.
 *
 * matrix      : the IJ matrix wrapper (must hold a ParCSR object)
 * nrows       : number of rows being set (error if negative)
 * ncols       : ncols[i] = number of entries for row i; NULL means 1 per row
 * rows        : global row indices
 * row_indexes : row_indexes[i] = offset of row i's entries in cols/values
 * cols        : global column indices of the entries
 * values      : coefficient values
 *
 * Returns hypre_error_flag (0 on success).  On a per-row error inside the
 * parallel region the thread records the error, increments the shared
 * error_flag atomically, and abandons its remaining rows.
 */
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                  HYPRE_Int             nrows,
                                  HYPRE_Int            *ncols,
                                  const HYPRE_BigInt   *rows,
                                  const HYPRE_Int      *row_indexes,
                                  const HYPRE_BigInt   *cols,
                                  const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, first;
   //HYPRE_Int cancel_indx;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_BigInt *big_offd_j;
   HYPRE_Complex *offd_data;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   //HYPRE_Int off_proc_i_indx;
   //HYPRE_BigInt *off_proc_i;
   //HYPRE_BigInt *off_proc_j;
   //HYPRE_Int *offproc_cnt;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   //HYPRE_Int max_num_threads;
   HYPRE_Int error_flag = 0;
   /*HYPRE_Complex *off_proc_data;*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   //offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   /* [col_0, col_n] is this rank's owned global column range;
      anything outside it belongs in the off-diagonal (offd) part */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
      return hypre_error_flag;
   }
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      /* Assembled path: the sparsity pattern is fixed, so values may only
         overwrite entries that already exist in diag/offd. */
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;
      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      /* NOTE(review): col_map_offd/offd_j/offd_data stay unset when
         num_cols_offd == 0; the offd branch below then passes an
         indeterminate col_map_offd to hypre_BigBinarySearch (with length 0,
         so it is never dereferenced) — confirm this is intended. */
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      /*if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      }*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         //HYPRE_Int row_len;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         /* Partition rows [0, nrows) into contiguous per-thread ranges
            [ns, ne); the first `rest` threads get one extra row. */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }
         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               size = diag_i[row_local + 1] - diag_i[row_local]
                      + offd_i[row_local + 1] - offd_i[row_local];
               /* cannot set more entries than exist in the assembled row */
               if (n > size)
               {
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
                  /*return hypre_error_flag; */
               }
               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local + 1];
               len_offd = offd_i[row_local + 1];
               not_found = 1;
               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     /* map the global column to a local offd column index */
                     j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     /* linear scan of the row's offd entries for j_offd */
                     for (j = pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* the diagonal entry, when stored, is first in the row */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     diag_data[pos_diag] = values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j = pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                  }
                  indx++;
               }
            }
            /* processor does not own the row */
            //else /*search for previous occurrences and cancel them */
            /*{
               if (aux_matrix)
               {
                  col_indx = 0;
                  for (i=0; i < off_proc_i_indx; i=i+2)
                  {
                     row_len = off_proc_i[i+1];
                     if (off_proc_i[i] == row)
                     {
                        for (j=0; j < n; j++)
                        {
                           cnt1 = col_indx;
                           for (k=0; k < row_len; k++)
                           {
                              if (off_proc_j[cnt1] == cols[j])
                              {
                                 off_proc_j[cnt1++] = -1;
                                 offproc_cnt[my_thread_num]++; */
            /*cancel_indx++;*/
            /* if no repetition allowed */
            /* off_proc_j[col_indx] = -1;
               col_indx -= k;
               break; */
            /*}
                              else
                              {
                                 cnt1++;
                              }
                           }
                        }
                        col_indx += row_len;
                     }
                     else
                     {
                        col_indx += row_len;
                     }
                  }*/
            /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
            //}
            //}
         }
      } /*end parallel region */
   }
   else /* matrix not assembled */
   {
      /* Unassembled path: either stage entries in the auxiliary (aux)
         row-wise storage, or — when need_aux is off — write directly into
         the preallocated ParCSR diag/offd arrays. */
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      /*if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      }*/
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            offd_data = hypre_CSRMatrixData(offd);
            /* before assembly offd columns are stored as global (big)
               indices in big_offd_j; allocate it lazily */
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            if (!big_offd_j)
            {
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         //HYPRE_Int row_len;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;
         /* same contiguous per-thread row partition as the assembled path */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }
         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               if (need_aux)
               {
                  /* aux storage: overwrite matching columns in place;
                     overflow beyond row_space goes into tmp_j/tmp_data and
                     the row is reallocated afterwards */
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i = 0; i < n; i++)
                  {
                     for (j = 0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }
                  row_length[row_local] = size + tmp_indx;
                  if (tmp_indx)
                  {
                     /* grow the aux row and append the overflow entries */
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                       size + tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                          HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size + tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }
                  cnt = size;
                  for (i = 0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }
                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;
                  /* per-row fill cursors into the preallocated CSR arrays */
                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local + 1];
                  offd_space = offd_i[row_local + 1];
                  not_found = 1;
                  for (i = 0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                        /* insert into offd */
                     {
                        for (j = offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j = diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                           {
                              diag_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }
                  /* persist the advanced fill cursors */
                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* processor does not own the row */
            /*else
            {
               if (aux_matrix)
               {
                  col_indx = 0;
                  for (i=0; i < off_proc_i_indx; i=i+2)
                  {
                     row_len = off_proc_i[i+1];
                     if (off_proc_i[i] == row)
                     {
                        for (j=0; j < n; j++)
                        {
                           cnt1 = col_indx;
                           for (k=0; k < row_len; k++)
                           {
                              if (off_proc_j[cnt1] == cols[j])
                              {
                                 off_proc_j[cnt1++] = -1; */
            /*cancel_indx++;*/
            //offproc_cnt[my_thread_num]++;
            /* if no repetition allowed */
            /* off_proc_j[col_indx] = -1;
               col_indx -= k;
               break; */
            /* }
                              else
                              {
                                 cnt1++;
                              }
                           }
                        }
                        col_indx += row_len;
                     }
                     else
                     {
                        col_indx += row_len;
                     }
                  }*/
            /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
            /*}
            }*/
         }
      } /* end parallel region */
   }
   /*if (error_flag)
   {
      return hypre_error_flag;
   }
   if (aux_matrix)
   {
      for (i1=0; i1 < max_num_threads; i1++)
      {
         cancel_indx += offproc_cnt[i1];
      }
      hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
   }*/
   //hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
/*
 * hypre_IJMatrixAddToValuesOMPParCSR
 *
 * Adds (accumulates) values into an IJMatrix, splitting the nrows input
 * rows contiguously across OpenMP threads.  Locally-owned rows are updated
 * in place (assembled matrix) or staged in aux/CSR storage (unassembled).
 * Rows owned by other processors cannot be handled safely inside the
 * parallel region, so each thread records (row-index, value-offset) pairs
 * in a private offproc_cnt[] buffer; those entries are appended to the
 * aux matrix's off-processor queue serially after the parallel region.
 *
 * Per-thread offproc buffer layout: [0] = capacity, [1] = next free slot,
 * then pairs (ii, indx); both grow in steps of 200 ints.
 *
 * Parameters are as for hypre_IJMatrixSetValuesOMPParCSR.
 * Returns hypre_error_flag (0 on success).
 */
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                    HYPRE_Int             nrows,
                                    HYPRE_Int            *ncols,
                                    const HYPRE_BigInt   *rows,
                                    const HYPRE_Int      *row_indexes,
                                    const HYPRE_BigInt   *cols,
                                    const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, first;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_BigInt *big_offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int **offproc_cnt;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   HYPRE_Int max_num_threads;
   HYPRE_Int error_flag = 0;
   HYPRE_Int i1;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   /* one off-processor staging buffer slot per potential thread */
   offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      offproc_cnt[i1] = NULL;
   }
   /* [col_0, col_n] is this rank's owned global column range */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
   {
      /* Assembled path: pattern fixed, only add to existing entries. */
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int *my_offproc_cnt = NULL;
         /* contiguous per-thread row range [ns, ne); first `rest`
            threads take one extra row */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }
         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               size = diag_i[row_local + 1] - diag_i[row_local]
                      + offd_i[row_local + 1] - offd_i[row_local];
               if (n > size)
               {
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
                  /*return hypre_error_flag; */
               }
               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local + 1];
               len_offd = offd_i[row_local + 1];
               not_found = 1;
               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     /* map global column to local offd column index */
                     j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     for (j = pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* the diagonal entry, when stored, is first in the row */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     diag_data[pos_diag] += values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j = pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                  }
                  indx++;
               }
            }
            /* not my row */
            /* need to find solution for threaded version!!!! */
            /* could save row number and process later .... */
            else
            {
               /* stage (ii, indx) in this thread's private buffer for the
                  serial off-processor pass after the parallel region */
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i + 2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  /* buffer full: grow by 200 slots and re-publish pointer */
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size + 200, HYPRE_MEMORY_HOST);
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
                  /* NOTE(review): offproc_cnt[my_thread_num] is not updated
                     after realloc here — the serial pass below may read the
                     stale (freed) pointer; confirm against upstream hypre. */
               }
            }
         }
      } /* end parallel region */
   }
   /* not assembled */
   else
   {
      /* Unassembled path: stage in aux rows or write directly into the
         preallocated CSR arrays, as in SetValues, but accumulating. */
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            /* before assembly offd columns are stored as global (big)
               indices in big_offd_j; allocate it lazily */
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            offd_data = hypre_CSRMatrixData(offd);
            if (!big_offd_j)
            {
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         HYPRE_Int row_local;
         HYPRE_BigInt row;
         HYPRE_Int i, j, ii, n;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;
         HYPRE_Int *my_offproc_cnt = NULL;
         /* contiguous per-thread row range [ns, ne) */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }
         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               if (need_aux)
               {
                  /* aux storage: add to matching columns; overflow beyond
                     row_space goes to tmp buffers, row reallocated after */
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i = 0; i < n; i++)
                  {
                     for (j = 0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }
                  row_length[row_local] = size + tmp_indx;
                  if (tmp_indx)
                  {
                     /* grow the aux row and append the overflow entries */
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                       size + tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                          HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size + tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }
                  cnt = size;
                  for (i = 0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }
                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;
                  /* per-row fill cursors into the preallocated CSR arrays */
                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local + 1];
                  offd_space = offd_i[row_local + 1];
                  not_found = 1;
                  for (i = 0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                        /* insert into offd */
                     {
                        for (j = offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j = diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                           {
                              diag_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }
                  /* persist the advanced fill cursors */
                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* not my row */
            else
            {
               /* stage (ii, indx) for the serial off-processor pass below */
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i + 2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  /* buffer full: grow by 200 slots */
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size + 200, HYPRE_MEMORY_HOST);
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
                  /* NOTE(review): offproc_cnt[my_thread_num] not refreshed
                     after realloc — see matching note in assembled branch. */
               }
            }
         }
      } /*end parallel region */
   }
   if (error_flag)
   {
      return hypre_error_flag;
   }
   /* ensure a translator exists before queueing off-processor entries */
   if (!aux_matrix)
   {
      HYPRE_Int size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   /* Serial pass: drain each thread's staged (ii, indx) pairs into the aux
      matrix's off-processor queue (off_proc_i holds (row, n) pairs;
      off_proc_j/off_proc_data hold the column/value stream), growing the
      queue arrays as needed.  Entries are communicated at assembly. */
   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      if (offproc_cnt[i1])
      {
         HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
         HYPRE_Int i, i2, ii, n, indx;
         HYPRE_BigInt row;
         for (i2 = 2; i2 < my_offproc_cnt[1]; i2 += 2)
         {
            ii = my_offproc_cnt[i2];
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = my_offproc_cnt[i2 + 1];
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-processor entry: allocate the queue arrays */
               max_off_proc_elmts = hypre_max(n, 1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the queue arrays */
               max_off_proc_elmts += 3 * n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i = 0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
         hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
      }
   }
   hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
|
com3.c | #include <mpi.h>
extern int local_cell_blocks;
extern int local_edge_blocks;
#include "grid.h"
#include "memory.h"
#include "component.h"
#include "io.h"
#include <stdint.h>
/* Component entry points implemented in this file (registered in `com3`). */
void com3_init(GRID * g);
void com3_compute(GRID * g);
void com3_io(GRID * g);
double com3_flops(GRID * g);
double com3_memory(GRID * g);
uint64_t com3_checksum(GRID *);
void com3_cleanup(GRID * g);
/* Individual operators making up this component's compute step. */
void O2VertIntegration(GRID * g);
void O3Indirect2D(GRID * g);
void O4Indirect3D(GRID * g);
void O5Indirect3D(GRID * g);
/* Grid-variable descriptors: each anonymous struct holds a name, a
   location code (loc), a dimensionality (dim), and a union of 2-D or
   3-D pointer-to-pointer data (indexed [block][edge] or
   [block][level][edge]); dim selects which union member is valid. */
/* GVAL field used by the vertical-integration operator */
struct {
   char *name;
   int loc;
   int dim;
   union {
      GVAL *restrict * restrict p2;
      GVAL *restrict * restrict * restrict p3;
   } data_pointer;
} *gv_vi;
/* parameter field for the 2-D indirect operator */
struct {
   char *name;
   int loc;
   int dim;
   union {
      GVAL *restrict * restrict p2;
      GVAL *restrict * restrict * restrict p3;
   } data_pointer;
} *gv_ind2Dparam;
/* variable field for the 2-D indirect operator */
struct {
   char *name;
   int loc;
   int dim;
   union {
      GVAL *restrict * restrict p2;
      GVAL *restrict * restrict * restrict p3;
   } data_pointer;
} *gv_ind2Dvar;
/* variable field for the 3-D indirect operators */
struct {
   char *name;
   int loc;
   int dim;
   union {
      GVAL *restrict * restrict p2;
      GVAL *restrict * restrict * restrict p3;
   } data_pointer;
} *gv_ind3Dvar;
/* integer index tables (same descriptor layout, int payload):
   t3DBlk/t3DIdx hold per-(block,level,edge) target block/index pairs for
   O4; t3DVer holds per-(block,level,edge) vertical indices for O5. */
struct {
   char *name;
   int loc;
   int dim;
   union {
      int *restrict * restrict p2;
      int *restrict * restrict * restrict p3;
   } data_pointer;
} *t3DBlk;
struct {
   char *name;
   int loc;
   int dim;
   union {
      int *restrict * restrict p2;
      int *restrict * restrict * restrict p3;
   } data_pointer;
} *t3DIdx;
struct {
   char *name;
   int loc;
   int dim;
   union {
      int *restrict * restrict p2;
      int *restrict * restrict * restrict p3;
   } data_pointer;
} *t3DVer;
/* I/O handles for the fields written by com3_io */
io_var_t io_gv_vi;
io_var_t io_gv_ind2Dvar;
io_var_t io_gv_ind3Dvar;
/* Component descriptor wiring the entry points above into the framework. */
MODEL_COMPONENT com3 = { 0, com3_init, com3_compute, com3_io, com3_flops, com3_memory, com3_checksum, com3_cleanup };
/* gradient field defined by another component; read during init_opO4 */
extern struct {
   char *name;
   int loc;
   int dim;
   union {
      GVAL *restrict * restrict p2;
      GVAL *restrict * restrict * restrict p3;
   } data_pointer;
} *gv_grad;
/*
 * init_opO4 - allocate and populate the index tables used by O4Indirect3D.
 *
 * Allocates t3DBlk and t3DIdx as single-slab 3-D int arrays indexed
 * [block][level][edge] (one malloc holding the block-pointer array, the
 * row-pointer arrays, and the int payload), zero-initializes them, then
 * fills each (block, level, edge) slot with the upwind neighbor-cell
 * block/index taken from g->eCellBlk/g->eCellIdx, selected by the sign of
 * gv_grad at that slot.
 *
 * g : the grid descriptor (block counts, sizes, MPI decomposition).
 */
void init_opO4(GRID * g)
{
   {
      int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
      /* BUG FIX: was malloc(24) — a hard-coded LP64 size for the anonymous
         descriptor struct (8 + 4 + 4 + 8). sizeof is correct on every ABI
         and survives struct changes. */
      t3DBlk = malloc(sizeof(*t3DBlk));
      t3DBlk->name = "t3DBlk";
      t3DBlk->loc = 1;
      t3DBlk->dim = 3;
      /* single allocation: [block ptrs][row ptrs][int payload] */
      t3DBlk->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(int) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *));
      char *pos = (char *) t3DBlk->data_pointer.p3 + num_blocks * sizeof(char *);
      char *pos2 = (char *) t3DBlk->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *);
      for (int b = 0; b < num_blocks; b++) {
         t3DBlk->data_pointer.p3[b] = (int * *) pos;
         pos += g->height * sizeof(char *);
         for (int k = 0; k < g->height; k++) {
            t3DBlk->data_pointer.p3[b][k] = (int *) pos2;
            pos2 += g->blkSize * sizeof(int);
            for (int e = 0; e < g->blkSize; e++) {
               t3DBlk->data_pointer.p3[b][k][e] = (int) 0;
            }
         }
      }
   }
   {
      int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
      /* BUG FIX: was malloc(24) — see note above. */
      t3DIdx = malloc(sizeof(*t3DIdx));
      t3DIdx->name = "t3DIdx";
      t3DIdx->loc = 1;
      t3DIdx->dim = 3;
      /* single allocation: [block ptrs][row ptrs][int payload] */
      t3DIdx->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(int) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *));
      char *pos = (char *) t3DIdx->data_pointer.p3 + num_blocks * sizeof(char *);
      char *pos2 = (char *) t3DIdx->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *);
      for (int b = 0; b < num_blocks; b++) {
         t3DIdx->data_pointer.p3[b] = (int * *) pos;
         pos += g->height * sizeof(char *);
         for (int k = 0; k < g->height; k++) {
            t3DIdx->data_pointer.p3[b][k] = (int *) pos2;
            pos2 += g->blkSize * sizeof(int);
            for (int e = 0; e < g->blkSize; e++) {
               t3DIdx->data_pointer.p3[b][k][e] = (int) 0;
            }
         }
      }
   }
   {
      /* [min_block, max_block) = this rank's share of the edge blocks under
         a block-cyclic-free, contiguous decomposition (ceil-divided). */
      size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
      size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
      for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
         for (size_t height_index = (0); height_index < (g->height); height_index++) {
            for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
               /* pick neighbor cell 0 or 1 by the sign of the gradient */
               if (gv_grad->data_pointer.p3[(block_index)][(height_index)][(edge_index)] > 0) {
                  t3DBlk->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = g->eCellBlk[0]->data_pointer.p2[(block_index)][(edge_index)];
                  t3DIdx->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = g->eCellIdx[0]->data_pointer.p2[(block_index)][(edge_index)];
               } else {
                  t3DBlk->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = g->eCellBlk[1]->data_pointer.p2[(block_index)][(edge_index)];
                  t3DIdx->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = g->eCellIdx[1]->data_pointer.p2[(block_index)][(edge_index)];
               }
            }
         }
      }
   }
}
/* init_opO5: allocate and pre-fill the temporary 3-D field t3DVer used by
 * operator O5 (indirect 3-D accesses).  Generated code: the long ?:
 * expressions below compute this MPI rank's local share of the global
 * edge-block range. */
void init_opO5(GRID * g)
{
{
/* Number of edge blocks this rank owns; fall back to the even
 * ceil-divided share when no explicit local count is configured. */
int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
/* NOTE(review): malloc(24) presumably matches sizeof(*t3DVer) on the
 * target platform — confirm against the descriptor struct definition. */
t3DVer = malloc(24);
t3DVer->name = "t3DVer";
t3DVer->loc = 1; /* loc 1 appears to mean edge-located — TODO confirm against GRID_POS_EDGE usage */
t3DVer->dim = 3;
/* One allocation holding, in order: [num_blocks] block pointers,
 * [num_blocks * height] level pointers, then the int payload. */
t3DVer->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(int) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *));
/* pos walks the level-pointer table, pos2 walks the payload. */
char *pos = (char *) t3DVer->data_pointer.p3 + num_blocks * sizeof(char *);
char *pos2 = (char *) t3DVer->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *);
/* Wire up the pointer tables and zero every element. */
for (int b = 0; b < num_blocks; b++) {
t3DVer->data_pointer.p3[b] = (int * *) pos;
pos += g->height * sizeof(char *);
for (int k = 0; k < g->height; k++) {
t3DVer->data_pointer.p3[b][k] = (int *) pos2;
pos2 += g->blkSize * sizeof(int);
for (int e = 0; e < g->blkSize; e++) {
t3DVer->data_pointer.p3[b][k][e] = (int) 0;
}
}
}
}
{
/* Local block range [min_block, max_block) for this rank.  With
 * bpr = ceil(eBlkCnt / world_size): min_block is always 0 (0 % bpr),
 * max_block is bpr for fully-loaded ranks, the remainder for the last
 * owning rank, and 0 for ranks past the end of the global range. */
size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
/* Levels 0 .. height-3 point one level up (value k+1); note level
 * height-2 is deliberately left at 0 by this loop bound — presumably
 * the operator treats it specially; verify against O5Indirect3D. */
for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
for (size_t height_index = (0); height_index < ((g->height - 2)); height_index++) {
for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
t3DVer->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = height_index + 1;
}
}
}
}
{
size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
/* Top level (height-1) maps to itself. */
for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
const size_t height_index = ((g->height - 1));
{
for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
t3DVer->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = (g->height - 1);
}
}
}
}
}
extern MODEL_COMPONENT com1;
/* com3_init: initialize model component 3.  Allocates the component's
 * fields (one contiguous malloc per field: pointer tables followed by the
 * zeroed payload), runs the operator-specific init hooks, and registers the
 * fields with the I/O layer.  Depends on component 1 being loaded first. */
void com3_init(GRID * g)
{
com3.loaded = 1;
/* Ensure the prerequisite component is initialized exactly once. */
if (!com1.loaded)
com1.init(g);
{
/* gv_vi: 2-D cell-located field, [local cell blocks][blkSize]. */
int num_blocks = local_cell_blocks ? local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
/* NOTE(review): malloc(24) presumably equals sizeof the field
 * descriptor struct — confirm. */
gv_vi = malloc(24);
gv_vi->name = "gv_vi";
gv_vi->loc = 0; /* cell-located — TODO confirm loc encoding */
gv_vi->dim = 2;
/* Block-pointer table followed by the GVAL payload in one allocation. */
gv_vi->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(GVAL) + (num_blocks) * sizeof(char *));
char *pos = (char *) gv_vi->data_pointer.p2 + num_blocks * sizeof(char *);
for (int b = 0; b < num_blocks; b++) {
gv_vi->data_pointer.p2[b] = (GVAL *) pos;
pos += g->blkSize * sizeof(GVAL);
for (int c = 0; c < g->blkSize; c++) {
gv_vi->data_pointer.p2[b][c] = (GVAL) 0;
}
}
}
{
/* gv_ind2Dparam: 2-D edge-located parameter field (read from input). */
int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
gv_ind2Dparam = malloc(24);
gv_ind2Dparam->name = "gv_ind2Dparam";
gv_ind2Dparam->loc = 1;
gv_ind2Dparam->dim = 2;
gv_ind2Dparam->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(GVAL) + (num_blocks) * sizeof(char *));
char *pos = (char *) gv_ind2Dparam->data_pointer.p2 + num_blocks * sizeof(char *);
for (int b = 0; b < num_blocks; b++) {
gv_ind2Dparam->data_pointer.p2[b] = (GVAL *) pos;
pos += g->blkSize * sizeof(GVAL);
for (int e = 0; e < g->blkSize; e++) {
gv_ind2Dparam->data_pointer.p2[b][e] = (GVAL) 0;
}
}
}
{
/* gv_ind2Dvar: 3-D edge-located field, [blocks][height][blkSize]. */
int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
gv_ind2Dvar = malloc(24);
gv_ind2Dvar->name = "gv_ind2Dvar";
gv_ind2Dvar->loc = 1;
gv_ind2Dvar->dim = 3;
gv_ind2Dvar->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *));
/* pos walks the level-pointer table, pos2 the payload. */
char *pos = (char *) gv_ind2Dvar->data_pointer.p3 + num_blocks * sizeof(char *);
char *pos2 = (char *) gv_ind2Dvar->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *);
for (int b = 0; b < num_blocks; b++) {
gv_ind2Dvar->data_pointer.p3[b] = (GVAL * *)pos;
pos += g->height * sizeof(char *);
for (int k = 0; k < g->height; k++) {
gv_ind2Dvar->data_pointer.p3[b][k] = (GVAL *) pos2;
pos2 += g->blkSize * sizeof(GVAL);
for (int e = 0; e < g->blkSize; e++) {
gv_ind2Dvar->data_pointer.p3[b][k][e] = (GVAL) 0;
}
}
}
}
{
/* gv_ind3Dvar: second 3-D edge-located field, same layout as above. */
int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
gv_ind3Dvar = malloc(24);
gv_ind3Dvar->name = "gv_ind3Dvar";
gv_ind3Dvar->loc = 1;
gv_ind3Dvar->dim = 3;
gv_ind3Dvar->data_pointer.p3 = malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) + (num_blocks * g->height) * sizeof(char *) + (num_blocks) * sizeof(char *));
char *pos = (char *) gv_ind3Dvar->data_pointer.p3 + num_blocks * sizeof(char *);
char *pos2 = (char *) gv_ind3Dvar->data_pointer.p3 + num_blocks * sizeof(char *) + num_blocks * g->height * sizeof(char *);
for (int b = 0; b < num_blocks; b++) {
gv_ind3Dvar->data_pointer.p3[b] = (GVAL * *)pos;
pos += g->height * sizeof(char *);
for (int k = 0; k < g->height; k++) {
gv_ind3Dvar->data_pointer.p3[b][k] = (GVAL *) pos2;
pos2 += g->blkSize * sizeof(GVAL);
for (int e = 0; e < g->blkSize; e++) {
gv_ind3Dvar->data_pointer.p3[b][k][e] = (GVAL) 0;
}
}
}
}
/* Operator-specific temporaries (t3DBlk/t3DIdx/t3DVer). */
init_opO4(g);
init_opO5(g);
/* Register input parameter and declare output fields with the I/O layer. */
io_read_register(g, "gv_ind2Dparam", (GVAL *) gv_ind2Dparam, FLOAT32, FLOAT32, GRID_POS_EDGE, GRID_DIM_2D);
io_write_define(g, "gv_vi", (GVAL *) gv_vi, FLOAT32, GRID_POS_CELL, GRID_DIM_2D, &io_gv_vi);
io_write_define(g, "gv_ind2Dvar", (GVAL *) gv_ind2Dvar, FLOAT32, GRID_POS_EDGE, GRID_DIM_3D, &io_gv_ind2Dvar);
io_write_define(g, "gv_ind3Dvar", (GVAL *) gv_ind3Dvar, FLOAT32, GRID_POS_EDGE, GRID_DIM_3D, &io_gv_ind3Dvar);
}
/* com3_compute: run component 3's four operators for one step, in their
 * generated order.  NOTE(review): later operators may consume results of
 * earlier ones, so the call order should not be changed. */
void com3_compute(GRID * g)
{
O2VertIntegration(g);
O3Indirect2D(g);
O4Indirect3D(g);
O5Indirect3D(g);
}
/* com3_io: announce component 3's output fields (defined via
 * io_write_define in com3_init) to the I/O layer for writing. */
void com3_io(GRID * g)
{
io_write_announce(g, &io_gv_vi);
io_write_announce(g, &io_gv_ind2Dvar);
io_write_announce(g, &io_gv_ind3Dvar);
}
/* com3_flops: analytic estimate of floating-point operations performed by
 * one com3_compute() invocation, expressed in terms of the global cell and
 * edge counts and the number of vertical levels. */
double com3_flops(GRID * g)
{
    const double cells = (double) g->cellCount;
    const double edges = (double) g->edgeCount;
    const double levels = (double) g->height;
    /* one term per operator: cell-column work + 3x edge-column work
       + one additional edge-column term */
    return cells * levels + 3.0 * edges * levels + edges * levels;
}
/* com3_memory: analytic estimate of component 3's memory footprint,
 * returned in MiB.  Counts the GVAL payloads of the four fields plus the
 * three int index/temporary arrays. */
double com3_memory(GRID * g)
{
    const double cells = (double) g->cellCount;
    const double edges = (double) g->edgeCount;
    const double levels = (double) g->height;
    /* gv_vi + gv_ind2Dparam + gv_ind2Dvar + gv_ind3Dvar payloads */
    const double value_bytes = (cells + edges + edges * levels + edges * levels) * (double) sizeof(GVAL);
    /* three int-valued edge-column temporaries */
    const double index_bytes = (3.0 * edges * levels) * (double) sizeof(int);
    return (value_bytes + index_bytes) / (1024 * 1024);
}
/* com3_checksum: fold every locally owned value of gv_vi, gv_ind2Dvar and
 * gv_ind3Dvar into one 64-bit sum (used to validate runs).
 *
 * Fix: the original parallel loops accumulated into the shared variable
 * `ret` with `ret += ...` and no synchronization — a data race under
 * OpenMP that could yield a different (wrong) checksum on every run.
 * Both loops now declare reduction(+ : ret), which gives each thread a
 * private partial sum and combines them deterministically-correctly
 * (integer addition is associative, so the result is exact). */
uint64_t com3_checksum(GRID * g)
{
uint64_t ret = 0;
{
/* Local cell-block range for this rank (see init_opO5 for the scheme). */
size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for reduction(+ : ret)
for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
ret += (uint64_t) gv_vi->data_pointer.p2[(block_index)][(cell_index)];
}
}
}
{
/* Local edge-block range for this rank. */
size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for reduction(+ : ret)
for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
for (size_t height_index = (0); height_index < (g->height); height_index++) {
for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
ret += (uint64_t) gv_ind2Dvar->data_pointer.p3[(block_index)][(height_index)][(edge_index)];
ret += (uint64_t) gv_ind3Dvar->data_pointer.p3[(block_index)][(height_index)][(edge_index)];
}
}
}
}
return ret;
}
/* com3_cleanup: mark the component unloaded and release its field storage.
 * Each field's pointer tables and payload live in one allocation (see
 * com3_init / init_opO4 / init_opO5), so a single free per field suffices.
 * NOTE(review): the 24-byte descriptor objects themselves (gv_vi, t3DVer,
 * ...) allocated in the init functions are never freed here — a small
 * one-shot leak; possibly intentional because the io_* layer may still
 * reference them — verify before changing. */
void com3_cleanup(GRID * g)
{
com3.loaded = 0;
free((void *) gv_vi->data_pointer.p2);
free((void *) gv_ind2Dparam->data_pointer.p2);
free((void *) gv_ind2Dvar->data_pointer.p3);
free((void *) gv_ind3Dvar->data_pointer.p3);
free((void *) t3DBlk->data_pointer.p3);
free((void *) t3DIdx->data_pointer.p3);
free((void *) t3DVer->data_pointer.p3);
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * The difference is normalized so that result->tv_usec is non-negative.
 * Note that *y is modified during normalization (same as the classic
 * glibc-manual version this follows).
 *
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into *y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry any excess (> 1 second) of microseconds back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now certainly positive; take the component-wise difference. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: order-1, 3-D 7-point stencil with variable
 * coefficients.  Runs TESTS timed repetitions of Nt-1 serial sweeps and
 * reports the per-test and minimum wall times.
 *
 * Fixes relative to the original:
 *  - Nx/Ny/Nz/Nt get defaults, so they are never read uninitialized when
 *    too few command-line arguments are given.
 *  - Initialization covers index 0 and BOTH time planes of A.  The
 *    original started at index 1 and only filled A[0], so the boundary
 *    cells the stencil reads (e.g. A[t%2][i-1][j][k] at i==1, and all of
 *    A[1]'s boundary from t==1 on) were uninitialized.
 *  - `min` (undefined) replaced with the MIN macro defined at the top of
 *    this file.
 *  - Top-level arrays (A, coef, tile_size) are freed as well.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Interior size + 2 halo points per dimension; defaults when no args. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1]) + 2;
    Ny = atoi(argv[2]) + 2;
    Nz = atoi(argv[3]) + 2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Allocate the two time planes A[2][Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  for (m = 0; m < 2; m++) {
    A[m] = (double ***) malloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
      A[m][i] = (double **) malloc(sizeof(double *) * Ny);
      for (j = 0; j < Ny; j++)
        A[m][i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }
  /* Seven variable-coefficient arrays with the same spatial layout. */
  double ****coef = (double ****) malloc(sizeof(double ***) * 7);
  for (m = 0; m < 7; m++) {
    coef[m] = (double ***) malloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
      coef[m][i] = (double **) malloc(sizeof(double *) * Ny);
      for (j = 0; j < Ny; j++)
        coef[m][i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }
  /* Tile size information, including extra element to decide the list
     length; the list is modified before source-to-source transformations. */
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 32;
  tile_size[4] = -1;
  /* Timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;
  /* Deterministic pseudo-random initialization of every cell, both planes. */
  srand(42);
  for (m = 0; m < 2; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          A[m][i][j][k] = 1.0 * (rand() % BASE);
  for (m = 0; m < 7; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
    for (t = 0; t < Nt - 1; t++) {
      for (i = 1; i < Nz - 1; i++) {
        for (j = 1; j < Ny - 1; j++) {
          for (k = 1; k < Nx - 1; k++) {
            A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[t % 2][i][j][k] +
                                      coef[1][i][j][k] * A[t % 2][i - 1][j][k] +
                                      coef[2][i][j][k] * A[t % 2][i][j - 1][k] +
                                      coef[3][i][j][k] * A[t % 2][i][j][k - 1] +
                                      coef[4][i][j][k] * A[t % 2][i + 1][j][k] +
                                      coef[5][i][j][k] * A[t % 2][i][j + 1][k] +
                                      coef[6][i][j][k] * A[t % 2][i][j][k + 1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* MIN is the macro defined at the top of this file (the original used
       an undefined lowercase `min`). */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free allocated arrays (including the top-level pointers). */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++)
        free(coef[m][i][j]);
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
for-3.c | /* { dg-additional-options "-std=gnu99" } */
extern void abort ();
#define M(x, y, z) O(x, y, z)
#define O(x, y, z) x ## _ ## y ## _ ## z
#pragma omp declare target
#define F distribute
#define G d
#define S
#define N(x) M(x, G, normal)
#include "for-2.h"
#undef S
#undef N
#undef F
#undef G
#define F distribute
#define G d_ds128
#define S dist_schedule(static, 128)
#define N(x) M(x, G, normal)
#include "for-2.h"
#undef S
#undef N
#undef F
#undef G
#define F distribute simd
#define G ds
#define S
#define N(x) M(x, G, normal)
#include "for-2.h"
#undef S
#undef N
#undef F
#undef G
#define F distribute simd
#define G ds_ds128
#define S dist_schedule(static, 128)
#define N(x) M(x, G, normal)
#include "for-2.h"
#undef S
#undef N
#undef F
#undef G
#define F distribute parallel for
#define G dpf
#include "for-1.h"
#undef F
#undef G
#define F distribute parallel for dist_schedule(static, 128)
#define G dpf_ds128
#include "for-1.h"
#undef F
#undef G
#define F distribute parallel for simd
#define G dpfs
#include "for-1.h"
#undef F
#undef G
#define F distribute parallel for simd dist_schedule(static, 128)
#define G dpfs_ds128
#include "for-1.h"
#undef F
#undef G
#pragma omp end declare target
/* Driver: runs every macro-generated distribute/for/simd test variant
 * (expanded above from for-1.h / for-2.h) inside a single `target teams`
 * region, OR-ing their error flags together with a reduction, and aborts
 * if any variant reported a failure. */
int
main ()
{
int err = 0;
#pragma omp target teams reduction(|:err)
{
/* plain distribute, with and without dist_schedule(static, 128) */
err |= test_d_normal ();
err |= test_d_ds128_normal ();
/* distribute simd variants */
err |= test_ds_normal ();
err |= test_ds_ds128_normal ();
/* distribute parallel for, across all loop schedules */
err |= test_dpf_static ();
err |= test_dpf_static32 ();
err |= test_dpf_auto ();
err |= test_dpf_guided32 ();
err |= test_dpf_runtime ();
err |= test_dpf_ds128_static ();
err |= test_dpf_ds128_static32 ();
err |= test_dpf_ds128_auto ();
err |= test_dpf_ds128_guided32 ();
err |= test_dpf_ds128_runtime ();
/* distribute parallel for simd, across all loop schedules */
err |= test_dpfs_static ();
err |= test_dpfs_static32 ();
err |= test_dpfs_auto ();
err |= test_dpfs_guided32 ();
err |= test_dpfs_runtime ();
err |= test_dpfs_ds128_static ();
err |= test_dpfs_ds128_static32 ();
err |= test_dpfs_ds128_auto ();
err |= test_dpfs_ds128_guided32 ();
err |= test_dpfs_ds128_runtime ();
}
if (err)
abort ();
return 0;
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
/* Private evaluator state behind the opaque FxInfo handle. */
struct _FxInfo
{
/* Image sequence the expression is evaluated against (not owned). */
const Image
*images;
/* Normalized copy of the fx expression (see AcquireFxInfo). */
char
*expression;
/* Destination for debug output; set to stderr in AcquireFxInfo. */
FILE
*file;
/* Splay trees for per-expression lookups: named colors and symbols. */
SplayTreeInfo
*colors,
*symbols;
/* One virtual cache view per image in the sequence. */
CacheView
**view;
/* Random-number state for expression functions needing randomness. */
RandomInfo
*random_info;
/* Evaluator-local exception sink (separate from the caller's). */
ExceptionInfo
*exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquireFxInfo() builds evaluator state for an fx expression: it allocates
  the FxInfo record, creates the color/symbol splay trees and one virtual
  cache view per image, copies the expression string, then normalizes it —
  whitespace is stripped, unary minus is rewritten for right-to-left
  associativity, and each two-character operator is collapsed into a single
  sentinel byte so downstream parsing sees one token per operator.
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  char op[2];
  const Image *image;
  FxInfo *fx_info;
  ssize_t n;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* One virtual cache view per image in the sequence. */
  n=0;
  for (image=GetFirstImageInList(fx_info->images); image != (Image *) NULL;
       image=image->next)
    fx_info->view[n++]=AcquireVirtualCacheView(image,exception);
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Force right-to-left associativity for unary negation; the E-/e-
    rewrites undo the substitution inside exponent notation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple (single-byte sentinel) operators.
  */
  op[1]='\0';
  *op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",op);
  *op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",op);
  *op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",op);
  *op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",op);
  *op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",op);
  *op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",op);
  *op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",op);
  *op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",op);
  *op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",op);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* AddNoiseImage: return a new image equal to `image` with random noise of
 * the given type applied per channel; rows are processed in parallel, each
 * thread using its own RandomInfo from the thread set. */
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
CacheView
*image_view,
*noise_view;
Image
*noise_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Prefer the OpenCL-accelerated path when it produces a result. */
noise_image=AccelerateAddNoiseImage(image,noise_type,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
/*
Add noise in each row.
*/
status=MagickTrue;
progress=0;
/* One RandomInfo per thread so rows can be processed concurrently. */
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
/* Threading is disabled (last pragma argument) unless the secret key is
the default ~0UL, keeping seeded runs reproducible. */
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,noise_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
/* A failure in any thread makes the remaining rows no-ops. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
if ((traits == UndefinedPixelTrait) ||
(noise_traits == UndefinedPixelTrait))
continue;
/* Copy-through channels and write-masked pixels pass unchanged. */
if (((noise_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) <= (QuantumRange/2)))
{
SetPixelChannel(noise_image,channel,p[i],q);
continue;
}
/* Perturb the sample with this thread's RNG and clamp to range. */
SetPixelChannel(noise_image,channel,ClampToQuantum(
GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AddNoiseImage)
#endif
proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* BlueShiftImage: return a new image whose colors are muted toward a
 * moonlight look by blending each RGB sample twice with the scaled channel
 * minimum and then the scaled channel maximum of that pixel. */
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
CacheView
*image_view,
*shift_view;
Image
*shift_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate blue shift image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (shift_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
{
shift_image=DestroyImage(shift_image);
return((Image *) NULL);
}
/*
Blue-shift DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,shift_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
quantum;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
/* A failure in any thread makes the remaining rows no-ops. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* First pass: quantum = min(R,G,B); blend each channel 50/50 with
factor*min. */
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) < quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) < quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
/* Second pass: quantum = max(R,G,B); blend again with factor*max. */
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) > quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) > quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(pixel.red+factor*quantum);
pixel.green=0.5*(pixel.green+factor*quantum);
pixel.blue=0.5*(pixel.blue+factor*quantum);
SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(shift_image);
}
sync=SyncCacheViewAuthenticPixels(shift_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_BlueShiftImage)
#endif
proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
shift_view=DestroyCacheView(shift_view);
if (status == MagickFalse)
shift_image=DestroyImage(shift_image);
return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CharcoalImage() simulates a charcoal drawing: edge-detect a clone of the
  source, blur the edges, then normalize, negate, and grayscale the result.
  Intermediate images are destroyed as soon as the next stage consumes
  them; NULL is returned if any stage fails.
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image *copy, *edges, *sketch;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Stage 1: edge-detect a full clone of the source. */
  copy=CloneImage(image,0,0,MagickTrue,exception);
  if (copy == (Image *) NULL)
    return((Image *) NULL);
  edges=EdgeImage(copy,radius,exception);
  copy=DestroyImage(copy);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  /* Stage 2: soften the edge map. */
  sketch=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);
  if (sketch == (Image *) NULL)
    return((Image *) NULL);
  /* Stage 3: normalize contrast, invert, and drop to grayscale. */
  (void) NormalizeImage(sketch,exception);
  (void) NegateImage(sketch,MagickFalse,exception);
  (void) GrayscaleImage(sketch,image->intensity,exception);
  return(sketch);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
/* Blend one channel value with the colorize value by the given percentage. */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* A gray image blended with a non-gray color needs an RGB colorspace. */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  /* Enable alpha when the colorize color carries alpha but the image lacks it. */
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  /* No blend geometry: return the (possibly converted) clone unchanged. */
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse the blend percentages: rho applies to all channels, then
    sigma/xi/psi (and chi for CMYK) override green/blue/alpha (black).
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure on any row aborts the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      /* Blend each authentic channel that is updatable and not masked out. */
      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(colorize_image,q) <= (QuantumRange/2)))
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /* Identity transform; user values are overlaid on the top-left corner. */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  /* Copy user values into the 6x6 matrix; entries beyond 6x6 are skipped
     but still consume positions in the flat values array. */
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /* Log the effective 6x6 matrix, one row per line. */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      /* Each matrix row v produces one output channel as a weighted sum of
         R, G, B (plus K and A when present) and a constant offset. */
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        /* Column 6 holds the normalized offset, scaled to quantum range. */
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    n;

  ssize_t
    number_images;

  /*
    Release every resource owned by the FxInfo structure, then the
    structure itself; always returns NULL for caller reassignment.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* One cache view was acquired per image in the list; destroy each. */
  number_images=(ssize_t) GetImageListLength(fx_info->images);
  for (n=0; n < number_images; n++)
    fx_info->view[n]=DestroyCacheView(fx_info->view[n]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent],
    statistic[MagickPathExtent];

  const char
    *value;

  register const char
    *p;

  /*
    Compute (and cache) a per-channel image statistic named by symbol:
    "depth", "kurtosis", "maxima", "mean", "minima", "skewness", or
    "standard_deviation".  A ".channel" suffix restricts the statistic to
    that pixel channel.  Results are cached in fx_info->symbols keyed by
    image address, channel, and symbol; returns the QuantumScale-scaled
    value.
  */
  channel_mask=UndefinedChannel;
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      /* Restrict the statistic to the channel named after the '.'. */
      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType) (1 << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    {
      /* Cache hit: restore the original channel mask and return. */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*StringToDouble(value,(char **) NULL));
    }
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Initialize the statistic buffer so that an unrecognized symbol yields a
    well-defined "0.0" instead of reading uninitialized stack memory below.
  */
  (void) CopyMagickString(statistic,"0.0",MagickPathExtent);
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",
        standard_deviation);
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  /* Cache the formatted statistic for subsequent lookups of this key. */
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,size_t *,double *,ExceptionInfo *);
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  /*
    Greatest common divisor via the iterative Euclidean algorithm.
  */
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  register const char
    *p;

  register ssize_t
    depth;

  /*
    Advance past a parenthesized subexpression: scan until the ')' that
    returns the nesting depth to one, or until the end of the string.
    Raises UnbalancedParenthesis if the terminator is never reached.
  */
  depth=0;
  for (p=expression; *p != '\0'; p++)
  {
    if ((depth == 1) && (*p == ')'))
      break;
    if (*p == '(')
      depth++;
    else
      if (*p == ')')
        depth--;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MagickPathExtent],
    symbol[MagickPathExtent];

  const char
    *p,
    *value;

  Image
    *image;

  PixelInfo
    pixel;

  double
    alpha,
    beta;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    depth,
    length,
    level;

  /*
    Resolve an fx symbol (e.g. "p[1,1].r", "u.g", "mean", "hue", a color
    name, or a user-defined variable) at pixel (x,y) for the given channel
    and return its value, scaled by QuantumScale where appropriate.
  */
  p=expression;
  i=GetImageIndexInList(fx_info->images);
  depth=0;
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      /*
        Optional image selector: 's' (self), 'u' (first image), 'v' (second
        image), optionally followed by "[expr]" to index the image list.
      */
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* Collect the bracketed index expression (brackets may nest). */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              i=(ssize_t) alpha;
              p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          /*
            Pixel selector: "p{ex,ey}" sets absolute coordinates, while
            "p[dx,dy]" offsets from the current (x,y).
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              /* The subexpression yields both coordinates: alpha,beta. */
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /* Wrap the image index into [0,length) so negative indices count back. */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  if (length != 0)
    i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetPixelInfo(image,&pixel);
  (void) InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  /*
    If the remaining text is not one of the known pixel attributes, try to
    interpret it as a color name (cached in fx_info->colors); a match
    replaces the interpolated pixel and consumes the name.
  */
  if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      (void) CopyMagickString(name,p,MagickPathExtent);
      /* Trim a trailing ".suffix" unless a ')' intervenes. */
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,ConstantString(
                    name),ClonePixelInfo(&pixel));
                  p+=strlen(name);
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /* Bare pixel reference: return the value of the requested channel. */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case IndexPixelChannel:
          return(0.0);
        case IntensityPixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /* Dispatch on the first character of the symbol name. */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /*
            "channel(r,g,b,k,a)": return the geometry argument matching the
            channel being evaluated; CMYK and RGB map the arguments
            differently.
          */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec. 709-style weighted sum of R, G, B. */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double)GetImageDepth(image, fx_info->exception));
      break;
    }
    default:
      break;
  }
  /* Fall back to a user-defined variable stored in the symbol table. */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  /*
    Scan the expression for the operator with the loosest binding (highest
    FxPrecedence value) at bracket level zero and return a pointer to it,
    so the caller can split the expression there; returns NULL if no
    operator is found.  'c' tracks the previous significant character.
  */
  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    /* Skip whitespace, and the character following the '@' operator. */
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over function names and literals whose characters would
      otherwise be misread as operators (e.g. the '-' in "E-2", "atan2").
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
        /* NOTE(review): no break here — falls through to 'J'/'j' below. */
      }
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* Hexadecimal color literal: consume all hex digits. */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /* Track brace/bracket nesting; operators inside them are ignored. */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /* Implied multiplication, e.g. "2r" or ")(": detected from the
             previous character and the current one. */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* Only binary +/-: a sign after an operator or '(' is unary. */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /* Skip an entire parenthesized subexpression in one step. */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
/*
  FxEvaluateSubexpression() recursively evaluates one fx subexpression for a
  single pixel channel at image location (x,y) and returns its value as a
  double.  The lowest-binding operator is located with FxOperatorPrecedence();
  the expression is split there, the left side is evaluated into alpha, and
  the operator is applied against the recursively evaluated right side.
  Binary operators return their second operand through *beta so functions of
  two arguments (atan2, pow, max, ...) can pick it up.  Terminals — numeric
  constants, function names, and image symbols — are handled by the trailing
  switch on the leading character.  Errors are reported through `exception`;
  once an ErrorException is pending, evaluation short-circuits to 0.0.
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  const char *expression,size_t *depth,double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth  58

  char
    *q,
    subexpression[MagickPathExtent];

  double
    alpha,
    gamma;

  register const char
    *p;

  *beta=0.0;
  if (exception->severity >= ErrorException)
    return(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    return(0.0);
  *subexpression='\0';
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      /*
        Split at the operator: evaluate the left-hand side, then apply the
        operator to the recursively evaluated right-hand side.
      */
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
        beta,exception);
      switch ((unsigned char) *p)
      {
        case '~':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) (~(size_t) *beta);
          return(*beta);
        }
        case '!':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(*beta == 0.0 ? 1.0 : 0.0);
        }
        case '^':
        {
          *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,
            beta,exception));
          return(*beta);
        }
        case '*':
        case ExponentialNotation:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha*(*beta));
        }
        case '/':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(alpha/(*beta));
        }
        case '%':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          /*
            Modulo is performed on the rounded magnitude of the divisor.
          */
          *beta=fabs(floor((*beta)+0.5));
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(fmod(alpha,*beta));
        }
        case '+':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha+(*beta));
        }
        case '-':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha-(*beta));
        }
        case LeftShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
          return(*beta);
        }
        case RightShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
          return(*beta);
        }
        case '<':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha < *beta ? 1.0 : 0.0);
        }
        case LessThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha <= *beta ? 1.0 : 0.0);
        }
        case '>':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha > *beta ? 1.0 : 0.0);
        }
        case GreaterThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha >= *beta ? 1.0 : 0.0);
        }
        case EqualOperator:
        {
          /*
            Floating-point (in)equality uses a MagickEpsilon tolerance.
          */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
        }
        case NotEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
        }
        case '&':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
          return(*beta);
        }
        case '|':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
          return(*beta);
        }
        case LogicalAndOperator:
        {
          /*
            Short-circuit: the right side is evaluated only when alpha > 0.
          */
          p++;
          if (alpha <= 0.0)
            {
              *beta=0.0;
              return(*beta);
            }
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
            exception);
          *beta=(gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case LogicalOrOperator:
        {
          /*
            Short-circuit: the right side is evaluated only when alpha <= 0.
          */
          p++;
          if (alpha > 0.0)
            {
              *beta=1.0;
              return(*beta);
            }
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
            exception);
          *beta=(gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case '?':
        {
          /*
            Ternary: split the remainder at ':' and evaluate one branch.
          */
          (void) CopyMagickString(subexpression,++p,MagickPathExtent);
          q=subexpression;
          p=StringToken(":",&q);
          if (q == (char *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          if (fabs(alpha) >= MagickEpsilon)
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
              exception);
          else
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta,
              exception);
          return(gamma);
        }
        case '=':
        {
          char
            numeric[MagickPathExtent];

          /*
            Assignment: the left side must be purely alphabetic; the value is
            stored as a string in the symbol splay-tree.
          */
          q=subexpression;
          while (isalpha((int) ((unsigned char) *q)) != 0)
            q++;
          if (*q != '\0')
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          ClearMagickException(exception);
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          (void) FormatLocaleString(numeric,MagickPathExtent,"%g",*beta);
          (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
          (void) AddValueToSplayTree(fx_info->symbols,ConstantString(
            subexpression),ConstantString(numeric));
          return(*beta);
        }
        case ',':
        {
          /*
            Comma evaluates both sides but yields the first operand; the
            second is available to callers through *beta.
          */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha);
        }
        case ';':
        {
          /*
            Statement separator: yields the second operand.
          */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(*beta);
        }
        default:
        {
          gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
            exception);
          return(gamma);
        }
      }
    }
  if (strchr("(",(int) *expression) != (char *) NULL)
    {
      (*depth)++;
      if (*depth >= FxMaxParenthesisDepth)
        {
          /*
            Abort here rather than recursing further: unbounded nesting would
            otherwise exhaust the C stack.
          */
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "ParenthesisNestedTooDeeply","`%s'",expression);
          return(0.0);
        }
      (void) CopyMagickString(subexpression,expression+1,MagickPathExtent);
      /*
        Strip the trailing ')'; guard against a bare "(" whose copied
        subexpression is empty (writing at index -1 is a buffer underflow).
      */
      if (strlen(subexpression) != 0)
        subexpression[strlen(subexpression)-1]='\0';
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
        beta,exception);
      (*depth)--;
      return(gamma);
    }
  switch (*expression)
  {
    case '+':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
        exception);
      return(1.0*gamma);
    }
    case '-':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
        exception);
      return(-1.0*gamma);
    }
    case '~':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
        exception);
      return((double) (~(size_t) (gamma+0.5)));
    }
    case 'A':
    case 'a':
    {
      if (LocaleNCompare(expression,"abs",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(fabs(alpha));
        }
#if defined(MAGICKCORE_HAVE_ACOSH)
      if (LocaleNCompare(expression,"acosh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(acosh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"acos",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(acos(alpha));
        }
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"airy",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
          return(gamma*gamma);
        }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
      if (LocaleNCompare(expression,"asinh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(asinh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"asin",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(asin(alpha));
        }
      if (LocaleNCompare(expression,"alt",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"atan2",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(atan2(alpha,*beta));
        }
#if defined(MAGICKCORE_HAVE_ATANH)
      if (LocaleNCompare(expression,"atanh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(atanh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"atan",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(atan(alpha));
        }
      if (LocaleCompare(expression,"a") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(expression,"b") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(expression,"ceil",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(ceil(alpha));
        }
      if (LocaleNCompare(expression,"clamp",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          if (alpha < 0.0)
            return(0.0);
          if (alpha > 1.0)
            return(1.0);
          return(alpha);
        }
      if (LocaleNCompare(expression,"cosh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(cosh(alpha));
        }
      if (LocaleNCompare(expression,"cos",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(cos(alpha));
        }
      if (LocaleCompare(expression,"c") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(expression,"debug",5) == 0)
        {
          const char
            *type;

          /*
            debug(expr) evaluates expr, writes a trace line to fx_info->file
            (when one is attached), and yields 0.0.
          */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          if (fx_info->images->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel: type="cyan"; break;
              case MagentaPixelChannel: type="magenta"; break;
              case YellowPixelChannel: type="yellow"; break;
              case AlphaPixelChannel: type="opacity"; break;
              case BlackPixelChannel: type="black"; break;
              default: type="unknown"; break;
            }
          else
            switch (channel)
            {
              case RedPixelChannel: type="red"; break;
              case GreenPixelChannel: type="green"; break;
              case BluePixelChannel: type="blue"; break;
              case AlphaPixelChannel: type="opacity"; break;
              default: type="unknown"; break;
            }
          (void) CopyMagickString(subexpression,expression+6,MagickPathExtent);
          if (strlen(subexpression) > 1)
            subexpression[strlen(subexpression)-1]='\0';
          if (fx_info->file != (FILE *) NULL)
            (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
              "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
              subexpression,GetMagickPrecision(),alpha);
          return(0.0);
        }
      if (LocaleNCompare(expression,"drc",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return((alpha/(*beta*(alpha-1.0)+1.0)));
        }
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(expression,"epsilon") == 0)
        return(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
      if (LocaleNCompare(expression,"erf",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(erf(alpha));
        }
#endif
      if (LocaleNCompare(expression,"exp",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(exp(alpha));
        }
      if (LocaleCompare(expression,"e") == 0)
        return(2.7182818284590452354);
      break;
    }
    case 'F':
    case 'f':
    {
      if (LocaleNCompare(expression,"floor",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(floor(alpha));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleNCompare(expression,"gauss",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
          return(gamma);
        }
      if (LocaleNCompare(expression,"gcd",3) == 0)
        {
          MagickOffsetType
            gcd;

          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
            0.5));
          return((double) gcd);
        }
      if (LocaleCompare(expression,"g") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(expression,"h") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleCompare(expression,"hue") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"hypot",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(hypot(alpha,*beta));
        }
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleCompare(expression,"k") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'I':
    case 'i':
    {
      if (LocaleCompare(expression,"intensity") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"int",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(floor(alpha));
        }
      if (LocaleNCompare(expression,"isnan",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return((double) !!IsNaN(alpha));
        }
      if (LocaleCompare(expression,"i") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(expression,"j") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
      if (LocaleNCompare(expression,"j0",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(j0(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"j1",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(j1(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"jinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
          return(gamma);
        }
#endif
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleNCompare(expression,"ln",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(log(alpha));
        }
      if (LocaleNCompare(expression,"logtwo",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
            beta,exception);
          return(log10(alpha)/log10(2.0));
        }
      if (LocaleNCompare(expression,"log",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(log10(alpha));
        }
      if (LocaleCompare(expression,"lightness") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleCompare(expression,"MaxRGB") == 0)
        return(QuantumRange);
      if (LocaleNCompare(expression,"maxima",6) == 0)
        break;
      if (LocaleNCompare(expression,"max",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(alpha > *beta ? alpha : *beta);
        }
      if (LocaleNCompare(expression,"minima",6) == 0)
        break;
      if (LocaleNCompare(expression,"min",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(alpha < *beta ? alpha : *beta);
        }
      if (LocaleNCompare(expression,"mod",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          gamma=alpha-floor((alpha/(*beta)))*(*beta);
          return(gamma);
        }
      if (LocaleCompare(expression,"m") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleNCompare(expression,"not",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return((double) (alpha < MagickEpsilon));
        }
      if (LocaleCompare(expression,"n") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(expression,"Opaque") == 0)
        return(1.0);
      if (LocaleCompare(expression,"o") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(expression,"phi") == 0)
        return(MagickPHI);
      if (LocaleCompare(expression,"pi") == 0)
        return(MagickPI);
      if (LocaleNCompare(expression,"pow",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(pow(alpha,*beta));
        }
      if (LocaleCompare(expression,"p") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(expression,"QuantumRange") == 0)
        return(QuantumRange);
      if (LocaleCompare(expression,"QuantumScale") == 0)
        return(QuantumScale);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleNCompare(expression,"rand",4) == 0)
        {
          /*
            The random-info source is shared across threads; serialize access.
          */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
          alpha=GetPseudoRandomValue(fx_info->random_info);
          return(alpha);
        }
      if (LocaleNCompare(expression,"round",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(floor(alpha+0.5));
        }
      if (LocaleCompare(expression,"r") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(expression,"saturation") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"sign",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(alpha < 0.0 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"sinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          if (alpha == 0)
            return(1.0);
          gamma=sin((MagickPI*alpha))/(MagickPI*alpha);
          return(gamma);
        }
      if (LocaleNCompare(expression,"sinh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(sinh(alpha));
        }
      if (LocaleNCompare(expression,"sin",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(sin(alpha));
        }
      if (LocaleNCompare(expression,"sqrt",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(sqrt(alpha));
        }
      if (LocaleNCompare(expression,"squish",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
            beta,exception);
          return((1.0/(1.0+exp(-alpha))));
        }
      if (LocaleCompare(expression,"s") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleNCompare(expression,"tanh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(tanh(alpha));
        }
      if (LocaleNCompare(expression,"tan",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(tan(alpha));
        }
      if (LocaleCompare(expression,"Transparent") == 0)
        return(0.0);
      if (LocaleNCompare(expression,"trunc",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          if (alpha >= 0.0)
            return(floor(alpha));
          return(ceil(alpha));
        }
      if (LocaleCompare(expression,"t") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /*
            while(cond,expr): re-evaluate until the condition drops below
            MagickEpsilon; the loop body's value is returned through *beta.
          */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
              depth,beta,exception);
          } while (fabs(alpha) >= MagickEpsilon);
          return(*beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    default:
      break;
  }
  /*
    Not an operator, parenthesis, or known name: try a numeric constant with
    an optional SI prefix; otherwise fall back to the symbol table.
  */
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
  return(alpha);
}
/*
  FxEvaluateExpression() is a convenience wrapper that evaluates the fx
  expression for the gray channel at pixel (0,0), returning the result in
  *alpha and MagickTrue on success.
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
/*
  FxPreprocessExpression() dry-runs the fx expression (gray channel, pixel
  (0,0)) with debug output suppressed, so parse errors surface before the
  full per-pixel evaluation.  The attached debug FILE handle is stashed,
  cleared for the trial evaluation, and restored afterwards.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;  /* silence debug() output during the dry run */
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
/*
  FxEvaluateChannelExpression() evaluates the fx expression for one pixel
  channel at (x,y), storing the result in *alpha.  Returns MagickFalse when
  the evaluator raised an OptionError, MagickTrue otherwise.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta = 0.0;

  size_t
    depth = 0;

  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&depth,
    &beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet() releases every per-thread FxInfo in the set and then
  the array itself; always returns NULL for convenient assignment.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  size_t
    number_threads;

  assert(fx_info != (FxInfo **) NULL);
  /*
    Query the thread-resource limit once instead of on every iteration
    (mirrors the cached number_threads in AcquireFxThreadSet()).
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
  AcquireFxThreadSet() builds one FxInfo per worker thread so each OpenMP
  thread in FxImage() can evaluate the expression with private parser state.
  An expression beginning with '@' is read from the named file.  Each FxInfo
  is pre-parsed with FxPreprocessExpression(); on any failure the partial set
  is destroyed and NULL is returned.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  double
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() fails for an unreadable @-file; bail out here rather
        than passing NULL to AcquireFxInfo()/DestroyString() below.
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
/*
  FxImage() applies the fx expression to every authentic pixel channel of a
  clone of `image` and returns the resulting image (NULL on failure).  Rows
  are processed in parallel; each OpenMP thread uses its own FxInfo from the
  thread set acquired below.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* One pre-parsed FxInfo per worker thread. */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private FxInfo */

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Copy-through channels and write-masked pixels skip evaluation. */
        if (((fx_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* Expression result is normalized [0,1]; scale to quantum range. */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImage)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage.  It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is implode. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ImplodeImage() returns a copy of `image` with pixels inside the largest
  centered ellipse pulled toward (amount > 0) or pushed away from (amount < 0)
  the image center; pixels outside the ellipse are copied unchanged.  Returns
  NULL on failure.
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  Image
    *canvas,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  /* Give the canvas an opaque alpha channel if its background needs one. */
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (implode_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas->columns;
  center.y=0.5*canvas->rows;
  radius=center.x;
  /* Scale the shorter axis so the effect region is an ellipse that fits. */
  if (canvas->columns > canvas->rows)
    scale.y=(double) canvas->columns/(double) canvas->rows;
  else
    if (canvas->columns < canvas->rows)
      {
        scale.x=(double) canvas->rows/(double) canvas->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  interpolate_view=AcquireVirtualCacheView(canvas,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(canvas,implode_image,canvas->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2))
        {
          /* Write-masked pixel: emit the background color untouched. */
          SetPixelBackgoundColor(implode_image,q);
          p+=GetPixelChannels(canvas);
          q+=GetPixelChannels(implode_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the ellipse: copy channels straight through. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas,i);
          PixelTrait traits = GetPixelChannelTraits(canvas,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          /* Sample the source at the radially displaced coordinate. */
          status=InterpolatePixelChannels(canvas,interpolate_view,implode_image,
            method,(double) (factor*delta.x/scale.x+center.x),(double) (factor*
            delta.y/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(canvas);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(canvas,ImplodeImageTag,progress++,
          canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag  "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  /*
    Fix: initialize status before either path; the single-image path below
    previously assigned it conditionally while it was still uninitialized.
  */
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: replicate the frame number_frames times.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: for each adjacent pair of frames, generate
    number_frames in-between frames that blend size and color.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from ~0 to ~1 across the in-between frames; alpha is its
        complement.  The frame size is interpolated the same way.
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /*
            Fix: also release the accumulated morph list; the original
            leaked morph_images on this error path.
          */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if (((morph_traits & CopyPixelTrait) != 0) ||
                (GetPixelWriteMask(morph_images,p) <= (QuantumRange/2)))
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Alpha-blend the resized next frame (p) into the in-between
              frame (q).
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  Quantum
    result;

  /*
    Perturb the pixel by a uniform random amount in [-noise/2,+noise/2),
    then pin the result to the valid quantum range.
  */
  result=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  if (result >= QuantumRange)
    return(QuantumRange);
  return(result <= 0 ? (Quantum) 0 : result);
}
/*
  PlasmaImageProxy() performs the plasma-fractal recursion for PlasmaImage().
  While depth > 0 the segment is split into four quadrants and each is
  processed recursively (the noise divisor `attenuate` grows by one per
  level).  At depth 0 the segment's edge midpoints and center are written
  with the noise-perturbed average of the corresponding corner pixels.
  Returns MagickTrue when the segment needs no further subdivision.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /*
    A zero-area segment requires no work.
  */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;  /* noise amplitude shrinks at each deeper level */
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /* upper-left quadrant */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* lower-left quadrant */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* upper-right quadrant */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* lower-right quadrant; only this call's status is propagated */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel: average of the left edge's two corners, written at the
        vertical midpoint.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /* NOTE(review): this guard mixes x1 and y2 terms; looks inherited
         from upstream — confirm against the reference implementation. */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel: average of the two diagonal corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Segments smaller than 3x3 pixels are fully resolved. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Fix: assert the image is valid before dereferencing it; the original
    read image->debug before the NULL assert and then emitted the same
    trace event a second time.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Run the plasma-fractal recursion over the requested segment.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
%      const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Frame border is ~1/25th of the longest dimension, minimum 10 pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        geometry[MagickPathExtent],
        *text;

      DrawInfo
        *annotate_info;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      image_info=AcquireImageInfo();
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      text=InterpretImageProperties(image_info,(Image *) image,caption,
        exception);
      image_info=DestroyImageInfo(image_info);
      if (text == (char *) NULL)
        {
          /*
            Fix: InterpretImageProperties() can fail and return NULL; the
            original handed the NULL to CloneString()/DestroyString().
          */
          annotate_info=DestroyDrawInfo(annotate_info);
          caption_image=DestroyImage(caption_image);
          return((Image *) NULL);
        }
      (void) CloneString(&annotate_info->text,text);
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &text,exception);
      status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)*
        (metrics.ascent-metrics.descent)+0.5),exception);
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image,exception);
          (void) CloneString(&annotate_info->text,text);
          (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            /*
              Fix: CloneString() copies its argument, so the original's
              AcquireString(geometry) here leaked one string per call.
            */
            (void) CloneString(&annotate_info->geometry,geometry);
          (void) AnnotateImage(caption_image,annotate_info,exception);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      text=DestroyString(text);
    }
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Simulate a slight paper curl: rotate 90, wave, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a soft shadow behind the picture.  DestroyImage() returns NULL,
    so the error paths below effectively return NULL.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Rotate by the requested angle and trim away the excess background.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      /*
        Map pixel intensity to warm sepia tones: red is pushed brightest,
        green slightly less, blue darkest.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      tone=threshold/7.0;
      /*
        Fix: q addresses sepia_image's pixels, so read them back through
        sepia_image's channel map; the original consulted image's map.
      */
      if ((double) GetPixelGreen(sepia_image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(sepia_image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /* Pad all sides by ~2*sigma so the blurred shadow has room to spread. */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      /*
        Flatten every pixel to the background color but keep a scaled
        (alpha/100) copy of the pixel's own alpha, so the shadow silhouette
        follows the original image's opacity.
      */
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /* Restrict the blur to the alpha channel to soften the shadow edge. */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /*
    Shift the page geometry by the requested offsets, compensating for the
    border padding added above, so the shadow lands at (x_offset,y_offset)
    relative to the original image when composited.
  */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image: fill a double-size clone with random noise, motion-blur
    it along the requested angle, edge-detect and negate to form pencil
    strokes, then color-dodge and blend the result onto the input image.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      register ssize_t
        i;

      if (GetPixelWriteMask(random_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(random_image);
          continue;
        }
      /* One random value per pixel: every channel gets the same gray. */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        /*
          Fix: q indexes random_image's pixel layout and the loop runs over
          random_image's channel count, so resolve channels through
          random_image's map (the original consulted image's map).
        */
        PixelChannel channel = GetPixelChannelChannel(random_image,i);
        PixelTrait traits = GetPixelChannelTraits(random_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns NULL. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image,exception);
  (void) NegateImage(dodge_image,MagickFalse,exception);
  /* Shrink the stroke layer back to the input image's size. */
  (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  /* Blend 20% original color back into the sketch. */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Solarize the image in place: every channel sample strictly above
    threshold is inverted (QuantumRange-sample), mimicking darkroom
    solarization.  Returns MagickTrue on success.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap.  PseudoClass images only need their palette
        entries inverted, not every pixel.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image.  Row-parallel pass over the authentic pixel cache;
    the colormap branch above does not return, so DirectClass pixels (or
    the PseudoClass index resolution) are still processed here.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Respect the write mask: masked pixels are left untouched.
      */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* only channels flagged for update are solarized */
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image.  Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
/* GetBit: extract bit i of an integer sample; SetBit: set/clear bit i. */
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
| (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;  /* size_t 1, so SetBit's shifts are done at full width */
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.  State variables:
      c - cycles 0..2 selecting the red, green, or blue carrier channel;
      i - watermark intensity bit, most significant first;
      j - destination (low-order) bit plane in the carrier;
      k - running carrier pixel offset, seeded from stegano_image->offset.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;  /* carrier row for k */
        if (offset >= (ssize_t) stegano_image->rows)
          break;  /* carrier exhausted */
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        /*
          Store bit i of the watermark's intensity in bit plane j of the
          channel selected by c.
        */
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          NOTE(review): the wrap point compares k against columns*columns
          rather than columns*rows, which looks wrong for non-square
          carriers; the stegano decoder mirrors the same expression, so
          confirm both sides before changing it.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->columns))
          k=0;
        if (k == stegano_image->offset)
          j++;  /* completed a lap of the carrier: next bit plane */
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
/*
  StereoImage() is a convenience wrapper: a stereo anaglyph with zero x/y
  offset between the left and right images.
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Compose a red/cyan anaglyph: the left image supplies the red channel,
    the right image the green and blue channels.  x_offset/y_offset shift
    the left image relative to the right one.  Returns a new image, or
    NULL on failure (error details in exception).
  */
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;  /* ThrowImageException and progress reporting use this */
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to green/blue channels.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /*
        Index the destination pixels with stereo_image's own channel map:
        after the colorspace change above it may differ from the source
        images' maps (e.g. a grayscale left image).
      */
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  Image
    *canvas,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.  Each destination pixel inside the
    inscribed circle is sampled from a source position rotated about the
    image center; the rotation angle decays from `degrees' at the center
    to zero at the circle's edge.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
  swirl_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.  scale maps the (possibly non-square) image
    onto a circle of the larger half-dimension.
  */
  center.x=(double) canvas->columns/2.0;
  center.y=(double) canvas->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas->columns > canvas->rows)
    scale.y=(double) canvas->columns/(double) canvas->rows;
  else
    if (canvas->columns < canvas->rows)
      scale.x=(double) canvas->rows/(double) canvas->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(canvas,swirl_image,canvas->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2))
        {
          /* masked pixel: emit the background color and move on
             (SetPixelBackgoundColor is the API's actual spelling) */
          SetPixelBackgoundColor(swirl_image,q);
          p+=GetPixelChannels(canvas);
          q+=GetPixelChannels(swirl_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          register ssize_t
            i;

          /*
            Outside the swirl radius: copy the pixel through unchanged,
            channel by channel.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas,i);
            PixelTrait traits = GetPixelChannelTraits(canvas,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel.  factor falls from 1 at the center to 0 at
            the radius; the squared factor gives a smooth angular decay.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas,interpolate_view,swirl_image,
            method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double)
            ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(canvas);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(canvas,SwirlImageTag,progress++,canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: a geometry string of per-channel blend percentages
%   (rho[,sigma[,xi[,psi]]]) controlling the strength of the tint.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  if (blend == (const char *) NULL)
    return(tint_image);  /* no blend geometry: return the plain clone */
  /*
    Determine RGB values of the color.  The blend geometry supplies
    per-channel percentages: rho=red, sigma=green, xi=blue, psi=alpha;
    for CMYK images psi=black and chi=alpha.  Missing terms default to rho.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Turn the percentages into per-channel deltas scaled by the tint color
    and shifted by its intensity.
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.  Each channel gets its delta weighted by the midtone
    function f(x)=1-4*(x-0.5)^2, which is 0 at black/white and maximal at
    mid-gray.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      register ssize_t
        i;

      /*
        NOTE(review): this copy-trait/write-mask pass appears to be
        overwritten by the unconditional SetPixelViaPixelInfo below for
        the color channels -- confirm the intended masking semantics
        before relying on it.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait tint_traits=GetPixelChannelTraits(tint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (tint_traits == UndefinedPixelTrait))
          continue;
        if (((tint_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(tint_image,channel,p[i],q);
            continue;
          }
      }
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(double) GetPixelRed(image,p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(double) GetPixelGreen(image,p)+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(double) GetPixelBlue(image,p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(double) GetPixelBlack(image,p)+color_vector.black*(1.0-(4.0*
        (weight*weight)));
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  /*
    Soften the image edges in vignette style: draw a white ellipse on a
    black canvas, blur it, composite it onto a clone of the image as an
    intensity (alpha) mask, then flatten against the background color.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;  /* the mask lands in the alpha channel */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /*
    Black background, white filled ellipse centered on the image; x and y
    shrink the ellipse radii from the half-dimensions.
  */
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Blur the oval to get the soft vignette falloff.
  */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  /*
    IntensityCompositeOp copies the blurred oval's intensity into the
    canvas alpha channel; flattening then blends against the background.
  */
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *canvas_view,
    *wave_view;

  Image
    *canvas,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.  Each destination column x is
    vertically displaced by a precomputed sine of the given amplitude and
    wave length; the output image is taller by 2*|amplitude| to hold the
    displaced rows.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas,OpaqueAlpha,exception);
  wave_image=CloneImage(canvas,canvas->columns,(size_t) (canvas->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map: one vertical offset per destination column, biased
    by |amplitude| so offsets are non-negative.
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.  Out-of-range source rows resolve to the background color.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(canvas_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(canvas,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      status=InterpolatePixelChannels(canvas,canvas_view,wave_image,method,
        (double) x,(double) (y-sine_map[x]),q,exception);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(canvas,WaveImageTag,progress++,canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  register ssize_t
    n;

  /*
    One "hat" (a-trous) low-pass step along a line of samples:
      kernel[n]=0.25*(2*p[n]+p[n-scale]+p[n+scale])
    where p[n] is pixels[n*stride].  Out-of-range neighbors are mirrored
    about the end points.  kernel receives extent results; assumes
    scale <= extent/2 -- TODO confirm with the caller.
  */
  for (n=0; n < (ssize_t) scale; n++)
    kernel[n]=0.25f*(pixels[(size_t) n*stride]+pixels[(size_t) n*stride]+
      pixels[(scale-(size_t) n)*stride]+pixels[(scale+(size_t) n)*stride]);
  for ( ; n < (ssize_t) (extent-scale); n++)
    kernel[n]=0.25f*(2.0f*pixels[(size_t) n*stride]+
      pixels[((size_t) n-scale)*stride]+pixels[((size_t) n+scale)*stride]);
  for ( ; n < (ssize_t) extent; n++)
    kernel[n]=0.25f*(pixels[(size_t) n*stride]+pixels[(size_t) n*stride]+
      pixels[((size_t) n-scale)*stride]+
      pixels[(2*(extent-1)-scale-(size_t) n)*stride]);
}
/*
  WaveletDenoiseImage() removes noise from the image by thresholding the
  detail coefficients of a 5-level undecimated hat-filter wavelet transform,
  applied independently to the red, green and blue channels.  Returns a new
  image (caller owns it) or NULL on failure, with `exception` describing the
  error.

  Fix over the previous revision: the cloned `noise_image` was leaked on the
  resource-limit and allocation-failure paths, because ThrowImageException
  returns from the function without destroying it.  It is now destroyed
  before throwing.
*/
MagickExport Image *WaveletDenoiseImage(const Image *image,
const double threshold,const double softness,ExceptionInfo *exception)
{
CacheView
*image_view,
*noise_view;
float
*kernel,
*pixels;
Image
*noise_image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixels_info;
ssize_t
channel;
/*
  Per-level noise standard deviations for the hat wavelet; detail
  coefficients are thresholded at threshold*noise_levels[level].
*/
static const float
noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
0.0080f, 0.0044f };
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
  Workspace: three channel-sized float planes (source + two ping-pong
  wavelet planes) and one per-thread scratch row/column for HatTransform.
*/
pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
sizeof(*pixels));
kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns),
GetOpenMPMaximumThreads()*sizeof(*kernel));
if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
{
if (kernel != (float *) NULL)
kernel=(float *) RelinquishMagickMemory(kernel);
if (pixels_info != (MemoryInfo *) NULL)
pixels_info=RelinquishVirtualMemory(pixels_info);
noise_image=DestroyImage(noise_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(float *) GetVirtualMemoryBlob(pixels_info);
status=MagickTrue;
number_pixels=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
{
register ssize_t
i;
size_t
high_pass,
low_pass;
ssize_t
level,
y;
PixelChannel
pixel_channel;
PixelTrait
traits;
if (status == MagickFalse)
continue;
traits=GetPixelChannelTraits(image,(PixelChannel) channel);
if (traits == UndefinedPixelTrait)
continue;
pixel_channel=GetPixelChannelChannel(image,channel);
if ((pixel_channel != RedPixelChannel) &&
(pixel_channel != GreenPixelChannel) &&
(pixel_channel != BluePixelChannel))
continue;
/*
Copy channel from image to wavelet pixel array.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixels[i++]=(float) p[channel];
p+=GetPixelChannels(image);
}
}
/*
Low pass filter outputs are called approximation kernel & high pass
filters are referred to as detail kernel. The detail kernel
have high values in the noisy parts of the signal.
*/
high_pass=0;
for (level=0; level < 5; level++)
{
double
magnitude;
ssize_t
x,
y;
/*
  Alternate between the second and third plane as the low-pass target.
*/
low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register float
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=kernel+id*image->columns;
q=pixels+y*image->columns;
HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p);
q+=low_pass;
for (x=0; x < (ssize_t) image->columns; x++)
*q++=(*p++);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register float
*magick_restrict p,
*magick_restrict q;
register ssize_t
y;
p=kernel+id*image->rows;
q=pixels+x+low_pass;
HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p);
for (y=0; y < (ssize_t) image->rows; y++)
{
*q=(*p++);
q+=image->columns;
}
}
/*
To threshold, each coefficient is compared to a threshold value and
attenuated / shrunk by some factor.
*/
magnitude=threshold*noise_levels[level];
for (i=0; i < (ssize_t) number_pixels; ++i)
{
pixels[high_pass+i]-=pixels[low_pass+i];
if (pixels[high_pass+i] < -magnitude)
pixels[high_pass+i]+=magnitude-softness*magnitude;
else
if (pixels[high_pass+i] > magnitude)
pixels[high_pass+i]-=magnitude-softness*magnitude;
else
pixels[high_pass+i]*=softness;
if (high_pass != 0)
pixels[i]+=pixels[high_pass+i];
}
high_pass=low_pass;
}
/*
Reconstruct image from the thresholded wavelet kernel.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
offset;
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
offset=GetPixelChannelOffset(noise_image,pixel_channel);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
pixel;
pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
q[offset]=ClampToQuantum(pixel);
i++;
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
channel,GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
kernel=(float *) RelinquishMagickMemory(kernel);
pixels_info=RelinquishVirtualMemory(pixels_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
|
nr_direct.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "optimizer.h"
#include "nr_direct.h"
/* Helpers from the GTO module: largest shell dimension in the slice, and the
 * integral-engine cache size needed for the worst-case shell quartet. */
int GTOmax_shell_dim(const int *ao_loc, const int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/* Target number of atomic orbitals per shell block (see
 * CVHFshls_block_partition). */
#define AO_BLOCK_SIZE 32
#define MIN(I,J) ((I) < (J) ? (I) : (J))
/* Expands the environment (atm/bas/env, ao_loc, shell-slice offsets, shell
 * block bounds, prescreen callback and loop variables) used by every
 * CVHFdot_* driver below.  Must be the first statement of the function
 * since it declares locals. */
#define DECLARE_ALL \
const int *atm = envs->atm; \
const int *bas = envs->bas; \
const double *env = envs->env; \
const int natm = envs->natm; \
const int nbas = envs->nbas; \
const int *ao_loc = envs->ao_loc; \
const int *shls_slice = envs->shls_slice; \
const CINTOpt *cintopt = envs->cintopt; \
const int ioff = ao_loc[shls_slice[0]]; \
const int joff = ao_loc[shls_slice[2]]; \
const int koff = ao_loc[shls_slice[4]]; \
const int loff = ao_loc[shls_slice[6]]; \
const int ish0 = ishls[0]; \
const int ish1 = ishls[1]; \
const int jsh0 = jshls[0]; \
const int jsh1 = jshls[1]; \
const int ksh0 = kshls[0]; \
const int ksh1 = kshls[1]; \
const int lsh0 = lshls[0]; \
const int lsh1 = lshls[1]; \
int shls[4]; \
void (*pf)(double *eri, double *dm, JKArray *vjk, int *shls, \
int i0, int i1, int j0, int j1, \
int k0, int k1, int l0, int l1); \
int (*fprescreen)(); \
if (vhfopt) { \
fprescreen = vhfopt->fprescreen; \
} else { \
fprescreen = CVHFnoscreen; \
} \
int ish, jsh, ksh, lsh, i0, j0, k0, l0, i1, j1, k1, l1, idm;
/* For the current shell quartet (ish,jsh,ksh,lsh): prescreen, evaluate the
 * ERI block into `buf`, and contract it against every density matrix via
 * the per-matrix JKOperator contract callback.  Relies on the locals
 * introduced by DECLARE_ALL being in scope. */
#define INTOR_AND_CONTRACT \
shls[0] = ish; \
shls[1] = jsh; \
shls[2] = ksh; \
shls[3] = lsh; \
if ((*fprescreen)(shls, vhfopt, atm, bas, env) \
&& (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, \
cintopt, cache)) { \
i0 = ao_loc[ish] - ioff; \
j0 = ao_loc[jsh] - joff; \
k0 = ao_loc[ksh] - koff; \
l0 = ao_loc[lsh] - loff; \
i1 = ao_loc[ish+1] - ioff; \
j1 = ao_loc[jsh+1] - joff; \
k1 = ao_loc[ksh+1] - koff; \
l1 = ao_loc[lsh+1] - loff; \
for (idm = 0; idm < n_dm; idm++) { \
pf = jkop[idm]->contract; \
(*pf)(buf, dms[idm], vjk[idm], shls, \
i0, i1, j0, j1, k0, k1, l0, l1); \
} \
}
/*
* for given ksh, lsh, loop all ish, jsh
*/
/*
 * s1 (no permutational symmetry): loop over the full rectangular range of
 * (ish,jsh,ksh,lsh) quartets within the given shell blocks and contract
 * each ERI block with all n_dm density matrices.
 */
void CVHFdot_nrs1(int (*intor)(), JKOperator **jkop, JKArray **vjk,
double **dms, double *buf, double *cache, int n_dm,
int *ishls, int *jshls, int *kshls, int *lshls,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
DECLARE_ALL;
for (ish = ish0; ish < ish1; ish++) {
for (jsh = jsh0; jsh < jsh1; jsh++) {
for (ksh = ksh0; ksh < ksh1; ksh++) {
for (lsh = lsh0; lsh < lsh1; lsh++) {
INTOR_AND_CONTRACT;
} } } }
}
/*
 * s2ij symmetry (i >= j): off-diagonal shell blocks (ishls > jshls) are
 * handled with no symmetry; a diagonal block restricts the inner loop to
 * jsh <= ish.  Blocks strictly above the diagonal are skipped implicitly
 * (neither branch matches), their quartets being covered by the mirrored
 * block.
 */
void CVHFdot_nrs2ij(int (*intor)(), JKOperator **jkop, JKArray **vjk,
double **dms, double *buf, double *cache, int n_dm,
int *ishls, int *jshls, int *kshls, int *lshls,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ishls[0] > jshls[0]) {
return CVHFdot_nrs1(intor, jkop, vjk, dms, buf, cache, n_dm,
ishls, jshls, kshls, lshls, vhfopt, envs);
} else if (ishls[0] == jshls[0]) {
DECLARE_ALL;
for (ish = ish0; ish < ish1; ish++) {
for (jsh = jsh0; jsh <= ish; jsh++) {
for (ksh = ksh0; ksh < ksh1; ksh++) {
for (lsh = lsh0; lsh < lsh1; lsh++) {
INTOR_AND_CONTRACT;
} } } }
}
}
/*
 * s2kl symmetry (k >= l): mirror image of CVHFdot_nrs2ij applied to the
 * ket pair.  On a diagonal (kshls == lshls) block the inner loop runs
 * lsh <= ksh; the assert documents that diagonal blocks must be square.
 */
void CVHFdot_nrs2kl(int (*intor)(), JKOperator **jkop, JKArray **vjk,
double **dms, double *buf, double *cache, int n_dm,
int *ishls, int *jshls, int *kshls, int *lshls,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (kshls[0] > lshls[0]) {
return CVHFdot_nrs1(intor, jkop, vjk, dms, buf, cache, n_dm,
ishls, jshls, kshls, lshls, vhfopt, envs);
} else if (kshls[0] == lshls[0]) {
assert(kshls[1] == lshls[1]);
DECLARE_ALL;
for (ish = ish0; ish < ish1; ish++) {
for (jsh = jsh0; jsh < jsh1; jsh++) {
for (ksh = ksh0; ksh < ksh1; ksh++) {
for (lsh = lsh0; lsh <= ksh; lsh++) {
INTOR_AND_CONTRACT;
} } } }
}
}
/*
 * s4 symmetry (i >= j and k >= l): dispatch by the position of the block
 * relative to the bra and ket diagonals, falling back to the weaker-
 * symmetry drivers; only the doubly-diagonal case needs its own loop with
 * both triangular restrictions.
 */
void CVHFdot_nrs4(int (*intor)(), JKOperator **jkop, JKArray **vjk,
double **dms, double *buf, double *cache, int n_dm,
int *ishls, int *jshls, int *kshls, int *lshls,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ishls[0] > jshls[0]) {
return CVHFdot_nrs2kl(intor, jkop, vjk, dms, buf, cache, n_dm,
ishls, jshls, kshls, lshls, vhfopt, envs);
} else if (ishls[1] <= jshls[0]) {
return;
} else if (kshls[0] > lshls[0]) { // ishls == jshls
return CVHFdot_nrs2ij(intor, jkop, vjk, dms, buf, cache, n_dm,
ishls, jshls, kshls, lshls, vhfopt, envs);
} else if (kshls[0] == lshls[0]) { // ishls == jshls
assert(kshls[1] == lshls[1]);
DECLARE_ALL;
for (ish = ish0; ish < ish1; ish++) {
for (jsh = jsh0; jsh <= ish; jsh++) {
for (ksh = ksh0; ksh < ksh1; ksh++) {
for (lsh = lsh0; lsh <= ksh; lsh++) {
INTOR_AND_CONTRACT;
} } } }
}
}
/*
 * s8 symmetry (full 8-fold: i >= j, k >= l, ij >= kl): blocks below the
 * bra/ket diagonal fall back to s4; blocks above it are skipped (covered
 * by their mirror).  On the diagonal (ishls == kshls) the quartet loops
 * carry all three triangular restrictions; the early `break` enforces
 * ij >= kl within the equal-shell case so every quartet is visited exactly
 * once (see the comment in the loop body).
 */
void CVHFdot_nrs8(int (*intor)(), JKOperator **jkop, JKArray **vjk,
double **dms, double *buf, double *cache, int n_dm,
int *ishls, int *jshls, int *kshls, int *lshls,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ishls[0] > kshls[0]) {
return CVHFdot_nrs4(intor, jkop, vjk, dms, buf, cache, n_dm,
ishls, jshls, kshls, lshls, vhfopt, envs);
} else if (ishls[0] < kshls[0]) {
return;
} else if ((ishls[1] <= jshls[0]) || (kshls[1] <= lshls[0])) {
assert(ishls[1] == kshls[1]);
return;
}
// else i == k && i >= j && k >= l
assert(ishls[1] == kshls[1]);
DECLARE_ALL;
for (ish = ish0; ish < ish1; ish++) {
for (jsh = jsh0; jsh < MIN(jsh1, ish+1); jsh++) {
for (ksh = ksh0; ksh <= ish; ksh++) {
for (lsh = lsh0; lsh < MIN(lsh1, ksh+1); lsh++) {
/* when ksh==ish, (lsh<jsh) misses some integrals (eg k<i&&l>j).
 * These integrals are calculated in the next (ish,jsh) pair. To show
 * that, we just need to prove that every elements in shell^4 appeared
 * only once in fjk_s8. */
if ((ksh == ish) && (lsh > jsh)) {
break;
}
INTOR_AND_CONTRACT;
} } } }
}
/*
 * Allocate a per-thread accumulation buffer for one J/K matrix, sized from
 * the operator's bra/ket shell slices.  `outptr` maps a (bra-shell,
 * ket-shell) pair to an offset into `data` and is initialized to NOVALUE
 * meaning "not yet allocated".
 * NOTE(review): memset fills bytes, so this initialization is only correct
 * because NOVALUE's byte pattern is uniform (presumably 0xffffffff) — TODO
 * confirm against the NOVALUE definition in nr_direct.h.
 * Malloc results are not checked, matching the rest of this file.
 */
static JKArray *allocate_JKArray(JKOperator *op, int *shls_slice, int *ao_loc, int ncomp)
{
JKArray *jkarray = malloc(sizeof(JKArray));
int ibra = op->ibra_shl0;
int iket = op->iket_shl0;
int obra = op->obra_shl0;
int oket = op->oket_shl0;
int v_bra_sh0 = shls_slice[obra];
int v_ket_sh0 = shls_slice[oket];
int v_bra_sh1 = shls_slice[obra+1];
int v_ket_sh1 = shls_slice[oket+1];
jkarray->v_ket_nsh = shls_slice[oket+1] - shls_slice[oket];
jkarray->dm_dims[0] = ao_loc[shls_slice[ibra+1]] - ao_loc[shls_slice[ibra]];
jkarray->dm_dims[1] = ao_loc[shls_slice[iket+1]] - ao_loc[shls_slice[iket]];
int v_rows = ao_loc[v_bra_sh1] - ao_loc[v_bra_sh0];
int v_cols = ao_loc[v_ket_sh1] - ao_loc[v_ket_sh0];
jkarray->offset0_outptr = v_bra_sh0 * jkarray->v_ket_nsh + v_ket_sh0;
int outptr_size =((shls_slice[obra+1] - shls_slice[obra]) *
(shls_slice[oket+1] - shls_slice[oket]));
jkarray->outptr = malloc(sizeof(int) * outptr_size);
memset(jkarray->outptr, NOVALUE, sizeof(int) * outptr_size);
jkarray->stack_size = 0;
int data_size = v_rows * v_cols * ncomp;
jkarray->data = malloc(sizeof(double) * data_size);
jkarray->ncomp = ncomp;
return jkarray;
}
/* Release a buffer created by allocate_JKArray: its two owned arrays, then
 * the struct itself. */
static void deallocate_JKArray(JKArray *jkarray)
{
        free(jkarray->data);
        free(jkarray->outptr);
        free(jkarray);
}
/*
 * Copy the density matrix `dm` (row-major, nrow x ncol over the operator's
 * bra/ket AO ranges) into a freshly malloc'ed buffer laid out shell-block
 * by shell-block, the order in which the contraction kernels consume it.
 * Caller frees the returned buffer.
 */
static double *allocate_and_reorder_dm(JKOperator *op, double *dm,
int *shls_slice, int *ao_loc)
{
        const int bra_sh0 = shls_slice[op->ibra_shl0];
        const int bra_sh1 = shls_slice[op->ibra_shl0+1];
        const int ket_sh0 = shls_slice[op->iket_shl0];
        const int ket_sh1 = shls_slice[op->iket_shl0+1];
        const int row0 = ao_loc[bra_sh0];
        const int col0 = ao_loc[ket_sh0];
        const int nrow = ao_loc[bra_sh1] - row0;
        const int ncol = ao_loc[ket_sh1] - col0;
        double *packed = malloc(sizeof(double) * nrow * ncol);
        double *dst = packed;
        int bra_sh, ket_sh, row, col;
        for (bra_sh = bra_sh0; bra_sh < bra_sh1; bra_sh++) {
        for (ket_sh = ket_sh0; ket_sh < ket_sh1; ket_sh++) {
                /* copy one (bra_sh, ket_sh) rectangular shell block */
                for (row = ao_loc[bra_sh]-row0; row < ao_loc[bra_sh+1]-row0; row++) {
                for (col = ao_loc[ket_sh]-col0; col < ao_loc[ket_sh+1]-col0; col++) {
                        *dst++ = dm[row*ncol+col];
                } }
        } }
        return packed;
}
/* Zero the output J/K buffer: ncomp components, each spanning the
 * operator's output bra x ket AO rectangle. */
static void zero_out_vjk(double *vjk, JKOperator *op,
int *shls_slice, int *ao_loc, int ncomp)
{
        const int bra0 = shls_slice[op->obra_shl0];
        const int bra1 = shls_slice[op->obra_shl0+1];
        const int ket0 = shls_slice[op->oket_shl0];
        const int ket1 = shls_slice[op->oket_shl0+1];
        const int nrow = ao_loc[bra1] - ao_loc[bra0];
        const int ncol = ao_loc[ket1] - ao_loc[ket0];
        memset(vjk, 0, sizeof(double) * nrow * ncol * ncomp);
}
/*
 * Accumulate one thread's private JKArray into the shared output matrix
 * `vjk`.  For every (ish,jsh) shell pair that the thread touched
 * (outptr != NOVALUE), add its di x dj x ncomp block of `data` into the
 * corresponding rectangle of vjk.  Components are stored back-to-back in
 * both buffers: vjk advances by one full vrow*vcol matrix per component,
 * data by one di*dj block.
 * NOTE(review): `voffset` is computed in int while vcol is size_t; for very
 * large AO counts this could overflow — confirm nao stays within int range.
 */
static void assemble_v(double *vjk, JKOperator *op, JKArray *jkarray,
int *shls_slice, int *ao_loc)
{
int obra = op->obra_shl0;
int oket = op->oket_shl0;
int ish0 = shls_slice[obra];
int jsh0 = shls_slice[oket];
int ish1 = shls_slice[obra+1];
int jsh1 = shls_slice[oket+1];
int njsh = jsh1 - jsh0;
size_t vrow = ao_loc[ish1] - ao_loc[ish0];
size_t vcol = ao_loc[jsh1] - ao_loc[jsh0];
int ncomp = jkarray->ncomp;
int voffset = ao_loc[ish0] * vcol + ao_loc[jsh0];
int i, j, ish, jsh;
int di, dj, icomp;
int optr;
double *data, *pv;
for (ish = ish0; ish < ish1; ish++) {
for (jsh = jsh0; jsh < jsh1; jsh++) {
/* offset0_outptr rebases the global (ish,jsh) index to the slice */
optr = jkarray->outptr[ish*njsh+jsh-jkarray->offset0_outptr];
if (optr != NOVALUE) {
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
data = jkarray->data + optr;
pv = vjk + ao_loc[ish]*vcol+ao_loc[jsh] - voffset;
for (icomp = 0; icomp < ncomp; icomp++) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pv[i*vcol+j] += data[i*dj+j];
} }
pv += vrow * vcol;
data += di * dj;
}
}
} }
}
/*
 * Divide the shell range shls_slice[0]..shls_slice[1] into contiguous
 * blocks holding roughly AO_BLOCK_SIZE atomic orbitals each.  Block
 * boundaries (shell indices) are written to block_loc, with
 * block_loc[count] == shls_slice[1] as the terminating fence; the number
 * of blocks is returned.
 */
int CVHFshls_block_partition(int *block_loc, int *shls_slice, int *ao_loc)
{
        const int sh_start = shls_slice[0];
        const int sh_stop = shls_slice[1];
        int anchor = ao_loc[sh_start];
        int nblocks = 0;
        int sh;
        block_loc[nblocks++] = sh_start;
        for (sh = sh_start + 1; sh < sh_stop; sh++) {
                /* start a new block once the current one exceeds the target */
                if (ao_loc[sh] - anchor > AO_BLOCK_SIZE) {
                        block_loc[nblocks++] = sh;
                        anchor = ao_loc[sh];
                }
        }
        block_loc[nblocks] = sh_stop;
        return nblocks;
}
/*
* drv loop over ij, generate eris of kl for given ij, call fjk to
* calculate vj, vk.
*
* n_dm is the number of dms for one [array(ij|kl)], it is also the size of dms and vjk
* ncomp is the number of components that produced by intor
* shls_slice = [ishstart, ishend, jshstart, jshend, kshstart, kshend, lshstart, lshend]
*
* ao_loc[i+1] = ao_loc[i] + CINTcgto_spheric(i, bas) for i = 0..nbas
*
* Return [(ptr[ncomp,nao,nao] in C-contiguous) for ptr in vjk]
*/
/*
 * Driver for direct J/K builds.  Partitions each of the four shell ranges
 * into AO blocks, then distributes (j,k,l)-block triplets over OpenMP
 * threads; each thread loops the i blocks itself, accumulating into a
 * private JKArray that is merged into the shared vjk under a critical
 * section.  `fdot` is one of the CVHFdot_nrs* drivers above and encodes
 * the permutational symmetry.  See the comment block above for the
 * shls_slice / ao_loc / n_dm / ncomp conventions.
 */
void CVHFnr_direct_drv(int (*intor)(), void (*fdot)(), JKOperator **jkop,
double **dms, double **vjk, int n_dm, int ncomp,
int *shls_slice, int *ao_loc,
CINTOpt *cintopt, CVHFOpt *vhfopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
IntorEnvs envs = {natm, nbas, atm, bas, env, shls_slice, ao_loc, NULL,
cintopt, ncomp};
int idm;
/* VLA: one reordered (shell-blocked) copy of each density matrix */
double *tile_dms[n_dm];
for (idm = 0; idm < n_dm; idm++) {
zero_out_vjk(vjk[idm], jkop[idm], shls_slice, ao_loc, ncomp);
tile_dms[idm] = allocate_and_reorder_dm(jkop[idm], dms[idm],
shls_slice, ao_loc);
}
/* worst-case ERI block (di^4 * ncomp) plus integral-engine scratch */
const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4);
const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
atm, natm, bas, nbas, env);
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const int lsh0 = shls_slice[6];
const int lsh1 = shls_slice[7];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
const int nksh = ksh1 - ksh0;
const int nlsh = lsh1 - lsh0;
/* one allocation holds all four block-boundary tables (each range needs
 * at most nsh+1 fences) */
int *block_iloc = malloc(sizeof(int) * (nish + njsh + nksh + nlsh + 4));
int *block_jloc = block_iloc + nish + 1;
int *block_kloc = block_jloc + njsh + 1;
int *block_lloc = block_kloc + nksh + 1;
const size_t nblock_i = CVHFshls_block_partition(block_iloc, shls_slice+0, ao_loc);
const size_t nblock_j = CVHFshls_block_partition(block_jloc, shls_slice+2, ao_loc);
const size_t nblock_k = CVHFshls_block_partition(block_kloc, shls_slice+4, ao_loc);
const size_t nblock_l = CVHFshls_block_partition(block_lloc, shls_slice+6, ao_loc);
const size_t nblock_kl = nblock_k * nblock_l;
const size_t nblock_jkl = nblock_j * nblock_kl;
#pragma omp parallel
{
size_t i, j, k, l, r, blk_id;
/* per-thread private accumulators, merged below */
JKArray *v_priv[n_dm];
for (i = 0; i < n_dm; i++) {
v_priv[i] = allocate_JKArray(jkop[i], shls_slice, ao_loc, ncomp);
}
double *buf = malloc(sizeof(double) * (di*di*di*di*ncomp + cache_size));
double *cache = buf + di*di*di*di*ncomp;
#pragma omp for nowait schedule(dynamic, 1)
for (blk_id = 0; blk_id < nblock_jkl; blk_id++) {
/* decode the flat block id into (j, k, l) block indices */
r = blk_id;
j = r / nblock_kl ; r = r % nblock_kl;
k = r / nblock_l ; r = r % nblock_l;
l = r;
for (i = 0; i < nblock_i; i++) {
(*fdot)(intor, jkop, v_priv, tile_dms, buf, cache, n_dm,
block_iloc+i, block_jloc+j, block_kloc+k, block_lloc+l,
vhfopt, &envs);
}
}
#pragma omp critical
{
for (i = 0; i < n_dm; i++) {
assemble_v(vjk[i], jkop[i], v_priv[i], shls_slice, ao_loc);
deallocate_JKArray(v_priv[i]);
}
}
free(buf);
}
for (idm = 0; idm < n_dm; idm++) {
free(tile_dms[idm]);
}
free(block_iloc);
}
|
two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
/* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);
/// Counted pointer of TwoStepVPStrategy
//typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// Constructor from a solver-settings object: delegates all configuration
/// (linear solvers, tolerances, iteration limits) to InitializeStrategy.
TwoStepVPStrategy(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
InitializeStrategy(rSolverConfig);
}
/// Explicit constructor: builds the two inner Gauss-Seidel linear
/// strategies (momentum/velocity and continuity/pressure) used by the
/// fractional-step solve.
/// @param pVelocityLinearSolver linear solver for the momentum step
/// @param pPressureLinearSolver linear solver for the continuity step
/// @param ReformDofSet rebuild the DOF set each step (handled here, not by
///        the auxiliary strategies)
/// @param VelTol / PresTol convergence tolerances
/// @param MaxPressureIterations outer pressure iterations (predictor-corrector)
/// @param TimeOrder 1 = backward Euler, 2 = BDF2
/// @param DomainSize spatial dimension (2 or 3)
TwoStepVPStrategy(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
//typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent;
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
// Static scheme: the time discretization is embedded in the elements.
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
// Momentum step: elimination builder-and-solver.
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
// Continuity step: block builder-and-solver.
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor. Defaulted: members are smart pointers, so no manual cleanup
/// is required (the file already relies on C++11, e.g. `override`).
virtual ~TwoStepVPStrategy() = default;
/// Validates the configuration: base-class checks, variable registration
/// (DELTA_TIME, BDF_COEFFICIENTS), buffer size consistent with the time
/// order (BDF2 needs 3 buffered steps, backward Euler 2), and per-element
/// checks.  Returns 0 on success, the first non-zero element error code
/// otherwise.
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
if (BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", "");
ModelPart &rModelPart = BaseType::GetModelPart();
if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize());
if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize());
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// Stop at the first failing element.
for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
{
ierr = itEl->Check(rCurrentProcessInfo);
if (ierr != 0)
break;
}
/* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */
/* { */
/* ierr = itCond->Check(rCurrentProcessInfo); */
/* if (ierr != 0) break; */
/* } */
return ierr;
KRATOS_CATCH("");
}
/// Performs one time step of the two-step velocity-pressure (fractional
/// step) scheme: alternates momentum and continuity iterations, updating
/// the mesh topology after each momentum solve, until both converge (after
/// at least 3 iterations) or the iteration budget is exhausted.  The
/// budget is enlarged near t=0 and when the time step just changed.
/// Returns 0.0 (NormDp is never updated here — callers should not rely on
/// the returned norm).
double Solve() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
double NormDp = 0.0;
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("TwoStepVPStrategy") << "\n Solve with two_step_vp strategy at t=" << currentTime << "s" << std::endl;
// Allow more iterations right after a time-step change ...
if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
{
maxNonLinearIterations *= 2;
}
// ... and during the first/second 10 steps of the simulation.
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
bool momentumAlreadyConverged = false;
bool continuityAlreadyConverged = false;
/* boost::timer solve_step_time; */
// Iterative solution for pressure
/* unsigned int timeStep = rCurrentProcessInfo[STEP]; */
/* if(timeStep==1){ */
/* unsigned int iter=0; */
/* continuityConverged = this->SolveContinuityIteration(iter,maxNonLinearIterations); */
/* }else if(timeStep==2){ */
/* unsigned int iter=0; */
/* momentumConverged = this->SolveMomentumIteration(iter,maxNonLinearIterations,fixedTimeStep); */
/* }else{ */
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
// Momentum step, then move the mesh/update topology before continuity.
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep);
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
if ((momentumConverged == true || it == maxNonLinearIterations - 1) && momentumAlreadyConverged == false)
{
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
momentumAlreadyConverged = true;
}
if ((continuityConverged == true || it == maxNonLinearIterations - 1) && continuityAlreadyConverged == false)
{
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
continuityAlreadyConverged = true;
}
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations);
}
// Final iteration (by convergence or by budget): update stress/strain.
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2))
{
//this->ComputeErrorL2Norm();
//this->ComputeErrorL2NormCasePoiseuille();
this->UpdateStressStrain();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
if ((continuityConverged && momentumConverged) && it > 2)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
/* } */
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
if (mReformDofSet)
this->Clear();
return NormDp;
}
/// End-of-step hook; intentionally empty (stress/strain update is done at
/// the end of Solve() instead — see the commented-out call).
void FinalizeSolutionStep() override
{
/* this->UpdateStressStrain(); */
}
/// Start-of-step hook; intentionally empty for this strategy.
void InitializeSolutionStep() override
{
}
/// Recomputes nodal displacements/porosity and moves the mesh.  The
/// rModelPart and echoLevel parameters are currently unused (only needed
/// by the commented-out boundary-normal recomputation) but kept for
/// interface stability.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
/* BoundaryNormalsCalculationUtilities BoundaryComputation; */
/* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */
KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
// Per-node update of accelerations plus the first/second time derivatives of
// the pressure after a solve:
//  - fluid nodes (not ISOLATED, not RIGID): acceleration from the BDF formula;
//  - RIGID nodes: acceleration history zeroed;
//  - remaining (isolated) nodes: pressure history zeroed and, when available,
//    VOLUME_ACCELERATION applied as a free-fall velocity update.
// Declared virtual so derived strategies can specialize the temporal update.
virtual void CalculateTemporalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && (i)->IsNot(RIGID))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
// Rigid nodes carry no fluid acceleration: wipe both history slots.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated node: clear the whole pressure history and, if gravity-type
// data exists, restart the motion from the body force alone.
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
// Pressure time derivatives, fused into the same nodal loop (compare with
// CalculatePressureVelocity / CalculatePressureAcceleration).
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
// Built incrementally: the final value is (pv_before - pv_after)/dt, where
// pv_before is the pressure velocity stored before this update and pv_after
// the freshly recomputed one.
// NOTE(review): that is the OPPOSITE sign of CalculatePressureAcceleration's
// (current - previous)/dt -- confirm whether the inversion is intentional.
CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}
// Per-node acceleration update (same classification as CalculateTemporalVariables
// but WITHOUT the pressure-derivative part):
//  - fluid nodes: BDF acceleration formula via UpdateAccelerations();
//  - RIGID nodes: acceleration history zeroed;
//  - isolated nodes: pressure history zeroed, body force applied when present.
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && (i)->IsNot(RIGID))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
// Rigid nodes carry no fluid acceleration.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated node: clear the pressure history; restart motion from the
// body force when VOLUME_ACCELERATION data is available.
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
// BDF-style acceleration update:
//   a_n = -c1 * (v_n - v_{n-1}) - a_{n-1},
// where c1 = BDFcoeffs[1], the "step n" coefficient built by
// SetTimeCoefficients (e.g. -2/dt for constant-step BDF2, giving
// a_n = (2/dt)*(v_n - v_{n-1}) - a_{n-1}).
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
const array_1d<double, 3> &CurrentVelocity,
array_1d<double, 3> &PreviousAcceleration,
const array_1d<double, 3> &PreviousVelocity,
Vector &BDFcoeffs)
{
/* noalias(PreviousAcceleration)=CurrentAcceleration; */
// noalias(): ublas direct assignment (no aliasing temporary).
noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
// std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t)
// std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t)
// std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t)
}
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
// Re-initializes every element's solution step in parallel (intended for the
// stress/strain state, per the commented original call), then refreshes the
// nodal temporal variables.
void UpdateStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
// Each thread works on its own partition of the element container.
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
/* this->CalculateAccelerations(); */
/* this->CalculatePressureVelocity(); */
/* this->CalculatePressureAcceleration(); */
this->CalculateTemporalVariables();
}
// Releases the internal data of both inner strategies. Called after each
// Solve() when mReformDofSet is true (see the solve path above).
void Clear() override
{
mpMomentumStrategy->Clear();
mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
void SetEchoLevel(int Level) override
{
    // Store the verbosity on this strategy, then propagate it -- one level
    // quieter, clamped at zero -- to the two inner strategies.
    BaseType::SetEchoLevel(Level);
    const int InnerLevel = (Level > 0) ? (Level - 1) : 0;
    mpMomentumStrategy->SetEchoLevel(InnerLevel);
    mpPressureStrategy->SetEchoLevel(InnerLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // Class identifier used in logs and error messages.
    return std::string("TwoStepVPStrategy");
}
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
// Writes the same class identifier as Info(); no per-instance state.
rOStream << "TwoStepVPStrategy";
}
/// Print object's data.
void PrintData(std::ostream &rOStream) const override
{
// Intentionally empty: this strategy exposes no printable internal data.
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
    KRATOS_TRY;

    if (mTimeOrder == 2)
    {
        // Variable-step BDF2: coefficients depend on the ratio of the
        // previous to the current time-step size.
        const double Dt = rCurrentProcessInfo[DELTA_TIME];
        const double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
        const double Rho = OldDt / Dt;
        const double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector &rBDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        rBDFcoeffs.resize(3, false);
        rBDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        // step n+1 (3/2Dt if Dt is constant)
        rBDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); // step n   (-4/2Dt if Dt is constant)
        rBDFcoeffs[2] = TimeCoeff;                                  // step n-1 (1/2Dt if Dt is constant)
    }
    else if (mTimeOrder == 1)
    {
        // BDF1 (backward Euler).
        const double TimeCoeff = 1.0 / rCurrentProcessInfo[DELTA_TIME];

        Vector &rBDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        rBDFcoeffs.resize(2, false);
        rBDFcoeffs[0] = TimeCoeff;  // step n+1 (1/Dt)
        rBDFcoeffs[1] = -TimeCoeff; // step n   (-1/Dt)
    }

    KRATOS_CATCH("");
}
// Assembles and solves the momentum (fractional-step velocity) system once and
// checks the relative velocity error against mVelocityTolerance.
//   it            : current nonlinear iteration (0-based)
//   maxIt         : total iterations allowed this step
//   fixedTimeStep : set to true when convergence is bad enough that the step is
//                   rolled back to the previous velocity/pressure fields
// Returns true when the velocity error is below tolerance.
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedMomentum = false;
double NormDv = 0;
fixedTimeStep = false;
// build momentum system and solve for fractional step velocity increment
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
/* std::cout<<"---- m o m e n t u m e q u a t i o n s ----"<<std::endl; */
// Inner strategy is initialized only on the first iteration of the step.
if (it == 0)
{
mpMomentumStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDv = mpMomentumStrategy->Solve(); */
/* } */
NormDv = mpMomentumStrategy->Solve();
if (BaseType::GetEchoLevel() > 1 && Rank == 0)
std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
double DvErrorNorm = 0;
ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
// After this iteration count, start watching for blow-up mid-loop.
unsigned int iterationForCheck = 3;
KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
// Check convergence
if (it == maxIt - 1)
{
// Last allowed iteration: decide whether to roll the step back.
KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Final Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
}
else if (it > iterationForCheck)
{
fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
}
// ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
// double currentTime = rCurrentProcessInfo[TIME];
// double tolerance=0.0000000001;
// if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("velocityConvergenceAt025s.txt",std::ios::app);
// 	myfile << it << "\t" << DvErrorNorm << "\n";
// 	myfile.close();
// }
// else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("velocityConvergenceAt05s.txt",std::ios::app);
// 	myfile << it << "\t" << DvErrorNorm << "\n";
// 	myfile.close();
// }
// else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("velocityConvergenceAt075s.txt",std::ios::app);
// 	myfile << it << "\t" << DvErrorNorm << "\n";
// 	myfile.close();
// }
// else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("velocityConvergenceAt100s.txt",std::ios::app);
// 	myfile << it << "\t" << DvErrorNorm << "\n";
// 	myfile.close();
// }
if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
return ConvergedMomentum;
}
// Assembles and solves the continuity (pressure) system once and checks the
// relative pressure error against mPressureTolerance.
// NOTE(review): on the last iteration the convergence flag is OVERWRITTEN with
// FixTimeStepContinuity's return value (true = "time step fixed"), unlike
// SolveMomentumIteration which keeps the two outcomes separate -- confirm.
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedContinuity = false;
double NormDp = 0;
// 2. Pressure solution
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
/* std::cout<<"     ---- c o n t i n u i t y   e q u a t i o n ----"<<std::endl; */
// Inner strategy is initialized only on the first iteration of the step.
if (it == 0)
{
mpPressureStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDp = mpPressureStrategy->Solve(); */
/* } */
NormDp = mpPressureStrategy->Solve();
if (BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "The norm of pressure is: " << NormDp << std::endl;
double DpErrorNorm = 0;
ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);
KRATOS_INFO("TwoStepVPStrategy") << "         iteration(" << it << ") Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
// Check convergence
if (it == maxIt - 1)
{
KRATOS_INFO("TwoStepVPStrategy") << "       iteration(" << it << ") Final Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
}
// ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
// double currentTime = rCurrentProcessInfo[TIME];
// double tolerance=0.0000000001;
// if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("pressureConvergenceAt025s.txt",std::ios::app);
// 	myfile << it << "\t" << DpErrorNorm << "\n";
// 	myfile.close();
// }
// else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("pressureConvergenceAt05s.txt",std::ios::app);
// 	myfile << it << "\t" << DpErrorNorm << "\n";
// 	myfile.close();
// }
// else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("pressureConvergenceAt075s.txt",std::ios::app);
// 	myfile << it << "\t" << DpErrorNorm << "\n";
// 	myfile.close();
// }
// else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
// 	std::ofstream myfile;
// 	myfile.open ("pressureConvergenceAt100s.txt",std::ios::app);
// 	myfile << it << "\t" << DpErrorNorm << "\n";
// 	myfile.close();
// }
if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
return ConvergedContinuity;
}
void ComputeErrorL2Norm()
{
    // Compares the computed velocity/pressure/deviatoric-stress fields against a
    // manufactured analytical solution evaluated at each element barycenter, and
    // appends one "time <tab> L2-error" line per field to a set of text files.
    // NOTE(review): tauXX/tauYY/tauXY are hard-coded to zero (the elemental
    // stress variables are commented out), so the tau error files currently
    // report the norm of the expected stress field itself -- confirm intent.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    // Never accumulated (the accumulation is commented out below); kept so the
    // errorL2VelocityFile output keeps its historical (all-zero) column.
    long double sumErrorL2Velocity = 0;
    long double sumErrorL2VelocityX = 0;
    long double sumErrorL2VelocityY = 0;
    long double sumErrorL2Pressure = 0;
    long double sumErrorL2TauXX = 0;
    long double sumErrorL2TauYY = 0;
    long double sumErrorL2TauXY = 0;
    // BUGFIX: these accumulators are shared by all OpenMP threads; the original
    // code incremented them inside the parallel region with no reduction clause,
    // which is a data race and silently corrupts the norms on multi-threaded
    // runs (cf. CheckVelocityConvergence, which correctly reduces NormV).
#pragma omp parallel reduction(+ : sumErrorL2VelocityX, sumErrorL2VelocityY, sumErrorL2Pressure, sumErrorL2TauXX, sumErrorL2TauYY, sumErrorL2TauXY)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            // Shape-function values at the single Gauss point (GI_GAUSS_1).
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            // Interpolate the computed fields at the Gauss point.
            double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter; the fixed /3.0 weight assumes triangles --
            // TODO confirm for tetrahedra.
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += static_cast<long double>(geometry(i)->X()) / 3.0;
                bariPosY += static_cast<long double>(geometry(i)->Y()) / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Manufactured polynomial solution evaluated at the barycenter.
            long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            long double expectedPressure = -posX * (1.0 - posX);
            const long double eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
            const long double eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
            const long double eleErrorL2Pressure = elementalPressure - expectedPressure;
            sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area();
            sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area();
            sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area();
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
            long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
            long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
            long double nodalErrorTauXX = tauXX - expectedTauXX;
            long double nodalErrorTauYY = tauYY - expectedTauYY;
            long double nodalErrorTauXY = tauXY - expectedTauXY;
            sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area();
            sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area();
            sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area();
        }
    }
    // Discrete L2 norm = sqrt of the area-weighted sum of squared errors.
    long double errorL2Velocity = sqrt(sumErrorL2Velocity); // always 0, see note above
    long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
    long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
    long double errorL2Pressure = sqrt(sumErrorL2Pressure);
    long double errorL2TauXX = sqrt(sumErrorL2TauXX);
    long double errorL2TauYY = sqrt(sumErrorL2TauYY);
    long double errorL2TauXY = sqrt(sumErrorL2TauXY);
    // Append one "time <tab> norm" line per field to its history file.
    auto appendNorm = [&currentTime](const char *fileName, long double value) {
        std::ofstream file(fileName, std::ios::app);
        file << currentTime << "\t" << value << "\n";
    };
    appendNorm("errorL2VelocityFile.txt", errorL2Velocity);
    appendNorm("errorL2VelocityXFile.txt", errorL2VelocityX);
    appendNorm("errorL2VelocityYFile.txt", errorL2VelocityY);
    appendNorm("errorL2PressureFile.txt", errorL2Pressure);
    appendNorm("errorL2TauXXFile.txt", errorL2TauXX);
    appendNorm("errorL2TauYYFile.txt", errorL2TauYY);
    appendNorm("errorL2TauXYFile.txt", errorL2TauXY);
}
void ComputeErrorL2NormCasePoiseuille()
{
    // Compares the computed tangential velocity and shear stress against the
    // analytical circular-Couette (Taylor-Couette) solution for the annulus
    // r_in=0.2, R_out=0.5, inner-wall angular velocity omega=0.5, viscosity=100,
    // and appends "time <tab> velError <tab> tauError" to errorL2Poiseuille.txt.
    // NOTE(review): tauXX/tauYY/tauXY are hard-coded to zero (elemental stress
    // variables commented out), so the tau error measures the expected stress.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;
    const double r_in = 0.2;
    const double R_out = 0.5;
    const double kappa = r_in / R_out; // radius ratio of the annulus
    const double omega = 0.5;
    const double viscosity = 100.0;
    // BUGFIX: the two accumulators are shared across OpenMP threads; the
    // original code incremented them inside the parallel region without a
    // reduction clause -- a data race (cf. CheckVelocityConvergence).
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            // Shape-function values at the single Gauss point (GI_GAUSS_1).
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            // Interpolate the computed velocity at the Gauss point.
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter; the fixed /3.0 weight assumes triangles --
            // TODO confirm for tetrahedra.
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += static_cast<long double>(geometry(i)->X()) / 3.0;
                bariPosY += static_cast<long double>(geometry(i)->Y()) / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Polar decomposition of the barycenter position.
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
            // Analytical tangential velocity of the circular-Couette flow.
            double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
            double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            // Analytical shear stress tau_{r theta} and its computed rotation
            // from Cartesian components.
            double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area();
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area();
        }
    }
    const double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    const double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}
// Relative velocity convergence check: errorNormDv = ||dv|| / ||v||.
//   NormDv      : norm of the velocity increment returned by the solver
//   errorNormDv : (out) relative error, also reported to the caller
// Returns true when the relative error is below mVelocityTolerance.
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
    errorNormDv = 0;
#pragma omp parallel reduction(+ \
                               : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
            // CLEANUP: removed the unused per-node accumulator (NormVelNode)
            // the original also maintained here.
            for (unsigned int d = 0; d < 3; ++d)
            {
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    // Accumulate across MPI ranks before taking the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    if (NormV == 0.0)
        NormV = 1.00; // avoid division by zero on a fully-at-rest field
    errorNormDv = NormDv / NormV;
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
        std::cout << "The norm of velocity is: " << NormV << std::endl;
        std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
    }
    /* else{ */
    /* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
    /* } */
    return errorNormDv < mVelocityTolerance;
}
// Relative pressure convergence check: errorNormDp = ||dp|| / ||p||.
// Returns true when the relative error is below mPressureTolerance.
bool CheckPressureConvergence(const double NormDp, double &errorNormDp)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormP = 0.00;
    errorNormDp = 0;
#pragma omp parallel reduction(+ \
                               : NormP)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double nodalPressure = itNode->FastGetSolutionStepValue(PRESSURE);
            NormP += nodalPressure * nodalPressure;
        }
    }
    // Accumulate across MPI ranks before taking the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
    NormP = sqrt(NormP);
    if (NormP == 0.0)
        NormP = 1.00; // avoid division by zero on a zero pressure field
    errorNormDp = NormDp / NormP;
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "         The norm of pressure increment is: " << NormDp << std::endl;
        std::cout << "         The norm of pressure is: " << NormP << std::endl;
        std::cout << "         Pressure error: " << errorNormDp << std::endl;
    }
    /* else{ */
    /* std::cout<<"         Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */
    /* } */
    return errorNormDp < mPressureTolerance;
}
// Final-iteration divergence handler for the momentum problem. Sets
// BAD_VELOCITY_CONVERGENCE in the ProcessInfo and, for severe divergence,
// rolls every node back to the previous velocity/pressure/acceleration
// fields. Returns true when the step was rolled back ("fixed").
bool FixTimeStepMomentum(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.005;
    bool fixedTimeStep = false;
    // Be lenient during the first ten time steps of the simulation.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // CLEANUP: the original condition also tested "(x < 0 && x > 0)" (always
    // false) and "x != x" plus two boolean flags duplicating std::isnan /
    // std::isinf; all accepted/rejected outcomes are unchanged (NaN still
    // enters the branch because NaN != 0 and NaN != 1 both hold).
    const bool diverged = DvErrorNorm > minTolerance || std::isnan(DvErrorNorm) || std::isinf(DvErrorNorm);
    if (diverged && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
        minTolerance = 0.05; // stricter threshold for a full rollback
        if (DvErrorNorm > minTolerance)
        {
            std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
            fixedTimeStep = true;
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    // Restore the step n-1 fields on every node.
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
// Mid-loop blow-up detector for the momentum iterations: when the relative
// error exceeds ~1 (or is NaN/Inf), flags BAD_VELOCITY_CONVERGENCE and rolls
// every node back to the previous fields. Returns true when rolled back.
bool CheckMomentumConvergence(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double minTolerance = 0.99999;
    bool fixedTimeStep = false;
    // CLEANUP: removed the always-false "(x < 0 && x > 0)" clause and the
    // redundant "x != x" / boolean-flag duplicates of std::isnan/std::isinf;
    // outcomes are unchanged (NaN still enters: NaN != 0 and NaN != 1 hold).
    const bool diverged = DvErrorNorm > minTolerance || std::isnan(DvErrorNorm) || std::isinf(DvErrorNorm);
    if (diverged && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "           BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << "      I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                // Restore the step n-1 fields on every node.
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
// Final-iteration divergence handler for the continuity problem: flags
// BAD_PRESSURE_CONVERGENCE in the ProcessInfo when the pressure error
// diverged; returns true in that case ("time step must be fixed").
bool FixTimeStepContinuity(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.01;
    bool fixedTimeStep = false;
    // Be lenient during the first ten time steps of the simulation.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // CLEANUP: dropped the always-false "(x < 0 && x > 0)" clause and the
    // redundant "x != x" / boolean-flag duplicates of std::isnan/std::isinf;
    // outcomes are unchanged (NaN still enters: NaN != 0 and NaN != 1 hold).
    const bool diverged = DvErrorNorm > minTolerance || std::isnan(DvErrorNorm) || std::isinf(DvErrorNorm);
    if (diverged && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        fixedTimeStep = true;
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Convergence tolerance for the velocity (momentum) sub-problem.
double mVelocityTolerance;
/// Convergence tolerance for the pressure (continuity) sub-problem.
double mPressureTolerance;
/// Maximum number of pressure iterations per step.
unsigned int mMaxPressureIter;
/// Spatial dimension of the problem (2 or 3).
unsigned int mDomainSize;
/// Time-integration order, read from the solver configuration.
unsigned int mTimeOrder;
/// Whether the DOF set is rebuilt each step (e.g. for changing meshes).
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
 * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
 * 4 : Pressure step
 * 5 : Computation of projections
 * 6 : End of step velocity
 */
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Reads the solver configuration and wires up the momentum and pressure
/// sub-strategies. Throws if either strategy is missing from the settings.
/// @param rSolverConfig configuration object providing strategies/tolerances
virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
    KRATOS_TRY;

    mTimeOrder = rSolverConfig.GetTimeOrder();

    // Check that input parameters are reasonable and sufficient.
    this->Check();

    mDomainSize = rSolverConfig.GetDomainSize();
    mReformDofSet = rSolverConfig.GetReformDofSet();
    BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());

    // Momentum (velocity) strategy is mandatory: fail fast if absent.
    if (!rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy))
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
    }
    rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
    /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */

    // Pressure strategy is mandatory as well.
    if (!rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy))
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
    }
    rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
    rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);

    // Re-validate once the strategies are in place (the original code also
    // called Check() twice: before and after strategy lookup).
    this->Check();

    KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// NOTE(review): declared to return TwoStepVPStrategy& but the body returns
// nothing — undefined behavior if it were ever invoked. Together with the
// copy constructor below it sits in the "Un accessible methods" section,
// i.e. the pre-C++11 idiom for forbidding copies (modern form: = delete).
TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther) {}
/// Copy constructor.
// NOTE(review): empty body — also only present to block copying.
TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {}
///@}
}; /// Class TwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
|
GB_unaryop__lnot_uint16_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_int64
// op(A') function: GB_tran__lnot_uint16_int64
// C type: uint16_t
// A type: int64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint16_t) !(Ax [p] != 0) for p in [0, anz): logical NOT of an
// int64 array, result cast to uint16. Embarrassingly parallel; this file is
// auto-generated, so only comments are touched here.
GrB_Info GB_unop__lnot_uint16_int64
(
uint16_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* controls: caller falls back to generic
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose + typecast + unary op, implemented by the
// shared template GB_unaryop_transpose.c (phase 2). Auto-generated code;
// only comments are touched here.
GrB_Info GB_tran__lnot_uint16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* controls: caller falls back to generic
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` operands.
 *
 * Mirrors the classic glibc-manual algorithm: *y is normalized in place so
 * that the microsecond difference is non-negative (callers must not rely on
 * y afterwards). Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry surplus microseconds (beyond one second) into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the usec difference is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x's (adjusted) seconds lag behind y's. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
for (t4=max(max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32)),ceild(24*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(12*t1+Nx+21,32)),floord(24*t2+Nx+20,32)),floord(24*t3+Nx+20,32)),floord(24*t1-24*t2+Nz+Nx+19,32));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),32*t4+30),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
CT_OMP_TARGET_IMPL.c | /*
* _CT_OMP_TARGET_IMPL_C
*
* Copyright (C) 2017-2021 Tactical Computing Laboratories, LLC
* All Rights Reserved
* contact@tactcomplabs.com
*
* See LICENSE in the top level directory for licensing details
*/
#include <omp.h>
#include <stdint.h>
/* OpenMP Target Benchmark Implementations
*
* Benchmark implementations are in the form:
*
* void BENCHTYPE_ATOMTYPE( uint64_t *ARRAY, uint64_t *IDX,
* unsigned long long iters,
* unsigned long long pes )
*
*/
/* RAND_ADD: atomic fetch-add benchmark at random positions.
 * Each of `pes` teams covers `iters` updates of ARRAY[IDX[i]]; within a team
 * the iterations are split evenly across threads, the last thread taking the
 * remainder. NOTE(review): start = team_num*iters + ..., so ARRAY/IDX are
 * presumably sized for pes*iters entries — confirm against the caller. */
void RAND_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_fetch_add( &ARRAY[IDX[i]], (uint64_t)(0x1), __ATOMIC_RELAXED );
}
}
}
}
/* RAND_CAS: compare-and-swap variant of RAND_ADD. Each CAS uses the current
 * cell as expected, desired, and target, so the value is unchanged — only
 * the atomic operation itself is being measured. */
void RAND_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &ARRAY[IDX[i]], &ARRAY[IDX[i]], ARRAY[IDX[i]],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* STRIDE1_ADD: unit-stride atomic fetch-add benchmark — each thread walks a
 * contiguous slice of ARRAY, adding 0xF per element. Work split per team and
 * per thread as in RAND_ADD (last thread absorbs the remainder). */
void STRIDE1_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_fetch_add( &ARRAY[i], (uint64_t)(0xF), __ATOMIC_RELAXED );
}
}
}
}
/* STRIDE1_CAS: unit-stride compare-and-swap variant; the value-preserving
 * CAS (expected == desired == current) measures only the atomic cost. */
void STRIDE1_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* STRIDEN_ADD: strided atomic fetch-add benchmark — like STRIDE1_ADD but the
 * index advances by `stride` elements each iteration, and each thread's
 * start offset is also scaled by stride. ARRAY must be sized accordingly. */
void STRIDEN_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes, stride)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads) * stride) );
for( i=start; i<(start+iters_per_thread); i+=stride ){
__atomic_fetch_add( &ARRAY[i], (uint64_t)(0xF), __ATOMIC_RELAXED );
}
}
}
}
/* STRIDEN_CAS: strided compare-and-swap variant (value-preserving CAS). */
void STRIDEN_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes, stride)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads) * stride) );
for( i=start; i<(start+iters_per_thread); i+=stride ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* PTRCHASE_ADD: pointer-chase benchmark using atomic loads. Each step reads
 * IDX[start] (fetch-add of 0 == atomic load) and uses the value as the next
 * index, forcing serially dependent memory accesses. */
void PTRCHASE_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=0; i<iters_per_thread; i++ ){
start = __atomic_fetch_add( &IDX[start],
(uint64_t)(0x00ull),
__ATOMIC_RELAXED );
}
}
}
}
/* PTRCHASE_CAS: CAS variant of the pointer chase.
 * NOTE(review): `&start` is the *expected* argument, so when the CAS fails
 * the observed IDX[start] is written back into `start` — this appears to be
 * how the chase advances; confirm against the benchmark's intent. */
void PTRCHASE_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=0; i<iters_per_thread; i++ ){
__atomic_compare_exchange_n( &IDX[start], &start, IDX[start],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* SG_ADD: scatter-gather benchmark. Per iteration: atomically load source
 * and destination indices from IDX[i] and IDX[i+1] (fetch-add of 0), bump
 * ARRAY[src] by 1 taking its old value, and add that value into ARRAY[dest].
 * NOTE(review): reads IDX[i+1], so IDX needs one extra trailing element. */
void SG_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
src = __atomic_fetch_add( &IDX[i], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[src], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
}
/* SG_CAS: CAS variant of SG_ADD. The first three CASes exploit the failure
 * path of __atomic_compare_exchange_n to load the current cell into the
 * local (expected) variable — e.g. src ends up equal to IDX[i]; the last CAS
 * stores `val` into ARRAY[dest]. */
void SG_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &IDX[i], &src, IDX[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[src], &val, ARRAY[src],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* CENTRAL_ADD: maximum-contention benchmark — every thread in every team
 * hammers the single location ARRAY[0] with atomic increments. */
void CENTRAL_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
for( i=0; i<iters_per_thread; i++ ){
__atomic_fetch_add( &ARRAY[0], (uint64_t)(0x1), __ATOMIC_RELAXED );
}
}
}
}
/* CENTRAL_CAS: contention benchmark via value-preserving CAS on ARRAY[0]. */
void CENTRAL_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
for( i=0; i<iters_per_thread; i++ ){
__atomic_compare_exchange_n( &ARRAY[0], &ARRAY[0], ARRAY[0],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* SCATTER_ADD: sequential read, scattered write. Per iteration: load the
 * destination index from IDX[i+1], take ARRAY[i]'s old value (bumping it by
 * 1), and add that value into ARRAY[dest].
 * NOTE(review): reads IDX[i+1], so IDX needs one extra trailing element. */
void SCATTER_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[i], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
}
/* SCATTER_CAS: CAS variant — the first two CASes load IDX[i+1]/ARRAY[i] into
 * dest/val via the CAS failure path; the last CAS stores val at the
 * scattered destination. */
void SCATTER_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &val, ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* GATHER_ADD: scattered read, sequential write — the mirror of SCATTER_ADD.
 * Per iteration: load the source index from IDX[i+1], take ARRAY[dest]'s old
 * value (bumping it by 1), and add it into ARRAY[i].
 * NOTE(review): reads IDX[i+1], so IDX needs one extra trailing element. */
void GATHER_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[dest], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[i], val, __ATOMIC_RELAXED );
}
}
}
}
/* GATHER_CAS: CAS variant — loads IDX[i+1]/ARRAY[dest] into dest/val via the
 * CAS failure path, then stores val at the sequential position ARRAY[i]. */
void GATHER_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &val, ARRAY[dest],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
/* EOF */
|
progress_counter.h | /*
* Copyright (C) 2015, Nils Moehrle
* TU Darmstadt - Graphics, Capture and Massively Parallel Computing
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD 3-Clause license. See the LICENSE.txt file for details.
*/
#ifndef TEX_PROGRESSCOUNTER_HEADER
#define TEX_PROGRESSCOUNTER_HEADER
#include "util/timer.h"
#include <atomic>
#include <cmath>
#include <fstream>
#include <iostream>
#include <sstream>
// Progress display styles: ETA additionally prints an estimated time to
// completion; SIMPLE prints only the percentage.
enum ProgressCounterStyle { ETA, SIMPLE };
// Carriage-return + 80 spaces + carriage-return: erases the current TTY line.
static const std::string clear = "\r" + std::string(80, ' ') + "\r";
/// Thread-safe progress counter that reports task completion on /dev/tty.
/// NOTE(review): all reporting bodies below are currently commented out, so
/// this class is effectively a no-op — presumably disabled on purpose.
class ProgressCounter {
    private:
        std::ofstream tty;            // direct TTY stream so output bypasses stdout redirection
        util::WallTimer timer;        // wall-clock timer for elapsed/ETA computation
        std::string task;             // human-readable task label
        std::size_t max;              // total number of expected inc() calls
        std::atomic_size_t count;     // completed units; atomic for concurrent inc()
    public:
        ProgressCounter(std::string const& _task, std::size_t max);
        // Prints current progress; T selects ETA or SIMPLE formatting.
        template <ProgressCounterStyle T>
        void progress(void);
        // Increments the completed-unit count.
        void inc(void);
        // Restarts the counter for a new task.
        void reset(std::string const& _task);
};
inline ProgressCounter::ProgressCounter(
    std::string const& _task,
    std::size_t _max)
    : tty("/dev/tty", std::ios_base::out), timer(), task(_task), max(_max),
    count(0) {
}
// Currently a no-op: the completion message below is disabled.
inline void ProgressCounter::inc(void) {
//    std::size_t tmp;
//    tmp = ++count;
//    if(tmp == max) {
//        std::stringstream ss;
//        ss << clear << task << " 100%... done. (Took "
//            << timer.get_elapsed_sec() << "s)";
//        #pragma omp critical(progress_counter_inc)
//        std::cout << ss.rdbuf() << std::endl;
//    }
}
// Currently a no-op: state reset is disabled along with reporting.
inline void ProgressCounter::reset(std::string const& _task) {
//    timer.reset();
//    count = 0;
//    task = _task;
}
// Currently a no-op: percentage/ETA printing is disabled.
template <ProgressCounterStyle T>
void ProgressCounter::progress(void) {
//    if ((max > 100 && count % (max / 100) == 0) || max <= 100) {
//        float percent = static_cast<float>(count) / max;
//        int ipercent = std::floor(percent * 100.0f + 0.5f);
//        std::stringstream ss;
//        ss << clear << task << " " << ipercent << "%...";
//        if (T == ETA && ipercent > 3){
//            std::size_t const elapsed = timer.get_elapsed();
//            std::size_t eta = (elapsed / percent - elapsed) / 1000;
//            ss << " eta ~ " << eta << " s";
//        }
//        #pragma omp critical(progress_counter_progress)
//        tty << ss.rdbuf() << std::flush;
//    }
}
#endif /* TEX_PROGRESSCOUNTER_HEADER */
|
Singleton.h | /**
* Copyright (C) 2007-2011 YU Zhi. All rights reserved.
* $Id$
* @file singleton_holder.h
*
* created on: 2008-04-16
* Author: salmon
*/
#ifndef SP_SINGLETON_H_
#define SP_SINGLETON_H_
namespace sp
{
/** @ingroup design_pattern
*
* @addtogroup singleton Singleton
* @{
*
* @brief singleton
*
* @note Meyers Singleton,
* Ref:Andrei Alexandrescu Chap 6.4
* Modern C++ Design Generic Programming and Design Patterns Applied 2001 Addison Wesley ,
*/
template <class T>
class Singleton
{
public:
    /// Returns the single shared instance of T.
    ///
    /// Relies on C++11 "magic statics": initialization of a function-local
    /// static is guaranteed to be thread-safe by the language. The original
    /// code wrapped this in an unlocked double-checked pattern whose
    /// critical section was commented out ("TOD add some for mt critical"),
    /// which was not thread-safe; the language guarantee replaces it.
    static T &instance()
    {
        static T tmp;      // constructed exactly once, thread-safe since C++11
        pInstance_ = &tmp; // kept up to date for any code reading pInstance_
        return tmp;
    }

protected:
    Singleton() {}
    ~Singleton() {}

    // Retained for backward compatibility with subclasses/friends that may
    // inspect it; no longer load-bearing for thread safety.
    static T *volatile pInstance_;
};
template <class T>
T *volatile Singleton<T>::pInstance_ = 0;
/** @} */
} // namespace sp
#endif // SP_SINGLETON_H_
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2020, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (0)
#endif
#if !TINYEXR_USE_MINIZ
#include <zlib.h>
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (0)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
// EXR version/flags information read from the file's version header.
// Fill with ParseEXRVersionFromFile() / ParseEXRVersionFromMemory().
typedef struct _EXRVersion {
int version; // this must be 2
// tile format image;
// not zero for only a single-part "normal" tiled file (according to spec.)
int tiled;
int long_name; // long name attribute
// deep image(EXR 2.0);
// for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image;
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
// A single EXR header attribute; the payload is kept as an opaque byte blob.
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*; raw attribute payload of `size` bytes
int size; // size of `value` in bytes
int pad0; // padding for alignment
} EXRAttribute;
// Per-channel information parsed from the header's `channels` attribute.
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type; // TINYEXR_PIXELTYPE_*
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3]; // padding for alignment
} EXRChannelInfo;
// A single decoded tile of a tiled EXR part.
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
// 2D integer bounding box (min/max corners), used for the header's
// `data_window` and `display_window` fields.
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
// Parsed single-part EXR header. Initialize with InitEXRHeader(), fill via
// ParseEXRHeaderFrom(Memory|File), and release with FreeEXRHeader().
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
// for a single-part file, agree with the version field bit 11
// for a multi-part file, it is consistent with the type of part
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes(excludes required attributes(e.g. `channels`,
// `compression`, etc)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
// name attribute required for multipart files;
// must be unique and non empty (according to spec.);
// use EXRSetNameAttr for setting value;
// max 255 character allowed - excluding terminating zero
char name[256];
} EXRHeader;
// Collection of per-part headers for a multi-part EXR file.
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers; // [num_headers]
} EXRMultiPartHeader;
// Decoded image data for one EXR part. Initialize with InitEXRImage(),
// load via LoadEXRImageFrom(File|Memory), and release with FreeEXRImage().
// Exactly one of `tiles` and `images` is non-NULL, depending on whether the
// part is tiled or scanline format.
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
struct _EXRImage* next_level; // NULL if scanline format or image is the last level.
int level_x; // x level index
int level_y; // y level index
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
// Collection of per-part images for a multi-part EXR file.
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images; // [num_images]
} EXRMultiPartImage;
// Decoded deep EXR image (variable sample count per pixel). Filled by
// LoadDeepEXR(); the application must free `image` and `offset_table`.
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0; // padding for alignment
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba`. Result image format is: float x RGBA x
// width x height. Returns negative value and may set error string in `err` when
// there's an error When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
faiiled
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
// zlib-style stream object used by the mz_deflate*/mz_inflate* APIs.
// The caller supplies and updates next_in/avail_in and next_out/avail_out
// between calls; the totals, adler, and internal state are maintained by the
// library.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
// Size limits/buffer sizes used by the ZIP archive APIs.
enum {
// Size of the internal I/O buffer used when streaming archive data.
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
// Capacity of mz_zip_archive_file_stat::m_filename.
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
// Capacity of mz_zip_archive_file_stat::m_comment.
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
// Detailed information about one archive file entry, filled in by
// mz_zip_reader_file_stat() from the entry's central directory record.
typedef struct {
// Index of this entry within the archive.
mz_uint32 m_file_index;
// Byte offset of this entry's central directory record.
mz_uint32 m_central_dir_ofs;
// "Version made by" / "version needed to extract" record fields.
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
// General purpose bit flag and compression method (MZ_DEFLATED == 8;
// 0 == stored).
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
// CRC-32 of the file's data.
mz_uint32 m_crc32;
// Compressed and uncompressed sizes, in bytes.
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
// Internal/external file attributes from the central directory record.
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
// Byte offset of the entry's local header within the archive.
mz_uint64 m_local_header_ofs;
// Length of the entry's comment, in bytes.
mz_uint32 m_comment_size;
// Filename and comment (see MZ_ZIP_MAX_ARCHIVE_* for capacities).
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
// Current operating mode of an mz_zip_archive object.
typedef enum {
// Not initialized (or already ended).
MZ_ZIP_MODE_INVALID = 0,
// Initialized via one of the mz_zip_reader_init* functions.
MZ_ZIP_MODE_READING = 1,
// Initialized via one of the mz_zip_writer_init* functions.
MZ_ZIP_MODE_WRITING = 2,
// mz_zip_writer_finalize_archive() has been called; only
// mz_zip_writer_end() is valid now.
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
// ZIP archive state object. Clear it to zero, then initialize with one of
// the mz_zip_reader_init*/mz_zip_writer_init* functions; callers should
// otherwise treat the contents as opaque.
typedef struct mz_zip_archive_tag {
// Total archive size, in bytes.
mz_uint64 m_archive_size;
// Byte offset of the start of the central directory.
mz_uint64 m_central_directory_file_ofs;
// Number of file entries in the archive.
mz_uint m_total_files;
// Current mode -- see mz_zip_mode.
mz_zip_mode m_zip_mode;
// NOTE(review): presumably aligns written file data offsets when non-zero --
// confirm against the implementation.
mz_uint m_file_offset_alignment;
// Optional heap callbacks and their user pointer (used for all internal
// allocations).
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
// I/O callbacks and their user pointer (set up by the init functions).
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
// Internal state, allocated by the init functions.
mz_zip_internal_state *m_pState;
} mz_zip_archive;
// Flags accepted by the reader/writer APIs (OR'd into their flags/
// level_and_flags parameters).
typedef enum {
// Filename matching in mz_zip_reader_locate_file() is case sensitive.
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
// Ignore directory path components when matching filenames.
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
// The supplied buffer already holds compressed data.
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
// NOTE(review): presumably skips sorting the central directory during reader
// init -- confirm against the implementation.
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forwardslash with empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
// An invalid parameter was passed to tinfl_decompress().
TINFL_STATUS_BAD_PARAM = -3,
// The zlib adler-32 check of the decompressed data failed
// (TINFL_FLAG_PARSE_ZLIB_HEADER streams).
TINFL_STATUS_ADLER32_MISMATCH = -2,
// The deflate stream was invalid; decompression cannot continue.
TINFL_STATUS_FAILED = -1,
// Decompression completed successfully.
TINFL_STATUS_DONE = 0,
// More input bytes are needed to make progress (see
// TINFL_FLAG_HAS_MORE_INPUT).
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
// More output buffer space is needed before decompression can continue.
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
// One Huffman table: per-symbol code lengths, a direct-lookup array covering
// codes up to TINFL_FAST_LOOKUP_BITS bits, and a tree array (m_tree) used to
// resolve longer codes.
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Complete decompressor state for tinfl_decompress(). Reset with
// tinfl_init(); all fields are internal to the coroutine-style decompressor.
struct tinfl_decompressor_tag {
// Coroutine resume point (m_state) plus assorted decode state: zlib header
// words, adler-32 accumulators, current block type/flags, counters, and
// per-table symbol counts.
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
// Bit buffer: 64-bit when TINFL_USE_64BIT_BITBUF is set, else 32-bit.
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
// The TINFL_MAX_HUFF_TABLES (3) Huffman tables used during decoding.
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure. All buffers (dictionary, hash chains,
// Huffman tables, output staging) are inline — the tdefl API performs no
// dynamic allocation, which is why there is no corresponding deinit call.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  // Status of the last tdefl_compress() call (checked by mz_deflate()).
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output will be supplied to the
// specified callback. In this case, the user should call the
// tdefl_compress_buffer() API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
// zlib-compatible Adler-32 checksum. Passing a NULL ptr returns the initial
// checksum value. The two 16-bit sums are reduced mod 65521 once per
// 5552-byte block — the largest count that cannot overflow a 32-bit
// accumulator — matching the zlib reference implementation.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 s1 = (mz_uint32)(adler & 0xffff);
  mz_uint32 s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    mz_uint32 i = 0;
    // Process eight bytes per iteration while a full group remains.
    while (i + 7 < block_len) {
      mz_uint32 k;
      for (k = 0; k < 8; ++k) {
        s1 += ptr[k];
        s2 += s1;
      }
      ptr += 8;
      i += 8;
    }
    // Tail of the block, one byte at a time.
    while (i < block_len) {
      s1 += *ptr++;
      s2 += s1;
      ++i;
    }
    s1 %= 65521U;
    s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Karl Malbrain's compact nibble-at-a-time CRC-32 (reflected polynomial
// 0xEDB88320), using a 16-entry table instead of the usual 256 entries.
// Passing a NULL ptr returns the initial CRC value.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
      0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 c;
  size_t n;
  if (!ptr) return MZ_CRC32_INIT;
  c = ~(mz_uint32)crc;
  for (n = 0; n < buf_len; ++n) {
    mz_uint8 byte = ptr[n];
    c = (c >> 4) ^ s_crc32[(c ^ byte) & 0xF];        // low nibble
    c = (c >> 4) ^ s_crc32[(c ^ (byte >> 4)) & 0xF]; // high nibble
  }
  return ~c;
}
// Frees a heap block returned by the miniz high-level helper functions.
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc callback installed when the caller leaves pStream->zalloc
// NULL. NOTE(review): items * size may overflow size_t for huge requests;
// within this file it is only called with items == 1 and small fixed sizes,
// but worth confirming for external callers.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
// Default zfree callback installed when the caller leaves pStream->zfree NULL.
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
// Returns the miniz version string.
const char *mz_version(void) { return MZ_VERSION; }
// Initializes a compressor with defaults (deflate method, zlib framing,
// mem_level 9, default strategy); see mz_deflateInit2() for the full set.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}
// zlib-style deflateInit2: validates the parameters, installs default
// malloc/free allocators if none were supplied, allocates a tdefl_compressor
// and initializes it with flags derived from (level, window_bits, strategy).
// Returns MZ_OK, or MZ_STREAM_ERROR / MZ_PARAM_ERROR / MZ_MEM_ERROR.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream) return MZ_STREAM_ERROR;
  // Only the deflate method is supported. window_bits must equal
  // MZ_DEFAULT_WINDOW_BITS (zlib framing) or its negation (raw deflate);
  // mem_level is range-checked for zlib compatibility only.
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    // mz_deflateEnd() releases pComp through pStream->zfree.
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// Resets an initialized deflate stream so it can compress a new buffer,
// reusing the compression flags chosen at mz_deflateInit2() time.
int mz_deflateReset(mz_streamp pStream) {
  tdefl_compressor *pComp;
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pComp = (tdefl_compressor *)pStream->state;
  tdefl_init(pComp, NULL, NULL, pComp->m_flags);
  return MZ_OK;
}
// zlib-style deflate(): compresses as much input as possible into the output
// buffer. flush must be in [0, MZ_FINISH]; MZ_PARTIAL_FLUSH is treated as
// MZ_SYNC_FLUSH. Returns MZ_OK, MZ_STREAM_END once the stream is finished,
// or MZ_STREAM_ERROR / MZ_BUF_ERROR.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  // Once the compressor reported DONE, only MZ_FINISH is a valid request.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    // tdefl_compress() reports how much it consumed/produced via in_bytes /
    // out_bytes; advance the stream cursors and running totals to match.
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// Releases the compressor state attached to the stream. Safe to call more
// than once: a stream whose state is already NULL is a no-op success.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// Conservative upper bound on the compressed size of source_len bytes.
// Deliberately loose: a tight bound is hard to derive from the way tdefl
// emits blocks, so the larger of two generous estimates is returned.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  mz_ulong expand_bound = 128 + (source_len * 110) / 100;
  mz_ulong raw_bound = 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  (void)pStream;
  return MZ_MAX(expand_bound, raw_bound);
}
// One-shot compression of pSource (source_len bytes) into pDest at the given
// level. On entry *pDest_len is the capacity of pDest; on success it is
// updated to the compressed size. Returns MZ_OK or an mz_* error code.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));
  // Guard against mz_ulong being 64-bit: the stream fields are 32-bit.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK) return status;
  status = mz_deflate(&stream, MZ_FINISH);
  if (status == MZ_STREAM_END) {
    *pDest_len = stream.total_out;
    return mz_deflateEnd(&stream);
  }
  // Anything but STREAM_END means the output didn't fit or an error occurred.
  mz_deflateEnd(&stream);
  return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
// One-shot compression at the default level; see mz_compress2().
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// Upper bound on the buffer size needed by mz_compress(); delegates to
// mz_deflateBound(), which ignores its stream argument.
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// Per-stream state for the zlib-style inflate wrapper: the low-level tinfl
// decompressor plus a circular dictionary (TINFL_LZ_DICT_SIZE bytes) that
// buffers decompressed output when the caller's buffer is too small.
typedef struct {
  tinfl_decompressor m_decomp;
  // m_dict_ofs/m_dict_avail track the circular window; m_first_call and
  // m_has_flushed drive the MZ_FINISH fast path in mz_inflate().
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;
// Initializes a stream for decompression. window_bits must equal
// MZ_DEFAULT_WINDOW_BITS (zlib-framed input) or its negation (raw deflate).
// Returns MZ_OK, or MZ_STREAM_ERROR / MZ_PARAM_ERROR / MZ_MEM_ERROR.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pState;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  // Reset the public stream bookkeeping.
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Fall back to the malloc/free based callbacks when none were supplied.
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pState = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                            sizeof(inflate_state));
  if (!pState) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pState;
  tinfl_init(&pState->m_decomp);
  pState->m_window_bits = window_bits;
  pState->m_dict_ofs = 0;
  pState->m_dict_avail = 0;
  pState->m_first_call = 1;
  pState->m_has_flushed = 0;
  pState->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  return MZ_OK;
}
// Initializes a decompressor expecting zlib-framed input (default window).
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
// zlib-style inflate(): decompresses as much input as possible into the
// output buffer. flush may be 0 (no flush), MZ_SYNC_FLUSH, or MZ_FINISH
// (MZ_PARTIAL_FLUSH is treated as MZ_SYNC_FLUSH). Returns MZ_OK,
// MZ_STREAM_END once the stream is fully decoded, or an error code.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers
    // are large enough to hold the entire compressed/decompressed file, so
    // we can decompress directly into the caller's buffer (no dictionary).
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // Drain previously decompressed bytes still buffered in the dictionary
  // before running the decompressor again.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    in_bytes = pStream->avail_in;
    // Decompress into the circular dictionary, then copy out whatever fits
    // in the caller's buffer; the rest stays in m_dict for the next call.
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
    // uncompressed data left in the output dictionary -
    // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
    // without supplying more input or by setting flush
    // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// Releases the decompressor state attached to the stream. Safe to call more
// than once: a stream whose state is already NULL is a no-op success.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// One-shot decompression of pSource (source_len bytes) into pDest. On entry
// *pDest_len is the capacity of pDest; on success it is updated to the
// decompressed size. Returns MZ_OK or an mz_* error code.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));
  // Guard against mz_ulong being 64-bit: the stream fields are 32-bit.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;
  status = mz_inflate(&stream, MZ_FINISH);
  if (status == MZ_STREAM_END) {
    *pDest_len = stream.total_out;
    return mz_inflateEnd(&stream);
  }
  mz_inflateEnd(&stream);
  // A BUF_ERROR with all input consumed means the stream was truncated;
  // report it as a data error, matching zlib's uncompress().
  return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
                                                          : status;
}
// Maps an mz_* status code to a short human-readable description.
// Returns NULL for unrecognized codes (note: MZ_OK maps to "").
const char *mz_error(int err) {
  static const struct {
    int code;
    const char *desc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  size_t i;
  // Linear scan is fine: the table has only ten entries.
  for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) {
    if (s_error_descs[i].code == err) return s_error_descs[i].desc;
  }
  return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
// Decompresses an entire deflate stream from pSrc_buf into a heap buffer
// that this function allocates and grows as needed. On success returns the
// buffer (caller must MZ_FREE() it) and stores its length in *pOut_len;
// returns NULL on corrupt data, truncated input, or allocation failure.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pOut = NULL;
  size_t in_ofs = 0, capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_avail = src_buf_len - in_ofs;
    size_t out_avail = capacity - *pOut_len;
    size_t grown_capacity;
    void *pGrown;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + in_ofs, &in_avail,
        (mz_uint8 *)pOut, pOut ? (mz_uint8 *)pOut + *pOut_len : NULL,
        &out_avail,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    // A negative status is a hard failure; NEEDS_MORE_INPUT means the stream
    // was truncated, since we already handed over everything we have.
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    in_ofs += in_avail;
    *pOut_len += out_avail;
    if (status == TINFL_STATUS_DONE) break;
    // Output buffer exhausted: double the capacity, starting at 128 bytes.
    grown_capacity = capacity * 2;
    if (grown_capacity < 128) grown_capacity = 128;
    pGrown = MZ_REALLOC(pOut, grown_capacity);
    if (!pGrown) {
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    pOut = pGrown;
    capacity = grown_capacity;
  }
  return pOut;
}
// One-shot decompression of a raw deflate stream into a caller-provided
// buffer. Returns the number of bytes written on success, or
// TINFL_DECOMPRESS_MEM_TO_MEM_FAILED if the stream is corrupt, truncated,
// or the output buffer is too small.
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  // All input is present up front and the output buffer is linear, so drop
  // HAS_MORE_INPUT and force the non-wrapping output mode.
  const int eff_flags = (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
  tinfl_init(&decomp);
  status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                            (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf,
                            &out_buf_len, eff_flags);
  if (status != TINFL_STATUS_DONE) return TINFL_DECOMPRESS_MEM_TO_MEM_FAILED;
  return out_buf_len;
}
// Streams decompressed output through pPut_buf_func, using an internal
// TINFL_LZ_DICT_SIZE circular dictionary as scratch space. *pIn_buf_size is
// updated with the number of input bytes consumed. Returns 1 on success,
// 0 on decompression failure or callback abort, and TINFL_STATUS_FAILED
// when the dictionary allocation itself fails.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tinfl_decompressor decomp;
  size_t in_ofs = 0, dict_ofs = 0;
  int result = 0;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_avail = *pIn_buf_size - in_ofs;
    size_t out_avail = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_ofs,
                         &in_avail, pDict, pDict + dict_ofs, &out_avail,
                         (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                                    TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_ofs += in_avail;
    // Hand newly produced bytes to the callback; a zero return aborts.
    if ((out_avail) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)out_avail, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    // Advance the write cursor around the circular dictionary.
    dict_ofs = (dict_ofs + out_avail) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_ofs;
  return result;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
// Maps the match-length byte stored in an LZ record to its deflate
// length symbol (257..285); see the indexing in tdefl_compress_lz_codes.
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
    268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
    272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
    274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
    276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
    280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    285};
// Number of extra distance/length bits transmitted after each length symbol,
// indexed by the same match-length byte as s_tdefl_len_sym.
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
// Distance symbol for match distances indexed by (match_dist & 511); used
// when match_dist < 512 in tdefl_compress_lz_codes.
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
// Extra bits accompanying each small-distance symbol (same indexing).
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
// Distance symbol for larger match distances, indexed by (match_dist >> 8);
// used when match_dist >= 512 in tdefl_compress_lz_codes.
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
    24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
// Extra bits accompanying each large-distance symbol (same indexing).
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
// Symbol/frequency pair used while building Huffman code lengths.
typedef struct {
  // m_key doubles as the radix-sort key: initially the symbol's frequency,
  // then (after tdefl_calculate_minimum_redundancy) its code length.
  // m_sym_index is the symbol's original index in the count table.
  mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
// Counting/radix sort of tdefl_sym_freq records on the 16-bit m_key field,
// least-significant byte first. pSyms0 holds the input; pSyms1 is scratch of
// the same size. Returns whichever buffer ends up holding the sorted data.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 hist[256 * 2];
  mz_uint32 num_passes = 2, shift, pass, idx;
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;
  // Build the byte histograms for both passes in a single sweep.
  MZ_CLEAR_OBJ(hist);
  for (idx = 0; idx < num_syms; idx++) {
    mz_uint key = pSyms0[idx].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }
  // Skip the high-byte pass entirely when every key fits in one byte.
  while ((num_passes > 1) && (num_syms == hist[(num_passes - 1) * 256]))
    num_passes--;
  for (shift = 0, pass = 0; pass < num_passes; pass++, shift += 8) {
    const mz_uint32 *pHist = &hist[pass << 8];
    tdefl_sym_freq *pTmp;
    mz_uint offsets[256], ofs = 0;
    // Prefix-sum the histogram into starting offsets for each bucket.
    for (idx = 0; idx < 256; idx++) {
      offsets[idx] = ofs;
      ofs += pHist[idx];
    }
    // Stable scatter into the destination buffer, then swap roles.
    for (idx = 0; idx < num_syms; idx++)
      pDst[offsets[(pSrc[idx].m_key >> shift) & 0xFF]++] = pSrc[idx];
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place minimum-redundancy (Huffman) code length computation over a
// frequency-sorted array A[0..n-1] (ascending m_key = frequency). On return
// A[i].m_key holds the code length for the i-th symbol. The array is reused
// in three phases: tree weights/parent links, internal-node depths, and
// finally leaf depths — the statement order is load-bearing throughout.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  // Trivial cases: no symbols, or one symbol which gets a 1-bit code.
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place. A[next].m_key becomes the
  // weight of internal node 'next'; consumed entries store parent indices.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    // First child: cheapest of (next unconsumed internal node, next leaf).
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = A[leaf++].m_key;
    // Second child, same selection rule.
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert parent links to internal-node depths (root depth 0).
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: assign leaf depths from the internal-node depth counts.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    // Count internal nodes at the current depth.
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    // Remaining available slots at this depth become leaves.
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
// Length-limits a Huffman code described by pNum_codes[i] = number of codes
// of size i: folds all counts above max_code_size into the max_code_size
// bucket, then repairs the Kraft inequality by repeatedly demoting codes.
// No-op for degenerate tables of zero or one codes.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  mz_uint32 kraft_total = 0;
  int size;
  if (code_list_len <= 1) return;
  // Clamp everything longer than the limit down to the limit.
  for (size = max_code_size + 1; size <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE;
       size++)
    pNum_codes[max_code_size] += pNum_codes[size];
  // Kraft sum scaled so a complete tree totals exactly 1 << max_code_size.
  for (size = max_code_size; size > 0; size--)
    kraft_total += (((mz_uint32)pNum_codes[size]) << (max_code_size - size));
  // While oversubscribed: drop one max-length code, shorten the deepest
  // non-empty shorter level by one, and add two codes one level below it.
  while (kraft_total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (size = max_code_size - 1; size > 0; size--) {
      if (pNum_codes[size]) {
        pNum_codes[size]--;
        pNum_codes[size + 1] += 2;
        break;
      }
    }
    kraft_total--;
  }
}
// Builds canonical Huffman code table 'table_num' for the compressor.
// static_table != 0: code sizes already present in m_huff_code_sizes are
// taken as given (the fixed deflate tables). Otherwise code sizes are
// derived from the symbol frequencies in m_huff_count and length-limited to
// code_size_limit. In both cases the bit-reversed canonical codes are
// written to m_huff_codes.
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Histogram the preset code sizes directly.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect only the symbols that actually occur.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    // Sort by frequency, compute optimal code lengths, then length-limit.
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    // pSyms[i].m_key now holds the code length of each used symbol.
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Assign sizes: longest codes go to the least frequent symbols (pSyms is
    // sorted ascending by frequency, so walk it from the back).
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Canonical code assignment: first code of each size per RFC 1951.
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  // Store each code bit-reversed: deflate transmits codes MSB-first while
  // the bit buffer is filled LSB-first.
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}
// Appends the low 'l' bits of 'b' to the compressor's bit buffer, flushing
// whole bytes to m_pOutput_buf as they accumulate (writes are dropped once
// the buffer end is reached; the caller checks m_pOutput_buf afterwards).
// Expands in a context that defines a local 'd' (tdefl_compressor*); bits of
// 'b' above 'l' must be zero (asserted).
#define TDEFL_PUT_BITS(b, l)                               \
  do {                                                     \
    mz_uint bits = b;                                      \
    mz_uint len = l;                                       \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                 \
    d->m_bit_buffer |= (bits << d->m_bits_in);             \
    d->m_bits_in += len;                                   \
    while (d->m_bits_in >= 8) {                            \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)         \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
      d->m_bit_buffer >>= 8;                               \
      d->m_bits_in -= 8;                                   \
    }                                                      \
  }                                                        \
  MZ_MACRO_END
// Flushes a pending run of repeated non-zero code sizes into
// packed_code_sizes[]: runs shorter than 3 are emitted as plain lengths,
// runs of 3-6 as symbol 16 (copy previous) plus a repeat count biased by 3.
// Also updates the code-length-code histogram m_huff_count[2]. Expands in a
// context defining d, rle_repeat_count, prev_code_size, packed_code_sizes
// and num_packed_code_sizes.
#define TDEFL_RLE_PREV_CODE_SIZE()                                   \
  {                                                                  \
    if (rle_repeat_count) {                                          \
      if (rle_repeat_count < 3) {                                    \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(            \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);  \
        while (rle_repeat_count--)                                   \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
      } else {                                                       \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 16;             \
        packed_code_sizes[num_packed_code_sizes++] =                 \
            (mz_uint8)(rle_repeat_count - 3);                        \
      }                                                              \
      rle_repeat_count = 0;                                          \
    }                                                                \
  }
// Flushes a pending run of zero code sizes into packed_code_sizes[]:
// runs < 3 are emitted literally, runs of 3-10 as symbol 17 (count biased
// by 3), runs of 11-138 as symbol 18 (count biased by 11). Also updates the
// code-length-code histogram m_huff_count[2]. Expands in a context defining
// d, rle_z_count, packed_code_sizes and num_packed_code_sizes.
#define TDEFL_RLE_ZERO_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_z_count) {                                                    \
      if (rle_z_count < 3) {                                              \
        d->m_huff_count[2][0] =                                           \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);             \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                     \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 17;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 3);                                  \
      } else {                                                            \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 18;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 11);                                 \
      }                                                                   \
      rle_z_count = 0;                                                    \
    }                                                                     \
  }
// Transmission order of the code-length code's lengths (RFC 1951, 3.2.7):
// most useful symbols first so trailing zeros can be dropped via HCLEN.
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
// Emits the header of a dynamic-Huffman deflate block (RFC 1951, 3.2.7):
// builds optimal literal/length and distance tables from the gathered symbol
// counts, run-length-encodes the concatenated code lengths (symbols 16/17/18
// for repeats), builds the code-length code, then writes HLIT/HDIST/HCLEN
// and all three table descriptions to the bit stream.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) always occurs exactly once.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused codes, keeping the minimums deflate requires
  // (at least 257 literal/length codes and 1 distance code).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  // Concatenate both tables' code lengths for joint RLE compression.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  // RLE pass: track runs of zeros and runs of the previous code size,
  // flushing via the TDEFL_RLE_* macros when a run breaks or maxes out.
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {  // 138 = longest run symbol 18 can encode
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {  // 6 = longest run for symbol 16
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  // Build the code-length code (7-bit limit) and emit the block header.
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  TDEFL_PUT_BITS(2, 2);                    // BTYPE = 10: dynamic Huffman
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);  // HLIT
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);   // HDIST
  // HCLEN: drop trailing zero lengths in the swizzled transmission order,
  // but always send at least 4 of them.
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the RLE'd code lengths; symbols >= 16 carry extra repeat-count bits
  // (2, 3 or 7 bits for symbols 16, 17, 18 respectively).
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}
// Emits the header of a static (fixed-code) deflate block and loads the
// fixed literal/length and distance code sizes defined by RFC 1951.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  // Fixed literal/length code lengths: symbols 0-143 use 8 bits,
  // 144-255 use 9, 256-279 use 7, and 280-287 use 8.
  memset(p, 8, 144);
  memset(p + 144, 9, 112);
  memset(p + 256, 7, 24);
  memset(p + 280, 8, 8);
  // All 32 fixed distance codes are 5 bits.
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  // Block header: BTYPE = 01 (static Huffman).
  TDEFL_PUT_BITS(1, 2);
}
// mz_bitmasks[n] has the low n bits set (n = 0..16); used to mask off the
// extra-bits field of length/distance codes before emission.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
// Fast LZ-code encoder: walks the interleaved flag/record stream in
// m_lz_code_buf (one flag byte per 8 records; flag bit set = match record of
// 1 length byte + 2 distance bytes, clear = 1 literal byte) and
// Huffman-encodes it into the output buffer. This variant batches bits in a
// 64-bit register and flushes with one unaligned little-endian 64-bit store
// per iteration; only compiled when the platform permits that (see the
// enclosing #if). Returns MZ_FALSE if the output buffer is exhausted.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
// Local bit appender: no per-bit flushing; bytes are flushed in bulk below.
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    // Reload the flag byte for the next 8 records; 0x100 marks refill time.
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match record: length byte followed by 16-bit distance.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal; opportunistically emit up to two more literals while the
      // next flag bits are known to be clear, keeping the loop tight.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    // Flush all complete bytes with a single 64-bit store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  // Replay any leftover (<8) bits through the slow path, which handles
  // buffer-end checks, then emit the end-of-block symbol (256).
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable LZ-code encoder (same interleaved flag/record stream format):
// every code goes through TDEFL_PUT_BITS, which flushes byte-by-byte and is
// safe on any alignment/endianness. Returns MZ_FALSE if the output buffer
// is exhausted.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    // Reload the flag byte for the next 8 records; 0x100 marks refill time.
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match record: length byte + 2 distance bytes (little-endian).
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      // Small distances index the 512-entry table directly; larger ones use
      // the high bits against the 128-entry table.
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
// Emits one complete deflate block: writes the appropriate block header and
// code tables (static or dynamic), then encodes the buffered LZ codes.
// Returns MZ_FALSE if the output buffer overflowed during encoding.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  // Select the header emitter, then flatten the LZ code buffer.
  (static_block ? tdefl_start_static_block : tdefl_start_dynamic_block)(d);
  return tdefl_compress_lz_codes(d);
}
// Terminates the current deflate block and flushes it. Tries a compressed
// (static or dynamic) block first; if compression would expand the data, or
// raw blocks are forced, it rewinds the bit buffer and emits a stored block
// instead. Returns the number of bytes still pending in the internal output
// buffer (0 = fully flushed), or a negative tdefl_status on failure.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n, use_raw_block =
             ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
             (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Write straight into the caller's buffer when it's big enough; otherwise
  // stage through the internal m_output_buf.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Finalize the partially-filled flags byte (and drop it if it's empty).
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  // zlib header (0x78 0x01) goes before the very first block only.
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  // Save bit-buffer state so we can rewind if the compressed block expands.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // Stored block: BTYPE=00, pad to byte, then LEN and ~LEN (via the XOR
    // trick in the loop), then the raw dictionary bytes.
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Byte-align, then append the big-endian adler-32 for zlib streams.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Sync/full flush: empty stored block (00 00 FF FF marker).
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Hand the produced bytes to the callback or the caller's buffer; anything
  // that doesn't fit stays pending in m_output_flush_remaining.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Finds the longest dictionary match for the data at lookahead_pos by
// following the m_next hash chain, probing at most m_max_probes[...] entries.
// Updates *pMatch_dist/*pMatch_len only when a match longer than the incoming
// *pMatch_len is found. This variant compares two bytes at a time through
// unaligned 16-bit reads.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01 = the two bytes that must match at the end of a candidate for it to
  // beat the current best; s01 = the first two source bytes.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    // Fast rejection loop: chase the chain until a candidate whose tail
    // word matches c01 is found (three probes per iteration).
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    // Full compare, 8 bytes per iteration, up to 32*8 = 256 bytes.
    p = s;
    probe_len = 32;
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Compare loop ran to exhaustion: maximal-length match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      // New best: refresh the tail word used for fast rejection.
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Byte-wise variant of tdefl_find_match (compiled when unaligned loads are
// disabled). Same contract: follows the m_next hash chain and updates
// *pMatch_dist/*pMatch_len only for matches longer than the incoming length.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // A candidate must match these two bytes (just past / at the end of the
  // current best match) to be worth a full compare.
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    // Fast rejection loop: three chain probes per iteration.
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if ((d->m_dict[probe_pos + match_len] == c0) && \
      (d->m_dict[probe_pos + match_len - 1] == c1)) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    // Full byte-by-byte compare up to max_match_len.
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      // New best: refresh the rejection bytes.
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio. Only used for level-1-style settings (one
// probe, greedy parsing — see the gate in tdefl_compress()). Returns MZ_FALSE
// only when an intermediate tdefl_flush_block() fails.
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Work on local copies of the hot compressor state; written back before
  // any flush and on exit.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    // Copy input into the ring-buffer dictionary (mirrored at the start so
    // matches can read past the wrap point).
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      // Hash the next three bytes and look up the single level-1 probe.
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        // Extend the match two bytes at a time (unaligned 16-bit compares).
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          // Too short (or a len-3 match that's too far away): emit a literal.
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          // Emit a match code and update the Huffman frequency tables.
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        // No usable probe: emit a literal.
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      // LZ code buffer nearly full: write state back and flush the block.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    // Emit the (< 4) remaining lookahead bytes as literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Write the local state back before returning.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Appends one literal byte to the LZ code buffer: shifts a 0 flag bit into
// the current flags byte (0 == literal — see tdefl_compress_lz_codes) and
// tallies the byte for Huffman code construction.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_huff_count[0][lit]++;
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
}
// Appends one match to the LZ code buffer as a 3-byte code (biased length,
// little-endian distance), shifts a 1 flag bit in (1 == match), and tallies
// the length and distance symbols for Huffman code construction.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 small_sym, large_sym;
  mz_uint8 *pCode = d->m_pLZ_code_buf;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  match_dist -= 1;
  pCode[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  pCode[1] = (mz_uint8)(match_dist & 0xFF);
  pCode[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf = pCode + 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  // Distance symbol comes from the small table below 512, else the large one.
  small_sym = s_tdefl_small_dist_sym[match_dist & 511];
  large_sym = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ? small_sym : large_sym]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
// Full match+parse loop used for all settings not handled by
// tdefl_compress_fast: maintains the hash chains, finds matches via
// tdefl_find_match (or RLE-only matching), and performs simple lazy/greedy
// parsing. Returns MZ_FALSE only when an intermediate flush fails.
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      // Rolling hash primed with the two bytes preceding each insert point.
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the first MAX_MATCH_LEN-1 bytes past the ring buffer end so
        // matches can read across the wrap point.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      // Startup path: too little data to keep a rolling hash; insert bytes
      // one at a time until TDEFL_MIN_MATCH_LEN bytes exist.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE mode: only match runs of the previous byte (distance 1).
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Reject marginal matches: len-3 at long distance, self-overlap at the
    // buffer origin, or short matches in filter mode.
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy parsing: a match was deferred last step; keep whichever is longer.
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Defer the match one position to see if the next one is longer.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
// Copies pending compressed bytes from the internal output buffer into the
// caller's output buffer and reports consumed/produced sizes through
// *m_pIn_buf_size / *m_pOut_buf_size. Returns TDEFL_STATUS_DONE once the
// stream is finished and nothing remains to flush, else TDEFL_STATUS_OKAY.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size)
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  if (d->m_pOut_buf_size) {
    size_t space = *d->m_pOut_buf_size - d->m_out_buf_ofs;
    size_t to_copy = (space < (size_t)d->m_output_flush_remaining)
                         ? space
                         : (size_t)d->m_output_flush_remaining;
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, to_copy);
    d->m_output_flush_ofs += (mz_uint)to_copy;
    d->m_output_flush_remaining -= (mz_uint)to_copy;
    d->m_out_buf_ofs += to_copy;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  if (d->m_finished && !d->m_output_flush_remaining) return TDEFL_STATUS_DONE;
  return TDEFL_STATUS_OKAY;
}
// Main streaming entry point: validates arguments, runs the fast or normal
// match/parse loop over the supplied input, updates the adler-32 when zlib
// framing is requested, and finalizes the stream on flush. On entry
// *pIn_buf_size/*pOut_buf_size hold the buffer sizes; on return they hold the
// number of bytes actually consumed/produced.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Reject: mixing callback and buffer output, a previous error, shrinking
  // back from FINISH, or non-empty sizes with NULL pointers.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // Drain any bytes left over from a previous call before compressing more.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // Fast path only for 1-probe greedy parsing with no special match modes.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All input consumed and a flush requested: terminate the block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
// Convenience wrapper around tdefl_compress() for compressors initialized
// with a put-buffer callback (no caller-supplied output buffer).
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  size_t remaining = in_buf_size;
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &remaining, NULL, NULL, flush);
}
// (Re)initializes a compressor. pPut_buf_func may be NULL (buffer-output
// mode). flags packs the probe count (low 12 bits) and TDEFL_* option bits.
// Unless TDEFL_NONDETERMINISTIC_PARSING_FLAG is set the hash table is cleared
// so output is reproducible. Always returns TDEFL_STATUS_OKAY.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  // Probe budgets derived from the low 12 bits; [1] is used once the current
  // match is already >= 32 bytes long.
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // First byte of the LZ code buffer is reserved for the first flags byte.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  // adler-32 starts at 1 per the zlib spec.
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
// Returns the status produced by the most recent tdefl_compress() call.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
// Returns the running adler-32 of the source data (updated only when
// TDEFL_WRITE_ZLIB_HEADER or TDEFL_COMPUTE_ADLER32 is set).
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
// One-shot compression of pBuf (buf_len bytes), streaming all output through
// pPut_buf_func. Returns MZ_FALSE on bad arguments, allocation failure, or
// compression failure.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool ok;
  if (!pPut_buf_func || (buf_len && !pBuf)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  ok = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
        TDEFL_STATUS_OKAY) &&
       (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
        TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return ok;
}
// Growable byte sink used by the mem-to-heap/mem-to-mem helpers and the PNG
// writer; filled through tdefl_output_buffer_putter().
typedef struct {
  size_t m_size, m_capacity;  // bytes used / bytes allocated
  mz_uint8 *m_pBuf;           // output storage (heap-grown or caller-provided)
  mz_bool m_expandable;       // MZ_TRUE => putter may MZ_REALLOC m_pBuf
} tdefl_output_buffer;
// tdefl_put_buf_func_ptr adapter: appends len bytes to a tdefl_output_buffer,
// doubling its capacity (minimum 128) when it is expandable. Returns MZ_FALSE
// if the buffer is full and fixed-size, or if reallocation fails.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *pOut = (tdefl_output_buffer *)pUser;
  size_t required = pOut->m_size + len;
  if (required > pOut->m_capacity) {
    size_t cap = pOut->m_capacity;
    mz_uint8 *pGrown;
    if (!pOut->m_expandable) return MZ_FALSE;
    do {
      cap <<= 1U;
      if (cap < 128U) cap = 128U;
    } while (required > cap);
    pGrown = (mz_uint8 *)MZ_REALLOC(pOut->m_pBuf, cap);
    if (!pGrown) return MZ_FALSE;
    pOut->m_pBuf = pGrown;
    pOut->m_capacity = cap;
  }
  memcpy(pOut->m_pBuf + pOut->m_size, pBuf, len);
  pOut->m_size = required;
  return MZ_TRUE;
}
// Compresses pSrc_buf into a freshly heap-allocated buffer. On success the
// buffer is returned and *pOut_len receives its size (caller frees it); on
// failure returns NULL with *pOut_len == 0.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer sink;
  MZ_CLEAR_OBJ(sink);
  if (!pOut_len) return NULL;
  *pOut_len = 0;
  sink.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                    tdefl_output_buffer_putter, &sink, flags))
    return NULL;
  *pOut_len = sink.m_size;
  return sink.m_pBuf;
}
// Compresses pSrc_buf into the caller-provided pOut_buf (fixed capacity
// out_buf_len). Returns the compressed size, or 0 on failure / overflow.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer sink;
  MZ_CLEAR_OBJ(sink);
  if (!pOut_buf) return 0;
  sink.m_pBuf = (mz_uint8 *)pOut_buf;
  sink.m_capacity = out_buf_len;
  return tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                      tdefl_output_buffer_putter, &sink, flags)
             ? sink.m_size
             : 0;
}
#ifndef MINIZ_NO_ZLIB_APIS
// Hash-chain probe budget for each compression level 0..10.
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
// Translates zlib-style (level, window_bits, strategy) parameters into a
// tdefl flags word: probe count from s_tdefl_num_probes, greedy parsing for
// levels <= 3, zlib framing for positive window_bits, and the strategy bits.
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  int probe_idx = (level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL;
  mz_uint flags = s_tdefl_num_probes[probe_idx];
  if (level <= 3) flags |= TDEFL_GREEDY_PARSING_FLAG;
  if (window_bits > 0) flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level) {
    flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  } else if (strategy == MZ_FILTERED) {
    flags |= TDEFL_FILTER_MATCHES;
  } else if (strategy == MZ_HUFFMAN_ONLY) {
    flags &= ~TDEFL_MAX_PROBES_MASK;
  } else if (strategy == MZ_FIXED) {
    flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  } else if (strategy == MZ_RLE) {
    flags |= TDEFL_RLE_MATCHES;
  }
  return flags;
}
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
// Encodes a raw image (w x h, num_chans interleaved 8-bit channels) as a
// minimal PNG in a heap buffer. Returns the buffer (caller frees) and sets
// *pLen_out to its size, or returns NULL on failure. level selects the
// deflate level [0,10]; flip writes rows bottom-up.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip) {
  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
  // defined.
  static const mz_uint s_tdefl_png_num_probes[11] = {
      0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  tdefl_output_buffer out_buf;
  int i, bpl = w * num_chans, y, z;
  mz_uint32 c;
  *pLen_out = 0;
  if (!pComp) return NULL;
  MZ_CLEAR_OBJ(out_buf);
  out_buf.m_expandable = MZ_TRUE;
  out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
  if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
    MZ_FREE(pComp);
    return NULL;
  }
  // write dummy header (41 placeholder bytes, overwritten below)
  for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
  // compress image data: each row is preceded by a filter byte (z == 0 after
  // the loop above, i.e. PNG filter type "none")
  tdefl_init(
      pComp, tdefl_output_buffer_putter, &out_buf,
      s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
  for (y = 0; y < h; ++y) {
    tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
    tdefl_compress_buffer(pComp,
                          (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header: PNG signature, IHDR, and the IDAT chunk header.
  // *pLen_out is the IDAT payload (zlib stream) length at this point.
  *pLen_out = out_buf.m_size - 41;
  {
    // Color type by channel count: 1 -> gray, 2 -> gray+alpha, 3 -> RGB,
    // 4 -> RGBA.
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {0x89,
                           0x50,
                           0x4e,
                           0x47,
                           0x0d,
                           0x0a,
                           0x1a,
                           0x0a,
                           0x00,
                           0x00,
                           0x00,
                           0x0d,
                           0x49,
                           0x48,
                           0x44,
                           0x52,
                           0,
                           0,
                           (mz_uint8)(w >> 8),
                           (mz_uint8)w,
                           0,
                           0,
                           (mz_uint8)(h >> 8),
                           (mz_uint8)h,
                           8,
                           chans[num_chans],
                           0,
                           0,
                           0,
                           0,
                           0,
                           0,
                           0,
                           (mz_uint8)(*pLen_out >> 24),
                           (mz_uint8)(*pLen_out >> 16),
                           (mz_uint8)(*pLen_out >> 8),
                           (mz_uint8)*pLen_out,
                           0x49,
                           0x44,
                           0x41,
                           0x54};
    // IHDR CRC-32 over the chunk type + data, stored big-endian at offset 29.
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
// Convenience overload of tdefl_write_image_to_png_file_in_memory_ex with a
// default compression level and no vertical flip.
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES / MZ_DEFAULT_LEVEL (we
  // can't reference MZ_DEFAULT_LEVEL here in case the zlib APIs were
  // #defined out).
  const mz_uint default_level = 6;
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, default_level,
                                                    MZ_FALSE);
}
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
// fopen() wrapper over the secure-CRT fopen_s(); returns the opened stream,
// or NULL on failure (fopen_s leaves the out pointer NULL on error).
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pResult = NULL;
  (void)fopen_s(&pResult, pFilename, pMode);
  return pResult;
}
// freopen() wrapper over the secure-CRT freopen_s(); returns NULL when
// freopen_s reports an error, otherwise the reopened stream.
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pResult = NULL;
  return freopen_s(&pResult, pPath, pMode, pStream) ? NULL : pResult;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,  // "PK\x05\x06"
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,         // "PK\x01\x02"
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,           // "PK\x03\x04"
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets (bytes from start of record)
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
// Growable typed array used by the reader's internal state; storage comes
// from the archive's user-pluggable allocator callbacks.
typedef struct {
  void *m_p;                  // element storage (m_capacity elements)
  size_t m_size, m_capacity;  // current element count / allocated element count
  mz_uint m_element_size;     // size of one element in bytes
} mz_zip_array;
// Internal state hanging off mz_zip_archive::m_pState.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;          // raw central directory bytes
  mz_zip_array m_central_dir_offsets;  // per-entry byte offsets into the above
  mz_zip_array m_sorted_central_dir_offsets;  // entry indices sorted by name
  MZ_FILE *m_pFile;  // backing file for file-based archives (else NULL)
  void *m_pMem;      // backing image for memory-based archives (else NULL)
  size_t m_mem_size;
  size_t m_mem_capacity;
};
// Record an array's element size; must be set before any resize/push_back.
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
// Access element `index` of an mz_zip_array as `element_type` (no bounds
// checking; yields an lvalue).
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]
// Release an array's storage through the archive's free callback and reset
// every field to its empty/zero state.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  pArray->m_p = NULL;
  pArray->m_size = 0;
  pArray->m_capacity = 0;
  pArray->m_element_size = 0;
}
// Grow pArray's backing store so it can hold at least min_new_capacity
// elements. When `growing` is set, capacity doubles geometrically (amortized
// O(1) appends); otherwise exactly min_new_capacity is allocated. Returns
// MZ_FALSE on allocation failure (array left untouched).
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  if (!growing) {
    new_capacity = min_new_capacity;
  } else {
    // Double from the current capacity until the request fits.
    for (new_capacity = MZ_MAX(1, pArray->m_capacity);
         new_capacity < min_new_capacity; new_capacity *= 2) {
    }
  }
  pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                            pArray->m_element_size, new_capacity);
  if (pNew_p == NULL) return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}
// Ensure capacity for new_capacity elements without changing m_size.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  // Fast path: nothing to do unless the request exceeds current capacity.
  if (new_capacity <= pArray->m_capacity) return MZ_TRUE;
  return mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing);
}
// Set the array's logical size, growing storage first if needed. Note new
// elements are NOT zero-initialized.
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
                                                  mz_zip_array *pArray,
                                                  size_t new_size,
                                                  mz_uint growing) {
  if ((new_size > pArray->m_capacity) &&
      (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)))
    return MZ_FALSE;
  pArray->m_size = new_size;
  return MZ_TRUE;
}
// Reserve room for n additional elements beyond the current size (with
// geometric growth).
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  const size_t wanted = pArray->m_size + n;
  return mz_zip_array_reserve(pZip, pArray, wanted, MZ_TRUE);
}
// Append n elements copied from pElements. Returns MZ_FALSE on allocation
// failure (array size unchanged in that case).
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  const size_t orig_size = pArray->m_size;
  mz_uint8 *pDst;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  pDst = (mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size;
  memcpy(pDst, pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Convert MS-DOS packed date/time fields (as stored in zip headers) to a
// time_t via mktime() in the local time zone.
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1;  // let mktime() decide whether DST applies
  // date: bits 15-9 = years since 1980, 8-5 = month (1-12), 4-0 = day
  tm.tm_year = ((dos_date >> 9) & 0x7F) + (1980 - 1900);
  tm.tm_mon = ((dos_date >> 5) & 0x0F) - 1;
  tm.tm_mday = dos_date & 0x1F;
  // time: bits 15-11 = hour, 10-5 = minute, 4-0 = seconds divided by two
  tm.tm_hour = (dos_time >> 11) & 0x1F;
  tm.tm_min = (dos_time >> 5) & 0x3F;
  tm.tm_sec = (dos_time & 0x1F) << 1;
  return mktime(&tm);
}
// Convert a time_t into MS-DOS packed time/date fields (local time zone).
// On the MSVC path a localtime_s failure zeroes both outputs.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  // NOTE(review): localtime() returns a shared static buffer, so this path is
  // not thread-safe - presumably acceptable for this library's usage; confirm.
  struct tm *tm = localtime(&time);
#endif
  // time: hour in bits 15-11, minute in 10-5, seconds/2 in 4-0
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  // date: years-since-1980 in bits 15-9, month (1-12) in 8-5, day in 4-0
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
// Stat pFilename and convert its modification time to DOS date/time fields.
// With MINIZ_NO_TIME both outputs are simply zeroed. Returns MZ_FALSE only
// when the stat call itself fails.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif  // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Apply access/modification timestamps to a file on disk via utime().
// Returns nonzero (MZ_TRUE) on success, 0 on failure.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf times;
  times.actime = access_time;
  times.modtime = modified_time;
  return (utime(pFilename, &times) == 0);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
// Validate pZip, install default allocator callbacks where the caller left
// them NULL, and allocate/zero the internal reader state. Does not touch the
// archive data itself.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  mz_zip_internal_state *pState;
  (void)flags;
  // Refuse a NULL archive, one that is already initialized, or one in the
  // wrong mode.
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  pState = (mz_zip_internal_state *)pZip->m_pAlloc(
      pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state));
  if (pState == NULL) return MZ_FALSE;
  memset(pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  pZip->m_pState = pState;
  return MZ_TRUE;
}
// Case-insensitive "less than" comparison of two central-directory entries'
// filenames, identified by indices into the offsets array. Used by the heap
// sort below; shorter name wins when one is a prefix of the other.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index));
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint n = MZ_MIN(l_len, r_len);
  // Filenames immediately follow the fixed-size central directory header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  while (n--) {
    mz_uint8 l = MZ_TOLOWER(*pL), r = MZ_TOLOWER(*pR);
    if (l != r) return l < r;
    pL++;
    pR++;
  }
  // Entire common prefix matched: the shorter filename sorts first.
  return l_len < r_len;
}
// Swap two mz_uint32 lvalues in place (statement-like; terminated by
// MZ_MACRO_END so it composes safely with if/else).
#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  // Phase 1: heapify - sift down every internal node, starting from the last
  // parent, so pIndices forms a max-heap ordered by filename.
  int start = (size - 2) >> 1, end;
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      // Choose the greater of the two children (if a right child exists).
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  // Phase 2: repeatedly move the heap's maximum to the end of the shrinking
  // range and re-sift the new root, yielding ascending order.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}
// Locate the end-of-central-directory record by scanning backwards, then load
// the entire central directory, build the per-entry offset index (and
// optionally the sorted-by-filename index), and sanity-check every record.
// Returns MZ_FALSE on any structural problem; zip64 entries are rejected.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];  // 4 KB stack scan buffer
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up at start of file, or once past the maximum possible EOCD span
    // (the trailing comment can be at most 0xFFFF bytes).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Step back with a 3-byte overlap so a signature straddling two reads is
    // still found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  // Multi-disk (spanned) archives are not supported.
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes mark zip64 records, which are rejected here.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
      // The entry's data must lie entirely inside the archive.
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
          n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}
// Initialize a reader whose archive bytes are supplied through the caller's
// m_pRead callback; `size` is the total archive size in bytes.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((pZip == NULL) || (pZip->m_pRead == NULL)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Central directory unreadable/corrupt: tear down the partial state.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
// m_pRead callback for in-memory archives: copy up to n bytes starting at
// file_ofs out of the resident image, clamped to the archive bounds.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t s;
  if (file_ofs >= pZip->m_archive_size)
    s = 0;
  else
    s = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}
// Initialize a reader over an archive image fully resident in memory at pMem
// (size bytes). The memory is borrowed, not copied - it must outlive the
// reader.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Central directory unreadable/corrupt: tear down the partial state.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#ifndef MINIZ_NO_STDIO
// m_pRead callback for stdio-backed archives: seek (only when not already at
// file_ofs) and read up to n bytes.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if ((mz_int64)file_ofs < 0) return 0;
  if ((cur_ofs != (mz_int64)file_ofs) &&
      (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Open pFilename with stdio and initialize a reader over it. The FILE* is
// owned by the reader state from then on (closed by mz_zip_reader_end()).
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (pFile == NULL) return MZ_FALSE;
  // Determine the archive size by seeking to the end of the file.
  if (MZ_FSEEK64(pFile, 0, SEEK_END) != 0) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Central directory unreadable/corrupt: mz_zip_reader_end() also closes
  // the file we just handed to the state.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Number of entries in the archive's central directory (0 for a NULL archive).
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  if (!pZip) return 0;
  return pZip->m_total_files;
}
// Return a pointer to file_index's raw central directory header inside the
// reader's central-dir buffer, or NULL if the archive/index is invalid.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  mz_uint32 cdh_ofs;
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  cdh_ofs = MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets,
                                 mz_uint32, file_index);
  return &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8,
                               cdh_ofs);
}
// True if the entry's general-purpose bit 0 is set (data is encrypted).
// Returns MZ_FALSE for an invalid index.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (p == NULL) return MZ_FALSE;
  return (MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1);
}
// True if the entry looks like a directory: either its stored name ends with
// '/', or the DOS directory attribute bit is set in the external attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (p == NULL) return MZ_FALSE;
  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if ((filename_len) &&
      (p[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1] == '/'))
    return MZ_TRUE;
  // Bugfix: this code previously also checked the internal attribute, which
  // wasn't correct. Most/all zip writers (hopefully) set DOS file/directory
  // attributes in the low 16 bits of the external attributes, so check the
  // DOS directory flag and ignore the source OS ID in the created-by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  if (MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS) & 0x10) return MZ_TRUE;
  return MZ_FALSE;
}
// Fill *pStat with the decoded central-directory record for file_index.
// The filename and comment are copied into pStat's fixed-size buffers,
// truncated to fit and always NUL-terminated. Returns MZ_FALSE on a bad
// index or NULL pStat.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment follows the filename and the (skipped) extra field.
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}
// Copy entry file_index's filename into pFilename (truncated to fit, always
// NUL-terminated when filename_buf_size is non-zero). Returns the number of
// bytes written/needed including the terminator (i.e. the possibly-truncated
// length + 1), or 0 on a bad index.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (p == NULL) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    // Clamp to the caller's buffer, leaving room for the terminator.
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}
// Compare two strings of known length; case-sensitively when the flag is set,
// otherwise via ASCII lowercase folding.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return (memcmp(pA, pB, len) == 0) ? MZ_TRUE : MZ_FALSE;
  while (len--) {
    if (MZ_TOLOWER(*pA) != MZ_TOLOWER(*pB)) return MZ_FALSE;
    pA++;
    pB++;
  }
  return MZ_TRUE;
}
// strcmp-style, case-insensitive comparison of entry l_index's stored
// filename against the caller-supplied name pR (r_len bytes). Returns <0, 0,
// or >0; when one name is a prefix of the other, length decides.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint n = MZ_MIN(l_len, r_len);
  // The stored filename immediately follows the fixed-size header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  while (n--) {
    mz_uint8 l = MZ_TOLOWER(*pL), r = MZ_TOLOWER(*pR);
    if (l != r) return l - r;
    pL++;
    pR++;
  }
  return (int)(l_len - r_len);
}
// Binary search over the filename-sorted index for an exact (case-folded)
// match of pFilename. Returns the entry's file index, or -1 if absent.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int lo = 0, hi = (int)pZip->m_total_files - 1;
  while (lo <= hi) {
    const int mid = lo + ((hi - lo) >> 1);
    const int file_index = pIndices[mid];
    const int comp =
        mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
                                       file_index, pFilename, filename_len);
    if (comp == 0) return file_index;
    if (comp < 0)
      lo = mid + 1;
    else
      hi = mid - 1;
  }
  return -1;
}
// Find a file by name (and optional comment) and return its index, or -1.
// Honors MZ_ZIP_FLAG_CASE_SENSITIVE and MZ_ZIP_FLAG_IGNORE_PATH; uses the
// sorted-index binary search when neither flag nor a comment is involved,
// otherwise scans the central directory linearly.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: default flags, no comment filter, and a sorted index exists.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  // Zip name/comment length fields are 16-bit; longer queries can't match.
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      // The comment follows the filename and extra field in the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip everything up to the last path separator ('/', '\', or drive
      // ':') before comparing.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}
// Extract entry file_index into the caller-supplied pBuf (buf_size bytes).
// With MZ_ZIP_FLAG_COMPRESSED_DATA the raw compressed bytes are copied out;
// otherwise the data is inflated and its CRC-32 verified. For file-backed
// archives, pUser_read_buf (if non-NULL) is used as the compressed-data
// staging buffer; otherwise a temporary buffer is allocated and freed here.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining,
      out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  // Stack copy of the local header, mz_uint32-aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf)) return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size) return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // Skip the local header plus its (possibly different) filename/extra field
  // to reach the compressed data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size) return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  // Streaming inflate loop: refill the staging buffer as needed and write
  // directly into pBuf (non-wrapping output).
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Free the temporary staging buffer if we allocated one above.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}
// Name-based variant of mz_zip_reader_extract_to_mem_no_alloc(): resolve
// pFilename to an index first; MZ_FALSE if the entry is not found.
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  const int file_index =
      mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, (mz_uint)file_index, pBuf,
                                               buf_size, flags, pUser_read_buf,
                                               user_read_buf_size);
}
// Extract by index into a caller buffer; a temporary read buffer is
// allocated internally if one is needed.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags,
                                               /*pUser_read_buf=*/NULL,
                                               /*user_read_buf_size=*/0);
}
// Extract by name into a caller buffer; a temporary read buffer is
// allocated internally if one is needed.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(
      pZip, pFilename, pBuf, buf_size, flags,
      /*pUser_read_buf=*/NULL, /*user_read_buf_size=*/0);
}
// Extract entry file_index into a buffer allocated with the archive's
// m_pAlloc callback. On success returns the buffer (caller releases it via
// the archive's m_pFree) and stores its size in *pSize; NULL on failure.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize) *pSize = 0;
  if (!p) return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  // Reject sizes that overflow a 32-bit size_t.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize) *pSize = (size_t)alloc_size;
  return pBuf;
}
// Locates pFilename in the archive and extracts it into a heap buffer
// allocated with the archive's allocator. On success returns the buffer
// (caller frees via pZip->m_pFree) and stores its size in *pSize when pSize
// is non-NULL; on failure returns NULL with *pSize set to 0.
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize) *pSize = 0;
    // Bug fix: this function returns void*, so the not-found path must return
    // NULL rather than the integer MZ_FALSE (same bit pattern, but the old
    // code relied on an implicit int-to-pointer conversion).
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
// Streams the extracted bytes of the entry at file_index to pCallback chunk by
// chunk, so no full-size output buffer is ever allocated. Stored entries (or
// requests with MZ_ZIP_FLAG_COMPRESSED_DATA) are copied through directly;
// deflated entries are inflated via a TINFL_LZ_DICT_SIZE sliding window.
// Unless the raw compressed data was requested, the final output size and
// CRC32 are verified against the central directory record. Returns MZ_TRUE on
// success, which includes empty files and directory entries.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
      out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  // Stack copy of the local file header, sized/aligned as mz_uint32 so the
  // MZ_READ_LE32 accesses below are safely aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd
  // zips with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
  // Encryption (bit 0) and patch files (bit 5) are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The compressed data starts after the local header plus its variable
  // filename and extra fields.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    // Memory-backed archive: read the compressed stream in place, no copy.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    // File/callback-backed archive: stage reads through a bounded buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
      // Guard against comp_size overflowing a 32-bit size_t; the "0," on MSVC
      // silences its constant-conditional warning.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        // Stored data is its own output, so CRC it as we pass it through.
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // Pass-through loop: read a chunk, (optionally) CRC it, hand it to the
      // callback, repeat until the compressed stream is exhausted.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // Deflated entry: inflate through a TINFL_LZ_DICT_SIZE circular window,
    // flushing each produced span to the callback.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);
    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        // Current write position inside the circular dictionary window.
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        // Refill the staging buffer when it runs dry (file-backed only).
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
            (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Producing more than the declared uncompressed size means the
          // archive is corrupt.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }
  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Only free the read buffer we allocated (memory-backed archives point
  // pRead_buf into the archive itself).
  if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
  return status == TINFL_STATUS_DONE;
}
// Convenience wrapper: locates pFilename in the archive and streams its
// extracted data to pCallback. Returns MZ_FALSE when the name is not found.
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  const int idx = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_callback(pZip, idx, pCallback,
                                                       pOpaque, flags);
}
#ifndef MINIZ_NO_STDIO
// mz_file_write_func adapter for stdio: ignores the offset argument (the
// stream position advances with each sequential write) and appends n bytes
// from pBuf to the MZ_FILE* carried in pOpaque.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  MZ_FILE *pFile = (MZ_FILE *)pOpaque;
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, pFile);
}
// Extracts the entry at file_index into a new disk file at pDst_filename
// (opened with "wb", so an existing file is truncated). When time support is
// compiled in and extraction succeeded, the destination's timestamps are set
// from the archive entry's stored time.
// NOTE(review): on extraction failure the partially written destination file
// is left on disk — callers that care should remove it themselves.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename, "wb");
  if (!pFile) return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // A failed close means the data may not have reached disk — report failure
  // even if the extraction callback succeeded.
  if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO
// Tears down a reader-mode archive: releases the central directory arrays,
// closes any owned stdio file, frees the internal state block, and marks the
// archive invalid. Returns MZ_FALSE if the archive is unusable or not in
// reading mode.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;

  if (!pZip || !pZip->m_pState || !pZip->m_pAlloc || !pZip->m_pFree ||
      pZip->m_zip_mode != MZ_ZIP_MODE_READING)
    return MZ_FALSE;

  // Detach the state first so the archive is never left pointing at freed
  // memory.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);

  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Locates pArchive_filename inside the zip and extracts it to the disk file
// pDst_filename. Returns MZ_FALSE when the name is not found or the
// extraction fails.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  const int idx =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (idx < 0) return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, idx, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Stores a 16-bit value at p in little-endian byte order, one byte at a time
// (endian- and alignment-safe).
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
}
// Stores a 32-bit value at p in little-endian byte order, one byte at a time
// (endian- and alignment-safe).
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
  p[2] = (mz_uint8)((v >> 16) & 0xFF);
  p[3] = (mz_uint8)((v >> 24) & 0xFF);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
// Puts pZip into writing mode for a brand-new archive. existing_size bytes are
// assumed to already occupy the front of the archive (e.g. a self-extractor
// stub the caller wrote). The caller must have set m_pWrite (and optionally
// the allocator hooks; NULL hooks get heap defaults). Fails if the archive is
// already in use or m_file_offset_alignment is not a power of two.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  // Fill in default heap-based callbacks for any hooks the caller left NULL.
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  // The central directory is built in memory as a byte array plus an array of
  // per-entry offsets (and an unused-when-writing sorted-offset array).
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// m_pWrite implementation backing heap-based archives: grows the internal
// memory block on demand (capacity doubles from a 64-byte floor) and copies n
// bytes to file_ofs. Returns n on success, 0 on failure.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
  // Reject empty writes and sizes that can't fit a 32-bit size_t. The "0,"
  // in the MSVC branch silences its constant-conditional warning.
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Geometric growth keeps the amortized cost of repeated appends linear.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size) new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}
// Initializes a zip writer that builds the archive in a growable heap block
// (written through mz_zip_heap_write_func). size_to_reserve_at_beginning
// bytes are reserved at the front of the archive; initial_allocation_size
// pre-sizes the heap block (at least the reserved size). Returns MZ_FALSE on
// a NULL/busy archive or allocation failure.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  // Bug fix: guard before touching pZip. mz_zip_writer_init() checks for
  // NULL, but the assignments below would dereference a NULL pointer first.
  if (!pZip) return MZ_FALSE;
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      // Roll the archive back to its pristine state on failure.
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// m_pWrite implementation backing stdio archives: seeks the owned FILE* to
// file_ofs (only when not already positioned there) and writes n bytes.
// Returns the byte count written, or 0 on a bad offset / failed seek.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 target = (mz_int64)file_ofs;
  if (target < 0) return 0;
  // Skip the (possibly expensive) seek when the stream is already there.
  if ((MZ_FTELL64(pZip->m_pState->m_pFile) != target) &&
      (MZ_FSEEK64(pZip->m_pState->m_pFile, target, SEEK_SET)))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Initializes a zip writer that streams the archive to a new disk file at
// pFilename (opened "wb"). size_to_reserve_at_beginning zero bytes are
// written at the front of the file as reserved space. Returns MZ_FALSE on a
// NULL/busy archive, open failure, or write failure.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  // Bug fix: guard before touching pZip. mz_zip_writer_init() checks for
  // NULL, but the assignments below would dereference a NULL pointer first.
  // A NULL filename would likewise reach MZ_FOPEN unchecked.
  if ((!pZip) || (!pFilename)) return MZ_FALSE;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    // Roll the archive back to its pristine state on failure.
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Physically write the reserved region as zeros, 4 KB at a time.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Converts an archive opened for reading into writing mode so new entries can
// be appended. Depending on how the reader was backed, this reopens the stdio
// file as writable ("r+b", using pFilename), reuses the in-memory block, or
// requires the caller to have supplied m_pWrite. New entries begin at the old
// central directory offset, which will be rewritten on finalize.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support
  // max size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    // Stdio support compiled out: touch pFilename to avoid an
    // unused-parameter warning and fail.
    pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    if (!pFilename) return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif  // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location (the old central directory gets overwritten and rebuilt).
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}
// Adds a memory buffer to the archive under pArchive_name. Convenience form
// of mz_zip_writer_add_mem_ex with no comment and no caller-supplied
// uncompressed size/CRC.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size,
                                  NULL, 0, level_and_flags, 0, 0);
}
// Bookkeeping handed to mz_zip_writer_add_put_buf_callback while streaming
// deflate output into the archive.
typedef struct {
  mz_zip_archive *m_pZip;            // archive being written to
  mz_uint64 m_cur_archive_file_ofs;  // next write offset within the archive
  mz_uint64 m_comp_size;             // total compressed bytes emitted so far
} mz_zip_writer_add_state;
// tdefl output callback: appends len compressed bytes to the archive via the
// writer's m_pWrite hook and advances the running offset and compressed-size
// counters in the mz_zip_writer_add_state carried in pUser.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  size_t written = pState->m_pZip->m_pWrite(
      pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len);
  if ((int)written != len) return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}
// Fills pDst (MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes) with a ZIP local file header
// for one entry; the variable-length filename and extra data are written
// separately by the caller. Version-needed is 20 when a compression method is
// set (deflate), otherwise 0. Always returns MZ_TRUE.
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  // 32-bit size fields: callers reject >4GB entries (no zip64 support).
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
// Fills pDst (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes) with a ZIP central
// directory header for one entry; the variable-length filename, extra data,
// and comment are appended separately by the caller. Version-needed is 20
// when a compression method is set (deflate), otherwise 0. Always returns
// MZ_TRUE.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  // 32-bit size/offset fields: callers reject >4GB values (no zip64 support).
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}
// Appends one finished entry's central directory record (fixed header +
// filename + extra + comment) to the in-memory central directory and records
// the record's byte offset in m_central_dir_offsets. On any push failure the
// central directory byte array is rolled back to its original size and
// MZ_FALSE is returned.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet: the local header offset and the grown central
  // directory must both fit in 32 bits.
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      // Bug fix: "&central_dir_ofs" had been corrupted to the mojibake token
      // "¢ral_dir_ofs" (U+00A2 swallowed "&cent"), which does not compile.
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Basic ZIP archive name validity: names cannot start with a forward slash,
// cannot contain a drive-letter colon, and cannot use DOS-style backslashes.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  const char *p = pArchive_name;
  if (p[0] == '/') return MZ_FALSE;
  for (; *p; ++p) {
    if ((*p == '\\') || (*p == ':')) return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Returns how many zero bytes must be inserted before the next local header
// so it lands on an m_file_offset_alignment boundary (the alignment is
// validated as a power of two at writer init). Zero when alignment is off.
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 mask, ofs_in_block;
  if (!pZip->m_file_offset_alignment) return 0;
  mask = (mz_uint32)(pZip->m_file_offset_alignment - 1);
  ofs_in_block = (mz_uint32)(pZip->m_archive_size & mask);
  return (pZip->m_file_offset_alignment - ofs_in_block) & mask;
}
// Writes n zero bytes to the archive starting at cur_file_ofs, in chunks
// drawn from a small zeroed stack buffer.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char zeros[4096];
  memset(zeros, 0, MZ_MIN(sizeof(zeros), n));
  while (n > 0) {
    mz_uint32 chunk = MZ_MIN(sizeof(zeros), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, zeros, chunk) != chunk)
      return MZ_FALSE;
    cur_file_ofs += chunk;
    n -= chunk;
  }
  return MZ_TRUE;
}
// Adds a memory buffer to the archive as a new entry named pArchive_name.
// level_and_flags carries the compression level (low 4 bits) or'd with
// MZ_ZIP_FLAG_* flags; pass -1 for MZ_DEFAULT_LEVEL. When
// MZ_ZIP_FLAG_COMPRESSED_DATA is set, pBuf already holds deflated data and
// uncomp_size/uncomp_crc32 must describe the original data; otherwise they
// are computed here and must be passed as 0. A name ending in '/' declares a
// directory entry, which must carry no data. Returns MZ_FALSE on any
// parameter, size-limit (no zip64), allocation, or I/O failure.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // Level 0, or pre-compressed input, means the data is written verbatim.
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // uncomp_size may only be supplied together with pre-compressed data.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  // The entry is stamped with the current wall-clock time in DOS format.
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif  // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size)) return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for the alignment padding and the local header; the header
  // contents aren't known yet (sizes/CRC), so it is rewritten at the end.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Raw input: compute the CRC/size ourselves. Tiny payloads (<= 3 bytes)
    // can't benefit from deflate, so force them to be stored.
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Pre-compressed data was deflated by the caller; record that method.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
  } else if (buf_size) {
    // Deflate the buffer straight into the archive via the put-buf callback.
    // -15 window bits = raw deflate stream, as ZIP requires.
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now that the sizes and CRC are final, go back and fill in the local
  // header that was reserved as zeros above.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Adds the contents of the disk file pSrc_filename to the archive under
// pArchive_name, streaming through an MZ_ZIP_MAX_IO_BUF_SIZE read buffer and
// deflating unless the level (low 4 bits of level_and_flags) is 0. The
// entry's DOS timestamp is taken from the source file's modification time.
// MZ_ZIP_FLAG_COMPRESSED_DATA is not supported here. Returns MZ_FALSE on any
// parameter, size-limit (no zip64), allocation, or I/O failure.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input makes no sense when reading from a source file.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  // Stamp the entry with the source file's modification time.
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file) return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Tiny payloads (<= 3 bytes) can't benefit from deflate; store them.
  if (uncomp_size <= 3) level = 0;
  // Reserve space for the alignment padding and the local header; the header
  // contents aren't known yet (sizes/CRC), so it is rewritten at the end.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Stored: copy the file through in chunks, CRCing as we go.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
                            n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflated: feed chunks through tdefl, which writes compressed output
      // to the archive via the put-buf callback. -15 window bits = raw
      // deflate stream, as ZIP requires.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
                                               (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // Flush only on the final chunk so tdefl can pack output optimally.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size,
            uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now that the sizes and CRC are final, go back and fill in the local
  // header that was reserved as zeros above.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Copies the file at `file_index` from `pSource_zip` directly into `pZip`
// without recompressing: the raw local header, compressed payload and (if
// present) the data descriptor are copied byte-for-byte, then the source's
// central-directory entry is duplicated with a patched local-header offset.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // Backed by mz_uint32 so the local-header byte buffer is suitably aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  // Destination must be an open, non-finalized writer.
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if (NULL ==
      (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  // Locate and validate the source file's local header.
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Pad so the copied local header lands on the configured alignment.
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  // Copy the local header verbatim.
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Bytes still to copy: filename + extra field (n) plus compressed payload.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // Buffer must also hold the 4-dword data descriptor copied further below.
  if (NULL == (pBuf = pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1,
                   (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                                  MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                         comp_bytes_remaining)))))
    return MZ_FALSE;
  // Stream the raw (still-compressed) data from source to destination.
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // Descriptor is 3 dwords, or 4 when prefixed by the optional
    // 0x08074b50 signature.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
  // Duplicate the source's central-directory entry, repointing it at the
  // new local-header offset.
  orig_central_dir_size = pState->m_central_dir.m_size;
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  // Append the variable-length tail: filename + extra field + comment.
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    // Roll back the partially-written central-directory entry.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;
  return MZ_TRUE;
}
// Writes the central directory and the end-of-central-directory record,
// then flushes the underlying file (if any). On success the archive is a
// complete zip and the writer enters
// MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  // Flush so the finished archive is on disk before reporting success.
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif  // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}
// Finalizes a heap-backed archive and hands the in-memory buffer to the
// caller: *pBuf/*pSize receive the archive bytes and length, and the
// buffer is detached from the writer state so mz_zip_writer_end() will
// not free it. Only valid for archives created with the heap write func.
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pBuf) || (!pSize)) return MZ_FALSE;
  if (!pZip->m_pState) return MZ_FALSE;
  if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
  if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
  // Transfer ownership of the heap buffer to the caller.
  mz_zip_internal_state *pState = pZip->m_pState;
  *pBuf = pState->m_pMem;
  *pSize = pState->m_mem_size;
  pState->m_pMem = NULL;
  pState->m_mem_size = 0;
  pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
// Releases all resources owned by a zip writer (central-directory arrays,
// the output file handle, and — unless detached — the heap buffer).
// Valid in WRITING or WRITING_HAS_BEEN_FINALIZED mode; afterwards the
// archive is in MZ_ZIP_MODE_INVALID.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree))
    return MZ_FALSE;
  if ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))
    return MZ_FALSE;
  mz_zip_internal_state *pState = pZip->m_pState;
  pZip->m_pState = NULL;
  // Free the central-directory bookkeeping arrays.
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  // Heap-backed archives: release the output buffer unless it was already
  // detached by mz_zip_writer_finalize_heap_archive().
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// One-shot helper: adds a memory buffer as a new entry to the zip archive
// at `pZip_filename`, creating the archive if it does not exist or
// appending if it does. The archive is always finalized so a valid
// central directory is left on disk; a freshly-created archive is deleted
// again if anything failed.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  // Negative level means "use the default compression level".
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  // Stat failure => the file doesn't exist yet: start a new archive.
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
    if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  } else {
    // Append to an existing archive.
    if (!mz_zip_reader_init_file(
            &zip_archive, pZip_filename,
            level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status =
      mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
                               pComment, comment_size, level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}
// Convenience helper: opens the archive at `pZip_filename`, extracts the
// entry named `pArchive_name` into a newly-allocated heap buffer and
// returns it (NULL on any error). On success *pSize receives the
// uncompressed length. The caller owns the returned buffer.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint flags) {
  if (pSize) *pSize = 0;
  if ((!pZip_filename) || (!pArchive_name)) return NULL;
  mz_zip_archive zip_archive;
  MZ_CLEAR_OBJ(zip_archive);
  if (!mz_zip_reader_init_file(
          &zip_archive, pZip_filename,
          flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
    return NULL;
  void *p = NULL;
  int file_index =
      mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags);
  if (file_index >= 0)
    p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
  mz_zip_reader_end(&zip_archive);
  return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Stores a heap-allocated copy of `msg` in *err when `err` is non-NULL.
// The caller owns the resulting string and must free() it.
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (!err) return;
#ifdef _WIN32
  *err = _strdup(msg.c_str());
#else
  *err = strdup(msg.c_str());
#endif
}
// Byte count of the version block at the start of an EXR file
// (presumably the 4-byte magic plus the 4-byte version word — confirm
// against the header-parsing code).
static const int kEXRVersionSize = 8;
// Byte-wise copy of a 16-bit value (safe for unaligned pointers).
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 2; ++i) {
    d[i] = s[i];
  }
}
// Byte-swaps a 16-bit value in place on big-endian builds; no-op when
// MINIZ_LITTLE_ENDIAN is defined.
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t = b[0];
  b[0] = b[1];
  b[1] = t;
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// Byte-wise copy of a 32-bit int (safe for unaligned pointers).
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit unsigned int (safe for unaligned pointers).
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit float (safe for unaligned pointers; no FP ops).
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) {
    d[i] = s[i];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// Reverses the byte order of a 32-bit unsigned int in place on big-endian
// builds; no-op when MINIZ_LITTLE_ENDIAN is defined.
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t;
  t = b[0]; b[0] = b[3]; b[3] = t;
  t = b[1]; b[1] = b[2]; b[2] = t;
#endif
}
// Reverses the byte order of a 32-bit int in place on big-endian builds;
// no-op when MINIZ_LITTLE_ENDIAN is defined.
static void swap4(int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t;
  t = b[0]; b[0] = b[3]; b[3] = t;
  t = b[1]; b[1] = b[2]; b[2] = t;
#endif
}
// Reverses the byte order of a 32-bit float in place on big-endian builds;
// swaps raw bytes only (no float arithmetic, so NaN payloads survive).
// No-op when MINIZ_LITTLE_ENDIAN is defined.
static void swap4(float *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t;
  t = b[0]; b[0] = b[3]; b[3] = t;
  t = b[1]; b[1] = b[2]; b[2] = t;
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
// Reverses the byte order of a 64-bit value in place on big-endian builds;
// no-op when MINIZ_LITTLE_ENDIAN is defined.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  for (int i = 0; i < 4; ++i) {
    unsigned char t = b[i];
    b[i] = b[7 - i];
    b[7 - i] = t;
  }
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
// 32-bit IEEE float viewed simultaneously as raw bits (`u`), a float
// (`f`), and sign/exponent/mantissa bit-fields (`s`). Field order is
// flipped under the endianness switch so the fields line up with the bits.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// 16-bit half-precision value as raw bits (`u`) plus sign/exponent/
// mantissa bit-fields (1/5/10 bits). Field order is flipped under the
// endianness switch so the fields line up with the bits.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Converts an IEEE 754 half (FP16) to a float (FP32), handling Inf/NaN
// and zero/denormal inputs. Based on Fabian Giesen's public-domain
// half_to_float (gist.github.com/rygorous/2156668).
static FP32 half_to_float(FP16 h) {
  // `magic` = 2^-14 as a float; used to renormalize denormal halves.
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust
  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }
  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}
// Converts a float (FP32) to an IEEE 754 half (FP16) with rounding,
// handling signed zero/denormal, Inf/NaN, overflow and underflow.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};
  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else  // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }
  // Sign is carried across unchanged in every case.
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Reads a NUL-terminated string of at most `len` bytes starting at `ptr`.
// On success stores the string (without the terminator) in *s and returns
// a pointer just past the '\0'. Returns NULL — and clears *s — when no
// terminator is found within `len` bytes.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  size_t n = 0;
  while (n < len && ptr[n] != 0) {
    n++;
  }
  if (n >= len) {
    // No terminator inside the window: report failure.
    (*s) = std::string();
    return NULL;
  }
  s->assign(ptr, ptr + n);
  return ptr + n + 1;  // skip '\0'
}
// Parses one EXR attribute from `marker` (at most `size` bytes) laid out
// as: name '\0' type '\0' uint32 data_len, then data_len payload bytes.
// On success fills name/type/data, stores the total bytes consumed in
// *marker_size, and returns true. A zero-length payload is only accepted
// for "string" attributes (represented as a single '\0' in `data`).
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);
  marker += name_len + 1;
  size -= name_len + 1;
  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);
  marker += type_len + 1;
  size -= type_len + 1;
  if (size < sizeof(uint32_t)) {
    return false;
  }
  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  // Length is stored little-endian; swap4 corrects it on big-endian hosts.
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    } else {
      return false;
    }
  }
  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);
  if (size < data_len) {
    return false;
  }
  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}
// Appends one serialized EXR attribute to `out`:
// name '\0' type '\0' int32 length (little-endian), then `len` raw bytes.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  const size_t name_bytes = strlen(name) + 1;  // include the '\0'
  const size_t type_bytes = strlen(type) + 1;  // include the '\0'
  out->insert(out->end(), name, name + name_bytes);
  out->insert(out->end(), type, type + type_bytes);
  // swap4 converts to the file's byte order (no-op on little-endian hosts).
  int stored_len = len;
  tinyexr::swap4(&stored_len);
  const unsigned char *len_bytes =
      reinterpret_cast<unsigned char *>(&stored_len);
  out->insert(out->end(), len_bytes, len_bytes + sizeof(int));
  out->insert(out->end(), data, data + len);
}
// Per-channel metadata for an EXR "chlist" attribute entry.
typedef struct {
  std::string name;          // less than 255 bytes long
  int pixel_type;            // pixel type as stored in the file
  int requested_pixel_type;  // pixel type to convert to (used when writing)
  int x_sampling;            // horizontal sampling factor from the entry
  int y_sampling;            // vertical sampling factor from the entry
  unsigned char p_linear;    // pLinear flag byte from the entry
  unsigned char pad[3];      // explicit padding; keeps fields 4-byte aligned
} ChannelInfo;
// Integer bounding box mirroring EXR's box2i attribute layout
// (min/max corner coordinates).
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;
// Parsed EXR header for a single part: the channel list, the remaining
// attributes in raw form, and the standard attributes broken out into
// typed fields. clear() resets everything to the empty/zero state.
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;  // from the "channels" attribute
  std::vector<EXRAttribute> attributes;        // all other attributes, raw
  Box2iInfo data_window;   // pixel region actually stored in the file
  int line_order;          // scanline order (0 = increasing Y per OpenEXR)
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;
  int chunk_count;  // chunk count (required for multi-part files; see below)
  // Tiled format
  int tiled;  // Non-zero if the part is tiled.
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  unsigned int header_len;  // header length in bytes
  int compression_type;     // value of the "compression" attribute
  // required for multi-part or non-image files
  std::string name;
  // required for multi-part or non-image files
  std::string type;
  // Resets all fields to a pristine (empty/zero) state.
  void clear() {
    channels.clear();
    attributes.clear();
    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;
    chunk_count = 0;
    // Tiled format
    tiled = 0;
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;
    header_len = 0;
    compression_type = 0;
    name.clear();
    type.clear();
  }
};
// Parses an EXR "chlist" attribute payload (`data`) into `channels`.
// The payload is a sequence of records — name '\0', int32 pixel_type,
// uchar pLinear, uchar[3] reserved, int32 x_sampling, int32 y_sampling —
// terminated by an empty name. Returns false on truncated/malformed input.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));
  for (;;) {
    // A leading '\0' terminates the channel list.
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;
    // Bytes remaining from p to the end of the attribute payload.
    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }
    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }
    // Each record carries 16 fixed bytes after the name; bounds-check them.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }
    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;  // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));  // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;
    // Fields are stored little-endian; swap on big-endian hosts.
    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);
    channels.push_back(info);
  }
  return true;
}
// Serializes `channels` into the wire format of an EXR "chlist" attribute:
// one record per channel (name '\0', int32 pixel type, pLinear byte plus
// 3 reserved bytes, int32 x_sampling, int32 y_sampling) followed by a
// single terminating '\0'. Note: requested_pixel_type — not pixel_type —
// is what gets written.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  // +1 for the list terminator; resize zero-fills, which also zeroes the
  // 3 reserved bytes skipped below.
  data.resize(sz + 1);
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;
    int pixel_type = channels[c].requested_pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    // Convert to file byte order (no-op on little-endian hosts).
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);
    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].p_linear;
    // Advance past pLinear plus the 3 reserved (zeroed) bytes.
    p += 4;
    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0';
}
// ZIP-compresses one block of scanline data into `dst` following
// OpenEXR's scheme: de-interleave bytes into two halves, delta-encode,
// then deflate. Falls back to a raw copy when compression does not shrink
// the data (Issue 40). `compressedSize` receives the final byte count;
// `dst` must be large enough for the compress-bound of `src_size`.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    // Even-indexed bytes go to the first half of tmpBuf, odd-indexed
    // bytes to the second half.
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor.
  //
  {
    // Replace each byte with the biased difference from its predecessor
    // so smooth data deflates better.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  compressedSize = outSize;
#else
  // zlib path when miniz is disabled.
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  (void)ret;
  compressedSize = outSize;
#endif
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inverse of CompressZip(): inflates `src`, undoes the delta predictor,
// then re-interleaves the two half-buffers into `dst`. When src_size
// equals *uncompressed_size the block was stored raw and is just copied
// (Issue 40). Returns false when inflation fails.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  // zlib path when miniz is disabled.
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor.
  {
    // Undo the delta encoding: each byte is its predecessor plus the
    // stored (biased) difference.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data.
  {
    // Interleave the first and second halves back into original order.
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// RLE tuning constants (from OpenEXR): runs shorter than MIN_RUN_LENGTH
// are emitted as literals, and a single run or literal stretch never
// exceeds MAX_RUN_LENGTH so its count fits in one signed byte.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
// RLE-encodes `inLength` bytes from `in` into `out` and returns the
// encoded length. Runs of >= MIN_RUN_LENGTH identical bytes become
// (count-1, byte); literal stretches become (-count, bytes...).
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;
  while (runStart < inEnd) {
    // Extend the run while bytes repeat (capped at MAX_RUN_LENGTH).
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }
    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //
      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run
      //
      // Scan ahead until a run of >= 3 identical bytes begins, or the
      // literal stretch hits MAX_RUN_LENGTH.
      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }
      // Negative count marks a literal stretch.
      *outWrite++ = static_cast<char>(runStart - runEnd);
      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }
    ++runEnd;
  }
  return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
// Decodes rleCompress() output into `out`: a negative count introduces
// that many literal bytes; a non-negative count expands to (count+1)
// repeats of the following byte. Returns the decoded length, or 0 when
// the output would exceed maxLength (bounds hardening from issue #116).
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;
  while (inLength > 0) {
    if (*in < 0) {
      // Literal stretch of `count` bytes.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;
      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Run: repeat the next byte (count + 1) times.
      int count = *in++;
      inLength -= 2;
      if (0 > (maxLength -= count + 1)) return 0;
      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;
      in++;
    }
  }
  return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// RLE-compresses one block of scanline data into `dst` following
// OpenEXR's scheme: de-interleave bytes into two halves, delta-encode,
// then run-length encode. Falls back to a raw copy when the encoding does
// not shrink the data (Issue 40). `dst` must hold up to (src_size * 3) / 2
// bytes; `compressedSize` receives the final byte count.
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  //
  // Reorder the pixel data.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    // Even-indexed bytes go to the first half of tmpBuf, odd-indexed
    // bytes to the second half.
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor.
  //
  {
    // Replace each byte with the biased difference from its predecessor
    // so runs become longer.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);
  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inverse of CompressRle(): RLE-decodes `src`, undoes the delta
// predictor, then re-interleaves the two half-buffers into `dst`. When
// src_size equals uncompressed_size the block was stored raw and is just
// copied (Issue 40). Returns false on malformed input.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }
  std::vector<unsigned char> tmpBuf(uncompressed_size);
  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  // The decode must produce exactly the expected number of bytes.
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  // Predictor.
  {
    // Undo the delta encoding: each byte is its predecessor plus the
    // stored (biased) difference.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data.
  {
    // Interleave the first and second halves back into original order.
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel bookkeeping used by the PIZ compressor/uncompressor.
struct PIZChannelData {
  unsigned short *start;  // first element of this channel in the temp buffer
  unsigned short *end;    // write cursor (one past the last element copied)
  int nx;                 // width in pixels
  int ny;                 // number of scan lines
  int ys;                 // y sampling; its use is commented out in CompressPiz
  int size;               // 16-bit words per pixel (1 for HALF, 2 for UINT/FLOAT)
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// Forward 14-bit Haar step: l receives the (floored) average of a and b,
// h receives the difference a - b, both computed in signed short
// arithmetic and stored back as unsigned shorts.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);

  const short mean = (sa + sb) >> 1;
  const short diff = sa - sb;

  l = static_cast<unsigned short>(mean);
  h = static_cast<unsigned short>(diff);
}
// Inverse 14-bit Haar step: reconstruct (a, b) from the (average,
// difference) pair produced by wenc14.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const short sl = static_cast<short>(l);
  const short sh = static_cast<short>(h);

  const int hi = sh;
  // Undo the floored average: add back the low bit and half the difference.
  const int ai = sl + (hi & 1) + (hi >> 1);

  const short sa = static_cast<short>(ai);
  const short sb = static_cast<short>(ai - hi);  // b = a - h

  a = static_cast<unsigned short>(sa);
  b = static_cast<unsigned short>(sb);
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the modulo-arithmetic 16-bit wavelet (wenc16/wdec16).
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);  // bias added to `a` before encoding
const int M_OFFSET = 1 << (NBITS - 1);  // bias applied to the mean when d < 0
const int MOD_MASK = (1 << NBITS) - 1;  // wrap results to 16 bits
// Forward 16-bit wavelet step with modulo arithmetic: works for the full
// 16-bit range, at the cost of slightly worse Huffman compressibility.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const int ao = (a + A_OFFSET) & MOD_MASK;
  int m = (ao + b) >> 1;
  int d = ao - b;

  // A negative difference flips the mean's bias before wrapping.
  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d & MOD_MASK);
}
// Inverse 16-bit wavelet step with modulo arithmetic (inverse of wenc16).
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = l;
  const int d = h;

  const int bb = (m - (d >> 1)) & MOD_MASK;
  const int aa = (d + bb - A_OFFSET) & MOD_MASK;

  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
// Forward 2D wavelet transform, in place. Each level combines 2x2 sample
// groups into (average, difference) coefficients; `ox`/`oy` are the element
// strides between horizontally/vertically adjacent samples. The 14-bit
// basis (wenc14) is used when every value fits in 14 bits (mx < 1<<14),
// otherwise the modulo-arithmetic 16-bit basis (wenc16).
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// Inverse 2D wavelet transform, in place (inverse of wav2Encode). First
// finds the coarsest level that fits the smaller dimension, then walks
// the levels from coarse to fine, reversing each 2x2 step with wdec14 or
// wdec16 depending on `mx` (must match the basis used for encoding).
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //
  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Table sizes for the 16-bit Huffman coder.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size (+1 for the RLE pseudo-symbol)
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;         // mask to index the decoding table
// One slot of the primary decoding table, indexed by HUF_DECBITS input
// bits. A slot either resolves a short code directly (len/lit) or points
// to a list of candidate symbols for long codes (p, with lit as its size).
struct HufDec {  // short code long code
  //-------------------------------
  unsigned int len : 8;   // code length       0
  unsigned int lit : 24;  // lit               p size
  unsigned int *p;        // 0                 lits
};
// Low 6 bits of a packed table entry hold the code length in bits.
inline long long hufLength(long long code) { return code & 0x3f; }
// Bits above the low 6 of a packed table entry hold the code value itself.
inline long long hufCode(long long code) {
  return code >> 6;
}
// Append the low `nBits` of `bits` to the bit stream: `c` buffers pending
// bits (MSB-first), `lc` counts them, and complete bytes are flushed to
// `out` as they become available.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;

  for (; lc >= 8; lc -= 8) *out++ = static_cast<char>(c >> (lc - 8));
}
// Extract the next `nBits` bits from the stream: refill the accumulator
// `c` from `in` a byte at a time until at least nBits are buffered, then
// return the top nBits and leave the remainder in (c, lc).
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  for (; lc < nBits; lc += 8)
    c = (c << 8) | static_cast<unsigned char>(*in++);

  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Turn an array of code lengths into a canonical Huffman code table:
// on input hcode[i] holds the bit length of symbol i's code (0 = unused);
// on output it holds (code << 6) | length. Canonical codes can be rebuilt
// from lengths alone — see http://www.compressconsult.com/huffman/
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  // Histogram of code lengths; the maximum supported length is 58 bits.
  long long countPerLength[59] = {0};

  for (int i = 0; i < HUF_ENCSIZE; ++i) countPerLength[hcode[i]] += 1;

  // For each length (longest first) compute the numerically lowest code
  // of that length and leave it in countPerLength[len].
  long long c = 0;

  for (int len = 58; len > 0; --len) {
    const long long next = (c + countPerLength[len]) >> 1;
    countPerLength[len] = c;
    c = next;
  }

  // Hand out consecutive code values of the appropriate length to each
  // symbol, packing (code << 6) | length back into hcode.
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    const int len = static_cast<int>(hcode[i]);
    if (len > 0) hcode[i] = len | (countPerLength[len]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Ordering for the frequency heap in hufBuildEncTable: the STL heap
// functions build a max-heap, so comparing with ">" puts the SMALLEST
// frequency on top (a min-heap of frequency pointers).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};
// Build the Huffman encoding table from symbol frequencies. On return,
// frq holds packed (code, length) entries (see hufCanonicalCodeTable),
// *im/*iM bound the non-zero symbols, and frq[*iM] is a pseudo-symbol
// reserved for run-length encoding.
static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         // o: min frq index
    int *iM)         // o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded. (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;  // number of entries currently in fHeap

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly. Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i. Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes. Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree. For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly. When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    // (Indices are recovered from the heap's pointers into frq.)
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs. Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
// Zero-run escape codes used when packing the encoding table (see the
// table in the comment above): 59..62 encode runs of 2..5 zeroes, 63 is
// followed by an 8-bit count for runs of 6 or more.
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
// Pack the code lengths of hcode[im..iM] into *pcode, 6 bits per symbol,
// compressing runs of zero-length (unused) symbols with the
// SHORT_ZEROCODE_RUN / LONG_ZEROCODE_RUN escapes defined above.
static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            // o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;  // pending output bits
  int lc = 0;       // number of pending bits in c

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Measure the run of consecutive zero-length codes starting here
      // (note: advances the loop index `im` past the run).
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // Code 63 followed by (count - SHORTEST_LONG_RUN) in 8 bits.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          // Codes 59..62 encode runs of 2..5 zeroes directly.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  // Flush any remaining partial byte, left-aligned.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
// Inverse of hufPackEncTable(): read 6-bit code lengths (expanding the
// zero-run escape codes) from *pcode into hcode, then rebuild the
// canonical code values from the lengths. Returns false when the packed
// data would be read out of bounds or a zero run overflows [im, iM].
static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    // Bounds check before consuming the next 6-bit length.
    if (p - *pcode >= ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      // Long run: 8-bit count biased by SHORTEST_LONG_RUN.
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short run: codes 59..62 stand for 2..5 zero-length symbols.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  // All lengths known; derive the actual canonical code values.
  hufCanonicalCodeTable(hcode);

  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
// Zero every slot of a caller-allocated decoding table [HUF_DECSIZE] so
// that no entry looks like a valid short code or owns a secondary array.
static void hufClearDecTable(HufDec *hdecod)  // io: decoding table
{
  for (HufDec *e = hdecod, *end = hdecod + HUF_DECSIZE; e != end; ++e) {
    e->len = 0;
    e->lit = 0;
    e->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
// Build the decoding hash table from the encoding table: short codes
// (<= HUF_DECBITS bits) fill every primary slot their prefix can reach;
// long codes append their symbol index to a per-slot secondary list.
// Returns false on an inconsistent (corrupt) code table.
static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
//    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }

      // Grow the secondary symbol list by one (copy-and-replace; long
      // codes are infrequent, so this simple scheme is acceptable).
      pl->lit++;

      if (pl->p) {
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      // Every HUF_DECBITS-bit index whose top l bits equal c resolves
      // directly to symbol im.
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
// Release the secondary (long-code) symbol arrays allocated by
// hufBuildDecTable() and null out the pointers.
static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p == 0) continue;
    delete[] hdecod[i].p;
    hdecod[i].p = 0;
  }
}
//
// ENCODING
//
// Emit one Huffman code: unpack the (length, value) pair from the packed
// table entry and forward it to outputBits.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  const int codeLen = static_cast<int>(hufLength(code));
  const long long codeValue = hufCode(code);
  outputBits(codeLen, codeValue, c, lc, out);
}
// Emit runCount + 1 occurrences of the symbol whose packed code is sCode:
// either explicitly, or — when shorter — as one sCode followed by the
// run-length code and an 8-bit repeat count (expanded by getCode() in the
// decoder).
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCount.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // `runCount-- >= 0` emits the symbol runCount + 1 times.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
// Runs of up to 256 equal symbols are collapsed via sendCode using the
// run-length pseudo-symbol rlc.
//
static int hufEncode  // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : number of input values (elements, not bytes)
     int rlc,                   // i : rl code
     char *out)                 // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;  // repeats of the current symbol s beyond its first occurrence

  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  // Flush the final partial byte, left-aligned.
  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// Pull one byte from `in` into the bit accumulator `c` / bit count `lc`
// (MSB-first). A macro so it is always inlined — see the comment above.
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
// Emit the decoded symbol `po` to *out. When po equals the run-length
// code `rlc`, read an 8-bit repeat count from the bit stream and duplicate
// the previously emitted symbol that many times. Returns false on any
// would-be out-of-bounds read or write (ob/oe bound the output buffer,
// in_end bounds the input).
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      if ((in + 1) >= in_end) {
        return false;
      }

      getChar(c, lc, in);
    }

    lc -= 8;

    unsigned char cs = (c >> lc);  // repeat count

    // Run must fit in the remaining output space.
    if (out + cs > oe) return false;

    // Bounds check for safety
    // Issue 100.
    if ((out - 1) < ob) return false;

    // Duplicate the previously emitted symbol cs times.
    unsigned short s = out[-1];

    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
// Decode a Huffman bit stream. Short codes are resolved with one primary
// table lookup; long codes fall back to a linear search of the slot's
// secondary symbol list against the encoding table. Returns false on any
// malformed input (bad code, truncated stream, or wrong output count).
static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in 16-bit words)
                      unsigned short *out)  // o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        // std::cout << "lit = " << pl.lit << std::endl;
        // std::cout << "rlc = " << rlc << std::endl;
        // std::cout << "c = " << c << std::endl;
        // std::cout << "lc = " << lc << std::endl;
        // std::cout << "in = " << in << std::endl;
        // std::cout << "out = " << out << std::endl;
        // std::cout << "oe = " << oe << std::endl;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code

        //
        // Search long code
        //
        int j;

        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }

  //
  // Get remaining (short) codes
  //
  // Drop the padding bits of the final partial byte first.
  //
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }

  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();

  return true;
}
// Histogram the n 16-bit symbols of `data` into freq[0..HUF_ENCSIZE-1].
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int s = 0; s < HUF_ENCSIZE; ++s) freq[s] = 0;

  for (int i = 0; i < n; ++i) freq[data[i]] += 1;
}
// Store `i` into buf[0..3] in little-endian byte order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  for (int k = 0; k < 4; ++k) b[k] = (unsigned char)(i >> (8 * k));
}
// Read a little-endian 32-bit unsigned integer from buf[0..3].
// Uses unsigned arithmetic throughout: the previous expression shifted a
// (promoted, signed) int left by 24, which is undefined behavior whenever
// the top byte has its high bit set.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
// Compress nRaw 16-bit values into `compressed`. Output layout: 20-byte
// header (im, iM, tableLength, nBits, reserved), the packed encoding
// table, then the Huffman bit stream. The pseudo-symbol iM (added by
// hufBuildEncTable) serves as the run-length code. Returns the total
// number of bytes written.
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  std::vector<long long> freq(HUF_ENCSIZE);

  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);

  // Pack the code-length table right after the 20-byte header.
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);

  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;

  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;

  // Fill in the header last, once the sizes are known.
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + data_length - compressed;
}
// Decode a hufCompress()-produced buffer into raw->size() 16-bit values.
// Layout: 20-byte header (im, iM, tableLength, nBits, reserved), the
// packed code-length table, then the Huffman bit stream. Returns false on
// malformed or truncated input.
//
// Fixes vs. the previous version: the return values of hufUnpackEncTable,
// hufBuildDecTable and hufDecode are now checked (decoding no longer
// proceeds with a partially-built table on corrupt input), the 20-byte
// header size is validated before it is read, and the decoding table is
// freed on every exit path after it may own allocations.
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // NOTE(review): empty input with an empty output buffer also reports
    // failure here; kept as-is to preserve the original behavior.
    if (raw->size() != 0) return false;

    return false;
  }

  // Reject buffers too small to contain the fixed header.
  if (nCompressed < 20) return false;

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  if (nBits < 0) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //
  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    // Propagate failure instead of decoding with a bogus table.
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }

    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;
    }

    if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }

    bool ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
                        raw->data());

    hufFreeDecTable(&hdec.at(0));

    if (!ok) return false;
  }
  return true;
}
//
// Functions to compress the range of values in the pixel data
//
// Presence bitmap over all possible 16-bit pixel values: one bit each.
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);  // bitmap size in bytes
// Build a presence bitmap over the 16-bit values in `data`, and report
// the first and last bitmap byte indices that contain any set bit.
// Zero is never marked: it is assumed present and is not stored
// explicitly in the bitmap.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  memset(bitmap, 0, BITMAP_SIZE);

  for (int i = 0; i < nData; ++i) {
    const unsigned short v = data[i];
    bitmap[v >> 3] |= static_cast<unsigned char>(1 << (v & 7));
  }

  // zero is not explicitly stored in the bitmap; we assume that the
  // data always contain zeroes
  bitmap[0] &= static_cast<unsigned char>(~1);

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i] == 0) continue;
    if (i < minNonZero) minNonZero = static_cast<unsigned short>(i);
    if (i > maxNonZero) maxNonZero = static_cast<unsigned short>(i);
  }
}
// Build a dense forward LUT from the bitmap: every value present in the
// bitmap (and value 0, which is always implied) maps to the next
// consecutive index; absent values map to 0. Returns the largest index
// assigned, i.e. the number of ones in the bitmap minus 1.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int next = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    const bool present = (i == 0) || ((bitmap[i >> 3] >> (i & 7)) & 1);
    lut[i] = present ? static_cast<unsigned short>(next++) : 0;
  }

  return static_cast<unsigned short>(next - 1);  // maximum value stored in lut[]
}
// Build the inverse LUT: lut[k] is the k-th value present in the bitmap
// (value 0 is always implied); unused tail entries are zeroed. Returns
// the maximum k where lut[k] is meaningful, i.e. the number of ones in
// the bitmap minus 1.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    const bool present = (i == 0) || ((bitmap[i >> 3] >> (i & 7)) & 1);
    if (present) lut[k++] = static_cast<unsigned short>(i);
  }

  const int maxIndex = k - 1;

  while (k < USHORT_RANGE) lut[k++] = 0;

  return static_cast<unsigned short>(maxIndex);
}
// Remap every element of `data` through `lut`, in place.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  int i = 0;
  while (i < nData) {
    data[i] = lut[data[i]];
    ++i;
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// PIZ-compress `inSize` bytes of interleaved scanline data: build a value
// bitmap + forward LUT to densify the symbol range, wavelet-encode each
// channel, then Huffman-encode the whole buffer. Output layout:
// min/max bitmap indices, the trimmed bitmap, a 4-byte Huffman length,
// then the Huffman data. Falls back to a raw copy when compression would
// not shrink the data (Issue 40). Little-endian only.
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));

  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  // Carve tmpBuffer into per-channel regions (planar layout).
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];

    cd.start = tmpBufferEnd;
    cd.end = cd.start;

    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    cd.size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }

  // Deinterleave the input scanlines into the per-channel regions.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //    continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);

  // Densify the value range so the wavelet/Huffman stages see small symbols.
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));

  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);

  if (minNonZero <= maxNonZero) {
    // Only the non-zero span of the bitmap is stored.
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }

  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);

  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));

  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
// Decompress a PIZ-compressed scanline block.
//
//   outPtr     : destination buffer (`tmpBufSize` bytes).
//   inPtr/inLen: compressed payload.
//   tmpBufSize : uncompressed size in bytes (width * num_lines * pixel size).
//
// Layout of the compressed payload:
//   [u16 minNonZero][u16 maxNonZero][bitmap slice][i32 huffLen][huff data]
//
// Returns false on corrupt/truncated input instead of reading or writing out
// of bounds.
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }

  // Need at least the 2+2 byte bitmap-range header.
  if (inLen < 4) {
    return false;
  }

  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  memset(bitmap.data(), 0, BITMAP_SIZE);

  const unsigned char *ptr = inPtr;
  // Use `cpy2` instead of dereferencing to avoid unaligned memory access.
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;

  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }

  if (minNonZero <= maxNonZero) {
    size_t bitmapLen = static_cast<size_t>(maxNonZero - minNonZero + 1);
    // Guard against reading past the end of the compressed input.
    if (static_cast<size_t>(ptr - inPtr) + bitmapLen > inLen) {
      return false;
    }
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, bitmapLen);
    ptr += bitmapLen;
  }

  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());

  //
  // Huffman decoding
  //
  if (static_cast<size_t>(ptr - inPtr) + sizeof(int) > inLen) {
    return false;
  }
  int length;
  // length = *(reinterpret_cast<const int *>(ptr));
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);

  if (length < 0 || size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }

  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  // BUGFIX: the return value was previously ignored; a failed Huffman decode
  // would leave `tmpBuffer` partially filled and continue anyway.
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer)) {
    return false;
  }

  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));

  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  size_t shortsUsed = 0;  // running total, in unsigned shorts

  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));

    size_t channelShorts = static_cast<size_t>(channelData[i].nx) *
                           static_cast<size_t>(channelData[i].ny) *
                           static_cast<size_t>(channelData[i].size);
    shortsUsed += channelShorts;
    // BUGFIX: without this check a corrupt header could advance
    // `tmpBufferEnd` past `tmpBuffer` and overflow the heap in wav2Decode.
    if (shortsUsed > tmpBuffer.size()) {
      return false;
    }
    tmpBufferEnd += channelShorts;
  }

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));

  // Interleave channel planes back into scanline order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //     continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters parsed from the `zfpCompression*` header attributes.
// Exactly one of rate / precision / tolerance is meaningful, selected by
// `type` (TINYEXR_ZFP_COMPRESSIONTYPE_*). Pad members are layout-only and
// intentionally left uninitialized.
struct ZFPCompressionParam {
  double rate;
  unsigned int precision;
  unsigned int __pad0;
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*
  unsigned int __pad1;

  ZFPCompressionParam()
      : rate(2.0),
        precision(0),
        tolerance(0.0),
        type(TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {}
};
// Extract ZFP compression parameters from the EXR header attributes.
//
// Requires a `zfpCompressionType` attribute (1 byte), then — depending on the
// type — one of `zfpCompressionRate` (double), `zfpCompressionPrecision`
// (int) or `zfpCompressionTolerance` (double).
//
// Returns true and fills `param` on success; returns false and appends a
// human-readable message to `err` (when non-NULL) otherwise.
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      } else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }

  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // BUGFIX: this previously stored the value into `param->rate`,
        // leaving `precision` at its default.
        param->precision = static_cast<unsigned int>(
            *(reinterpret_cast<int *>(attributes[i].value)));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  } else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }

  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size =
size_t(dst_width) * size_t(dst_num_lines) * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
unsigned int *outSize, const float *inPtr, int width,
int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, static_cast<unsigned int>(width),
static_cast<unsigned int>(num_lines * num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = size_t(width) * size_t(num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// compress 4x4 pixel block.
for (size_t y = 0; y < size_t(num_lines); y += 4) {
for (size_t x = 0; x < size_t(width); x += 4) {
float fblock[16];
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
std::string e;
if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
int(num_attributes), &e)) {
// This code path should not be reachable.
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// address may not be aliged. use byte-wise copy for safety.#76
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
// Corrupsed data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
// Decode one tile. Computes the tile's effective pixel dimensions (edge tiles
// may be cropped by the level boundary), stores them in *width / *height, and
// forwards to DecodePixelData with the full tile width as the row stride.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // data_width and data_height are the dimensions of the current (sub)level.
  // Reject tiles whose origin lies outside the level.
  if ((tile_size_x * tile_offset_x > data_width) ||
      (tile_size_y * tile_offset_y > data_height)) {
    return false;
  }

  // Clamp the tile extent to the remaining pixels in each direction.
  const int remain_x = data_width - tile_offset_x * tile_size_x;
  const int remain_y = data_height - tile_offset_y * tile_size_y;
  (*width) = (remain_x < tile_size_x) ? remain_x : tile_size_x;
  (*height) = (remain_y < tile_size_y) ? remain_y : tile_size_y;

  // Image size = tile size.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}
// Compute the byte offset of each channel within one interleaved pixel and
// the total pixel size in bytes. Channels are laid out in declaration order.
// Returns false when a channel has an unknown pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);

    size_t component_size = 0;
    switch (channels[c].pixel_type) {
      case TINYEXR_PIXELTYPE_HALF:
        component_size = sizeof(unsigned short);
        break;
      case TINYEXR_PIXELTYPE_FLOAT:
        component_size = sizeof(float);
        break;
      case TINYEXR_PIXELTYPE_UINT:
        component_size = sizeof(unsigned int);
        break;
      default:
        // Unknown pixel type.
        return false;
    }

    (*pixel_data_size) += static_cast<int>(component_size);
    (*channel_offset) += component_size;
  }

  return true;
}
// Allocate one destination image per channel, sized data_width x data_height.
// The element size of each image follows the stored pixel type, except that a
// stored HALF channel may be widened to FLOAT when requested.
// Ownership of the returned array and each image passes to the caller.
// NOTE(review): malloc results are not checked here — presumably callers
// tolerate that; confirm before hardening.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  const size_t pixel_count =
      static_cast<size_t>(data_width) * static_cast<size_t>(data_height);

  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    // Element size of the destination image for this channel; 0 means an
    // unsupported combination (matching the original assert(0) paths, where
    // the slot is left unassigned).
    size_t elem_size = 0;

    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        elem_size = sizeof(unsigned short);
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF channel promoted to FLOAT on request.
        elem_size = sizeof(float);
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      elem_size = sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      elem_size = sizeof(unsigned int);
    } else {
      assert(0);
    }

    if (elem_size > 0) {
      images[c] =
          reinterpret_cast<unsigned char *>(malloc(elem_size * pixel_count));
    }
  }

  return images;
}
#ifdef _WIN32
// Convert a UTF-8 string to a UTF-16 std::wstring via the Win32 API.
// First call measures the required length; second call performs the
// conversion into the pre-sized buffer.
static inline std::wstring UTF8ToWchar(const std::string &str) {
  const int required_len =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  std::wstring result(required_len, 0);
  MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &result[0],
                      (int)result.size());
  return result;
}
#endif
// Parses one EXR part header from `buf` (remaining `size` bytes) into `info`.
//
// Returns TINYEXR_SUCCESS on success; on failure returns an error code and,
// when `err` is non-NULL, appends a human-readable message. For multipart
// files, a header consisting of a single '\0' byte marks the end of the
// header list; in that case `*empty_header` is set and SUCCESS is returned.
//
// Fixes over the previous version (all on attacker-controlled input):
//  - "tiles" attribute: size was only checked with assert(); in release
//    builds a short attribute caused out-of-bounds reads. Now validated.
//  - "compression" attribute: data[0] was read without checking the vector
//    is non-empty (undefined behavior on an empty attribute). Now validated.
//  - custom attributes: empty payloads called malloc(0) and data.at(0),
//    the latter throwing std::out_of_range. Now guarded.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;
  bool has_name = false;
  bool has_type = false;

  // Reset output to known defaults before parsing.
  info->name.clear();
  info->type.clear();
  info->data_window.min_x = 0;
  info->data_window.min_y = 0;
  info->data_window.max_x = 0;
  info->data_window.max_y = 0;
  info->line_order = 0;  // @fixme
  info->display_window.min_x = 0;
  info->display_window.min_y = 0;
  info->display_window.max_x = 0;
  info->display_window.max_y = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;
  info->tiled = 0;
  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;
  info->attributes.clear();

  // Read attributes
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // '\0' terminates the attribute list of this header.
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    // For a multipart file, the version field 9th bit is 0.
    if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
      // Layout: uint32 x_size, uint32 y_size, uchar mode = exactly 9 bytes.
      // Validate instead of assert() so release builds reject short data.
      if (data.size() != 9) {
        if (err) {
          (*err) += "Invalid `tiles` attribute size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
          y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
        if (err) {
          (*err) = "Tile sizes were invalid.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
      info->tiled = 1;
    } else if (attr_name.compare("compression") == 0) {
      // Guard against an empty attribute before reading data[0].
      if (data.empty()) {
        if (err) {
          (*err) += "Invalid `compression` attribute size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->data_window.min_x);
        tinyexr::swap4(&info->data_window.min_y);
        tinyexr::swap4(&info->data_window.max_x);
        tinyexr::swap4(&info->data_window.max_y);
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->display_window.min_x);
        tinyexr::swap4(&info->display_window.min_y);
        tinyexr::swap4(&info->display_window.max_x);
        tinyexr::swap4(&info->display_window.max_y);
        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->pixel_aspect_ratio);
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(&info->screen_window_center[0]);
        tinyexr::swap4(&info->screen_window_center[1]);
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->screen_window_width);
        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(&info->chunk_count);
      }
    } else if (attr_name.compare("name") == 0) {
      if (!data.empty() && data[0]) {
        // Ensure NUL termination before strlen.
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->name.resize(len);
        info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_name = true;
      }
    } else if (attr_name.compare("type") == 0) {
      if (!data.empty() && data[0]) {
        // Ensure NUL termination before strlen.
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->type.resize(len);
        info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_type = true;
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        if (data.empty()) {
          // Empty payload: previously did malloc(0) + data.at(0), and the
          // latter throws std::out_of_range on an empty vector.
          attrib.value = NULL;
        } else {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        }
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    // Multipart and deep files additionally require "name" and "type".
    if (version->multipart || version->non_image) {
      if (!has_name) {
        ss_err << "\"name\" attribute not found in the header."
               << std::endl;
      }
      if (!has_type) {
        ss_err << "\"type\" attribute not found in the header."
               << std::endl;
      }
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window.min_x = info.display_window.min_x;
exr_header->display_window.min_y = info.display_window.min_y;
exr_header->display_window.max_x = info.display_window.max_x;
exr_header->display_window.max_y = info.display_window.max_y;
exr_header->data_window.min_x = info.data_window.min_x;
exr_header->data_window.min_y = info.data_window.min_y;
exr_header->data_window.max_x = info.data_window.max_x;
exr_header->data_window.max_y = info.data_window.max_y;
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tiled = info.tiled;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
EXRSetNameAttr(exr_header, info.name.c_str());
if (!info.type.empty()) {
if (info.type == "scanlineimage") {
assert(!exr_header->tiled);
} else if (info.type == "tiledimage") {
assert(exr_header->tiled);
} else if (info.type == "deeptile") {
exr_header->non_image = 1;
assert(exr_header->tiled);
} else if (info.type == "deepscanline") {
exr_header->non_image = 1;
assert(!exr_header->tiled);
} else {
assert(false);
}
}
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy pointer
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
// Table of chunk offsets for one EXR part.
// offsets[level][dy][dx] holds the absolute file offset of the chunk at
// tile column dx, row dy, for the given (flattened) level index — see
// LevelIndex() for how (lx, ly) maps to the first dimension. Scanline
// images use a single level: offsets[0][0][block].
struct OffsetData {
OffsetData() : num_x_levels(0), num_y_levels(0) {}
std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
int num_x_levels;  // number of levels along X (see CalculateNumXLevels)
int num_y_levels;  // number of levels along Y (see CalculateNumYLevels)
};
// Maps a (lx, ly) level pair to a flat index into OffsetData::offsets.
// ONE_LEVEL images always use slot 0, MIPMAP levels are indexed by lx
// alone, and RIPMAP levels are stored row-major (lx + ly * num_x_levels).
// Unknown modes trip the assert in debug builds and fall back to 0.
int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
  if (tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
    return 0;
  }
  if (tile_level_mode == TINYEXR_TILE_MIPMAP_LEVELS) {
    return lx;
  }
  if (tile_level_mode == TINYEXR_TILE_RIPMAP_LEVELS) {
    return lx + ly * num_x_levels;
  }
  assert(false);
  return 0;
}
// Size (in pixels) of mip/rip level `level`, given the level-0 size.
// Divides by 2^level; ROUND_UP mode takes the ceiling of the division.
// The result is clamped to at least 1 pixel.
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
  assert(level >= 0);
  const int denom = (int)(1u << (unsigned)level);
  int sz = toplevel_size / denom;
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && sz * denom < toplevel_size) {
    sz += 1;
  }
  return (sz < 1) ? 1 : sz;
}
// Decodes every tile of one resolution level into exr_image->tiles.
// exr_image->level_x / level_y select the level; offsets come from
// offset_data. Tiles are processed in parallel (std::thread or OpenMP when
// enabled); per-tile failures are OR-ed into a shared error bitmask so all
// tiles are attempted and memory is consistently allocated even on error.
// Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA.
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
const OffsetData& offset_data,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const unsigned char* head, const size_t size,
std::string* err) {
int num_channels = exr_header->num_channels;
int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
int num_tiles = num_x_tiles * num_y_tiles;
int err_code = TINYEXR_SUCCESS;
// Error bits OR-ed together by the (possibly concurrent) tile workers.
enum {
EF_SUCCESS = 0,
EF_INVALID_DATA = 1,
EF_INSUFFICIENT_DATA = 2,
EF_FAILED_TO_DECODE = 4
};
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
unsigned error_flag(EF_SUCCESS);
#endif
// Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
// the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
exr_image->level_x == 0 && exr_image->level_y == 0) {
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
#endif
// NOTE(review): calloc's (count, size) arguments are swapped relative to
// convention; the allocation size is the same either way.
exr_image->tiles = static_cast<EXRTile*>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
// Worker pool: each thread claims the next tile index via the atomic
// counter until all tiles are consumed.
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]()
{
int tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
int x_tile = tile_idx % num_x_tiles;
int y_tile = tile_idx / num_x_tiles;
// Chunk layout on disk:
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
if (offset + sizeof(int) * 5 > size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
size_t data_size =
size_t(size - (offset + sizeof(int) * 5));
const unsigned char* data_ptr =
reinterpret_cast<const unsigned char*>(head + offset);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(&tile_coordinates[0]);
tinyexr::swap4(&tile_coordinates[1]);
tinyexr::swap4(&tile_coordinates[2]);
tinyexr::swap4(&tile_coordinates[3]);
// tile_coordinates[2]/[3] are the chunk's own (lx, ly); they must match
// the level being decoded.
if (tile_coordinates[2] != exr_image->level_x) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
if (tile_coordinates[3] != exr_image->level_y) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(&data_len);
if (data_len < 2 || size_t(data_len) > data_size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order,
exr_image->width, exr_image->height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// Failed to decode tile data.
error_flag |= EF_FAILED_TO_DECODE;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto& t : workers) {
t.join();
}
#else
} // parallel for
#endif
// Even in the event of an error, the reserved memory may be freed.
exr_image->num_channels = num_channels;
exr_image->num_tiles = static_cast<int>(num_tiles);
if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
if (err) {
if (error_flag & EF_INSUFFICIENT_DATA) {
(*err) += "Insufficient data length.\n";
}
if (error_flag & EF_FAILED_TO_DECODE) {
(*err) += "Failed to decode tile data.\n";
}
}
return err_code;
}
// Decodes all pixel chunks of one EXR part into exr_image, using the
// previously reconstructed chunk offsets in offset_data. Handles both
// tiled images (delegating each resolution level to DecodeTiledLevel,
// chaining levels via exr_image->next_level) and scanline images
// (decoding blocks in parallel via std::thread or OpenMP when enabled).
// Validates the data window, tile sizes and per-chunk headers against
// `size` before touching the payload. Returns TINYEXR_SUCCESS or an error.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const OffsetData& offset_data,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
// Scanlines per chunk depends on the compression scheme (NONE/RLE/ZIPS = 1).
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param,
exr_header->custom_attributes,
int(exr_header->num_custom_attributes), err)) {
return TINYEXR_ERROR_INVALID_HEADER;
}
#endif
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_y < exr_header->data_window.min_y) {
if (err) {
(*err) += "Invalid data window.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height. header invalid?
{
if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tiled) {
if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
<< ", "
<< "tile height = " << exr_header->tile_size_y << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
// ONE_LEVEL / MIPMAP: one chained EXRImage per level along the diagonal.
EXRImage* level_image = NULL;
for (int level = 0; level < offset_data.num_x_levels; ++level) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
level_image->level_x = level;
level_image->level_y = level;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
} else {
// RIPMAP: one chained EXRImage per (level_x, level_y) combination.
EXRImage* level_image = NULL;
for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
level_image->level_x = level_x;
level_image->level_y = level_y;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
}
} else { // scanline format
// Don't allow too large image(256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
// Worker pool: threads claim the next scanline block via the atomic counter.
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// Chunk layout on disk:
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(&line_no);
tinyexr::swap4(&data_len);
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large value. Assume this is invalid
// 2**20 = 1048576 = heuristic value.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window.max_y + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window.min_y);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window.min_y;
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
if (err) {
// NOTE(review): `ss` below is declared but never used.
std::stringstream ss;
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
// Rebuilds the scanline-chunk offset table by walking the chunk list when
// the stored offsets are missing or invalid. `marker` points at the first
// chunk inside [head, head+size); each chunk is 4 bytes(y) + 4 bytes
// (data_len) + payload. On success fills all `n` entries of *offsets and
// returns true; returns false as soon as a chunk would run past `size`.
//
// Fix: the previous version validated `data_len` BEFORE byte-swapping it,
// so on big-endian hosts the sanity check inspected the wrong value. Swap
// first, then validate.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // Convert to host byte order BEFORE validating the length.
    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);

    if (data_len >= size) {
      return false;
    }

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }
  return true;
}
// floor(log(x)/log(2)) for x > 0, computed by counting how many times x
// can be halved before reaching 1. Returns 0 for x == 0 or x == 1.
static int FloorLog2(unsigned x) {
  int result = 0;
  for (; x > 1; x >>= 1u) {
    ++result;
  }
  return result;
}
// ceil(log(x)/log(2)) for x > 0: floor(log2(x)) plus one when any bit
// below the top set bit is set (i.e. x is not a power of two).
static int CeilLog2(unsigned x) {
  int result = 0;
  int sticky = 0;  // becomes 1 if x is not an exact power of two
  for (; x > 1; x >>= 1u) {
    if (x & 1) {
      sticky = 1;
    }
    ++result;
  }
  return result + sticky;
}
// Chooses the floor or ceiling variant of log2 depending on the tile
// rounding mode: ROUND_DOWN uses FloorLog2, anything else uses CeilLog2.
static int RoundLog2(int x, int tile_rounding_mode) {
  const unsigned ux = static_cast<unsigned>(x);
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) {
    return FloorLog2(ux);
  }
  return CeilLog2(ux);
}
// Number of resolution levels along X for the header's tiling mode:
//  ONE_LEVEL -> 1
//  MIPMAP    -> round-log2 of max(width, height) + 1
//  RIPMAP    -> round-log2 of width + 1
// Unknown modes trip the assert (debug builds) and yield 0.
static int CalculateNumXLevels(const EXRHeader* exr_header) {
  const int w =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;
    case TINYEXR_TILE_MIPMAP_LEVELS:
      return RoundLog2(w > h ? w : h, exr_header->tile_rounding_mode) + 1;
    case TINYEXR_TILE_RIPMAP_LEVELS:
      return RoundLog2(w, exr_header->tile_rounding_mode) + 1;
    default:
      assert(false);
  }
  return 0;
}
// Number of resolution levels along Y for the header's tiling mode:
//  ONE_LEVEL -> 1
//  MIPMAP    -> round-log2 of max(width, height) + 1
//  RIPMAP    -> round-log2 of height + 1
// Unknown modes trip the assert (debug builds) and yield 0.
static int CalculateNumYLevels(const EXRHeader* exr_header) {
  const int w =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;
    case TINYEXR_TILE_MIPMAP_LEVELS:
      return RoundLog2(w > h ? w : h, exr_header->tile_rounding_mode) + 1;
    case TINYEXR_TILE_RIPMAP_LEVELS:
      return RoundLog2(h, exr_header->tile_rounding_mode) + 1;
    default:
      assert(false);
  }
  return 0;
}
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
// Computes, for every resolution level, how many tiles are needed along X
// and along Y to cover the data window. num_x_tiles / num_y_tiles are
// resized to the level counts and filled per level.
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
                                 std::vector<int>& num_y_tiles,
                                 const EXRHeader* exr_header) {
  const int width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  num_x_tiles.resize(CalculateNumXLevels(exr_header));
  num_y_tiles.resize(CalculateNumYLevels(exr_header));

  CalculateNumTiles(num_x_tiles, width, exr_header->tile_size_x,
                    exr_header->tile_rounding_mode);
  CalculateNumTiles(num_y_tiles, height, exr_header->tile_size_y,
                    exr_header->tile_rounding_mode);
}
// Sets up the offset table for a non-tiled (single-resolution) image:
// exactly one level in each direction, holding `num_blocks` scanline-block
// offsets at offsets[0][0].
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  offset_data.num_x_levels = 1;
  offset_data.num_y_levels = 1;
  offset_data.offsets.resize(1);
  offset_data.offsets[0].resize(1);
  offset_data.offsets[0][0].resize(num_blocks);
}
// Return sum of tile blocks.
// Sizes offset_data.offsets to match the tiling layout described by the
// header: ONE_LEVEL/MIPMAP use one table entry per level (indexed by lx),
// RIPMAP uses num_x_levels * num_y_levels entries laid out row-major
// (lx + ly * num_x_levels — see LevelIndex). Each entry is a
// [num_y_tiles][num_x_tiles] grid of (still-unfilled) chunk offsets.
static int InitTileOffsets(OffsetData& offset_data,
const EXRHeader* exr_header,
const std::vector<int>& num_x_tiles,
const std::vector<int>& num_y_tiles) {
int num_tile_blocks = 0;
offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
case TINYEXR_TILE_MIPMAP_LEVELS:
// Square level structure: X and Y level counts must agree.
assert(offset_data.num_x_levels == offset_data.num_y_levels);
offset_data.offsets.resize(offset_data.num_x_levels);
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
offset_data.offsets[l].resize(num_y_tiles[l]);
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[l]);
num_tile_blocks += num_x_tiles[l];
}
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));
for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
// Row-major flat index for level (lx, ly).
int l = ly * offset_data.num_x_levels + lx;
offset_data.offsets[l].resize(num_y_tiles[ly]);
for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
num_tile_blocks += num_x_tiles[lx];
}
}
}
break;
default:
assert(false);
}
return num_tile_blocks;
}
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
// Returns true when tile coordinate (dx, dy) at level (lx, ly) indexes an
// existing entry of the offset table for the header's tile level mode.
static bool isValidTile(const EXRHeader* exr_header,
                        const OffsetData& offset_data,
                        int dx, int dy, int lx, int ly) {
  // Negative tile/level coordinates are always invalid.
  if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
  const int num_x_levels = offset_data.num_x_levels;
  const int num_y_levels = offset_data.num_y_levels;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      // Only level (0, 0) exists.
      return lx == 0 && ly == 0 &&
             offset_data.offsets.size() > 0 &&
             offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
             offset_data.offsets[0][dy].size() > static_cast<size_t>(dx);
    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Mipmap tables are indexed by the x level.
      return lx < num_x_levels && ly < num_y_levels &&
             offset_data.offsets.size() > static_cast<size_t>(lx) &&
             offset_data.offsets[lx].size() > static_cast<size_t>(dy) &&
             offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx);
    case TINYEXR_TILE_RIPMAP_LEVELS: {
      // Ripmap tables flatten (lx, ly) into a single index.
      const size_t idx = static_cast<size_t>(lx) +
                         static_cast<size_t>(ly) *
                             static_cast<size_t>(num_x_levels);
      return lx < num_x_levels && ly < num_y_levels &&
             (offset_data.offsets.size() > idx) &&
             offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
             offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx);
    }
    default:
      return false;
  }
}
// Rebuilds the tile offset table by walking the chunk data itself: each
// chunk header carries (optional part number,) tileX, tileY, levelX, levelY
// followed by its payload, and the chunk's file position becomes the table
// entry for that tile. Reconstruction stops at the first malformed or
// out-of-range chunk.
//
// Fix over the original: the `size` parameter was ignored, so every read and
// every `marker` advance was unchecked — a truncated or corrupt file (or a
// negative dataSize) could walk `marker` outside the buffer. All reads and
// skips are now bounds-checked against `head + size`.
static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head,
                                   const unsigned char* marker,
                                   const size_t size,
                                   bool isMultiPartFile,
                                   bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  const unsigned char* end = head + size;
  if (marker < head || marker > end) return;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;
        if (isMultiPartFile) {
          // Skip the part number.
          if (static_cast<size_t>(end - marker) < sizeof(int)) return;
          marker += sizeof(int);
        }
        // tileX, tileY, levelX, levelY.
        if (static_cast<size_t>(end - marker) < 4 * sizeof(int)) return;
        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);
        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);
        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);
        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);
        if (isDeep) {
          if (static_cast<size_t>(end - marker) <
              2 * sizeof(tinyexr::tinyexr_int64)) return;
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          // Negative sizes indicate a corrupt chunk header.
          if (packed_offset_table_size < 0 || packed_sample_size < 0) return;
          // Next Int64 is unpacked sample size - skip that too.
          const tinyexr::tinyexr_uint64 skip =
              static_cast<tinyexr::tinyexr_uint64>(packed_offset_table_size) +
              static_cast<tinyexr::tinyexr_uint64>(packed_sample_size) + 8;
          if (static_cast<tinyexr::tinyexr_uint64>(end - marker) < skip) return;
          marker += skip;
        } else {
          if (static_cast<size_t>(end - marker) < sizeof(int)) return;
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          // A negative or over-long payload indicates a corrupt chunk header.
          if (dataSize < 0 ||
              static_cast<size_t>(end - marker) < static_cast<size_t>(dataSize))
            return;
          marker += dataSize;
        }
        if (!isValidTile(exr_header, offset_data,
                         tileX, tileY, levelX, levelY))
          return;
        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}
// Reads the tile offset table; `marker` is passed by reference and is
// advanced past the table on success.
static int ReadOffsets(OffsetData& offset_data,
                       const unsigned char* head,
                       const unsigned char*& marker,
                       const size_t size,
                       const char** err) {
  for (size_t l = 0; l < offset_data.offsets.size(); ++l) {
    for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (size_t dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        // The next 8-byte entry must lie inside the buffer.
        if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        tinyexr::tinyexr_uint64 offset;
        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);
        // Chunk offsets must point inside the file.
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        offset_data.offsets[l][dy][dx] = offset;
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
      }
    }
  }
  return TINYEXR_SUCCESS;
}
// Decodes every chunk (scanline blocks or tiles) of an EXR part into
// `exr_image`.
//
// `head` points at the start of the whole in-memory EXR file, `marker` at
// the chunk offset table (just past the header), and `size` is the total
// byte size of the memory image.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; on chunk-decode
// failure, partially-allocated image data is released via FreeEXRImage().
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanlines per chunk is fixed by the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // Validate the data window; the == max() check guards the +1 below from
  // overflowing (Issue 63).
  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_x - exr_header->data_window.min_x ==
          std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;

  if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
      exr_header->data_window.max_y - exr_header->data_window.min_y ==
          std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  if (exr_header->tiled) {
    if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  // Read offset tables.
  OffsetData offset_data;
  size_t num_blocks = 0;
  // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
  // If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
  if (exr_header->tiled) {
    {
      std::vector<int> num_x_tiles, num_y_tiles;
      PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
      num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
      if (exr_header->chunk_count > 0) {
        if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }

    int ret = ReadOffsets(offset_data, head, marker, size, err);
    if (ret != TINYEXR_SUCCESS) return ret;
    // The table may contain unfilled (<= 0) entries; try to rebuild it by
    // scanning the chunk data itself.
    if (IsAnyOffsetsAreInvalid(offset_data)) {
      ReconstructTileOffsets(offset_data, exr_header,
                             head, marker, size,
                             exr_header->multipart, exr_header->non_image);
    }
  } else if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
    InitSingleResolutionOffsets(offset_data, num_blocks);
  } else {
    // No explicit chunk count: ceil(data_height / num_scanline_blocks).
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }

    InitSingleResolutionOffsets(offset_data, num_blocks);
  }

  if (!exr_header->tiled) {
    // Read the scanline offset table (one 8-byte offset per chunk).
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
    for (size_t y = 0; y < num_blocks; y++) {
      tinyexr::tinyexr_uint64 offset;
      // Issue #81
      if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
        tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
      tinyexr::swap8(&offset);
      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
      offsets[y] = offset;
    }

    // If line offsets are invalid, we try to reconstruct it.
    // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
    for (size_t y = 0; y < num_blocks; y++) {
      if (offsets[y] <= 0) {
        // TODO(syoyo) Report as warning?
        // if (err) {
        //  stringstream ss;
        //  ss << "Incomplete lineOffsets." << std::endl;
        //  (*err) += ss.str();
        //}
        bool ret =
            ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
        if (ret) {
          // OK
          break;
        } else {
          tinyexr::SetErrorMessage(
              "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

#if 1
      // Release any partially-decoded image data.
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }

    return ret;
  }
}
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  // Collect the unique layer prefixes of all channel names: a channel
  // "foo.bar.R" belongs to layer "foo.bar". Channels without an interior
  // period contribute no layer name.
  layer_names.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string name(exr_header.channels[c].name);
    const size_t dot = name.find_last_of('.');
    // Skip names with no period, a leading period, or a trailing period.
    if (dot == std::string::npos || dot == 0 || dot + 1 >= name.size()) {
      continue;
    }
    name.erase(dot);
    if (std::find(layer_names.begin(), layer_names.end(), name) ==
        layer_names.end()) {
      layer_names.push_back(name);
    }
  }
}
// A channel paired with its position in the header's channel list; `name`
// is the channel name as produced by ChannelsInLayer (layer prefix may be
// stripped).
struct LayerChannel {
  explicit LayerChannel(size_t idx, std::string channel_name)
      : index(idx), name(channel_name) {}
  size_t index;      // position in EXRHeader::channels
  std::string name;  // channel name (relative to its layer)
};
// Gathers the channels belonging to `layer_name` (or, for an empty layer
// name, every channel), stripping the layer prefix from each kept name.
// NOTE(review): the non-empty-layer match uses find(), so a channel whose
// name merely *contains* "<layer_name>." elsewhere is also kept (with its
// full name) — preserved as-is.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      // No layer requested: keep everything, trimming any layer prefix.
      const size_t dot = name.find_last_of('.');
      if (dot != std::string::npos && dot < name.size()) {
        name = name.substr(dot + 1);
      }
    } else {
      const size_t at = name.find(layer_name + '.');
      if (at == std::string::npos) {
        continue;
      }
      if (at == 0) {
        name = name.substr(layer_name.size() + 1);
      }
    }
    channels.push_back(LayerChannel(size_t(c), name));
  }
}
} // namespace tinyexr
// Enumerates the layer names present in an EXR file. The returned array and
// each string are malloc()/strdup()ed; the caller is responsible for
// freeing them.
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
              const char **err) {
  EXRVersion exr_version;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);

  {
    const int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }
    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    const int ret =
        ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  std::vector<std::string> layer_vec;
  tinyexr::GetLayers(exr_header, layer_vec);

  (*num_layers) = int(layer_vec.size());
  (*layer_names) = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
  for (size_t i = 0; i < static_cast<size_t>(layer_vec.size()); i++) {
#ifdef _MSC_VER
    (*layer_names)[i] = _strdup(layer_vec[i].c_str());
#else
    (*layer_names)[i] = strdup(layer_vec[i].c_str());
#endif
  }

  FreeEXRHeader(&exr_header);
  return TINYEXR_SUCCESS;
}
// Loads an EXR file into a malloc()ed RGBA float buffer; convenience
// wrapper around LoadEXRWithLayer() with no layer selected.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  const char *no_layer = NULL;
  return LoadEXRWithLayer(out_rgba, width, height, filename, no_layer, err);
}
// Loads an EXR file into a malloc()ed RGBA float buffer (*out_rgba).
//
// `layername` selects a channel layer (e.g. "diffuse" matches channels like
// "diffuse.R"); pass NULL to use the top-level/unnamed channels. HALF
// channels are requested as FLOAT. A single-channel layer is replicated
// into R, G, B and A; otherwise R, G and B are required and A defaults to
// 1.0 when absent. On success *width/*height are set and the caller owns
// *out_rgba.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  // TODO: Probably limit loading to layers (channels) selected by layer index
  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // RGBA channel indices into exr_image.images / tile images (-1 = absent).
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;

  std::vector<std::string> layer_names;
  tinyexr::GetLayers(exr_header, layer_names);

  std::vector<tinyexr::LayerChannel> channels;
  tinyexr::ChannelsInLayer(
      exr_header, layername == NULL ? "" : std::string(layername), channels);

  if (channels.size() < 1) {
    tinyexr::SetErrorMessage("Layer Not Found", err);
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_LAYER_NOT_FOUND;
  }

  // Map the first four channels of the layer to R/G/B/A by name.
  size_t ch_count = channels.size() < 4 ? channels.size() : 4;
  for (size_t c = 0; c < ch_count; c++) {
    const tinyexr::LayerChannel &ch = channels[c];

    if (ch.name == "R") {
      idxR = int(ch.index);
    } else if (ch.name == "G") {
      idxG = int(ch.index);
    } else if (ch.name == "B") {
      idxB = int(ch.index);
    } else if (ch.name == "A") {
      idxA = int(ch.index);
    }
  }

  if (channels.size() == 1) {
    int chIdx = int(channels.front().index);
    // Grayscale channel only.

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      // Assemble tiles into the output, replicating the single channel into
      // all four components. Tiles may overhang the image edge, hence the
      // out-of-region checks.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii = exr_image.tiles[it].offset_x *
                               static_cast<int>(exr_header.tile_size_x) +
                           i;
            const int jj = exr_image.tiles[it].offset_y *
                               static_cast<int>(exr_header.tile_size_y) +
                           j;
            const int idx = ii + jj * static_cast<int>(exr_image.width);

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
          }
        }
      }
    } else {
      // Scanline image: replicate the channel per pixel.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val =
            reinterpret_cast<float **>(exr_image.images)[chIdx][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // Assume RGB(A)

    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Assemble tiles; A falls back to 1.0 when the layer has no alpha.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
// Returns TINYEXR_SUCCESS when `filename` starts with a valid EXR version
// header, otherwise the parse error code.
int IsEXR(const char *filename) {
  EXRVersion exr_version;
  const int ret = ParseEXRVersionFromFile(&exr_version, filename);
  return (ret != TINYEXR_SUCCESS) ? ret : TINYEXR_SUCCESS;
}
// Parses the header section that follows the 8-byte magic/version block and
// fills in the public EXRHeader representation.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  const size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  const int ret =
      ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
  if (ret != TINYEXR_SUCCESS && err && !err_str.empty()) {
    tinyexr::SetErrorMessage(err_str, err);
  }

  // Note: ConvertHeader runs even when parsing failed, so exr_header is
  // always left in a state that FreeEXRHeader() can release.
  ConvertHeader(exr_header, info);

  exr_header->multipart = version->multipart ? 1 : 0;
  exr_header->non_image = version->non_image ? 1 : 0;

  return ret;
}
// Decodes an in-memory EXR image into a malloc()ed RGBA float buffer
// (*out_rgba). HALF channels are requested as FLOAT; a single-channel image
// is replicated into all four components, otherwise R/G/B are required and
// A defaults to 1.0 when absent.
//
// Fixes over the original: EXRHeader (and EXRImage, once loaded) are now
// freed on every error path. The original leaked the header when header
// parsing or image loading failed, and leaked both header and image on the
// R/G/B-not-found paths (marked "@todo { free exr_image }") — the sibling
// LoadEXRWithLayer() already freed them on the matching paths.
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);  // release partially-converted header
    return ret;
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }

  // RGBA channel indices (-1 = absent).
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      // Assemble tiles, replicating the single channel into all components.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.

    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++)
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
// Reads the whole file into memory and decodes it via
// LoadEXRImageFromMemory().
//
// Fixes over the original: the FILE* is now closed on the "file too short"
// error path (it used to leak), and a short fread() is reported as
// TINYEXR_ERROR_INVALID_FILE instead of only assert()ing (a no-op in
// release builds).
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize < 16) {
    fclose(fp);  // fix: do not leak the handle on this error path
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      // fix: report a truncated read instead of continuing with a partial
      // buffer in release builds.
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes the chunk data that follows the header. `exr_header` must have
// been filled by ParseEXRHeaderFrom*() so that header_len is valid.
//
// Fix over the original: verify that header_len + 8 actually fits inside
// `size` before forming the chunk-data pointer, so a corrupt or mismatched
// header_len cannot point the decoder past the end of the buffer.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // +8 for magic number + version header.
  if (static_cast<size_t>(exr_header->header_len) + 8 > size) {
    tinyexr::SetErrorMessage("Insufficient data size for EXR image.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = memory + exr_header->header_len + 8;
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
//
// Packs one block (a scanline chunk or a tile) of pixel data into the EXR
// on-file layout and appends the — optionally compressed — bytes to
// `out_data`.
//
// The uncompressed layout is per scanline: all pixels of channel 0, then
// channel 1, ... (`channel_offset_list` gives each channel's byte offset
// within one scanline). Each sample is converted between HALF/FLOAT/UINT as
// requested per channel and run through tinyexr::swap* before being stored.
//
// Returns false when `compression_type` is not a known scheme.
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
                            const unsigned char* const* images,
                            int compression_type,
                            int /*line_order*/,
                            int width,      // for tiled : tile.width
                            int /*height*/, // for tiled : header.tile_size_y
                            int x_stride,   // for tiled : header.tile_size_x
                            int line_no,    // for tiled : 0
                            int num_lines,  // for tiled : tile.height
                            size_t pixel_data_size,
                            const std::vector<ChannelInfo>& channels,
                            const std::vector<size_t>& channel_offset_list,
                            const void* compression_param = 0) // zfp compression param
{
  size_t buf_size = static_cast<size_t>(width) *
                    static_cast<size_t>(num_lines) *
                    static_cast<size_t>(pixel_data_size);
  //int last2bit = (buf_size & 3);
  // buf_size must be multiple of four
  //if(last2bit) buf_size += 4 - last2bit;
  std::vector<unsigned char> buf(buf_size);

  size_t start_y = static_cast<size_t>(line_no);
  // Stage 1: interleave the source channels into `buf` in file layout,
  // applying the per-channel pixel type conversion.
  for (size_t c = 0; c < channels.size(); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // Stored HALF, written as FLOAT: widen each sample.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
                  static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP16 h16;
            h16.u = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::FP32 f32 = half_to_float(h16);

            tinyexr::swap4(&f32.f);

            // line_ptr[x] = f32.f;
            tinyexr::cpy4(line_ptr + x, &(f32.f));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // Stored HALF, written as HALF: copy with byte swap.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                          static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            unsigned short val = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::swap2(&val);

            // line_ptr[x] = val;
            tinyexr::cpy2(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // Stored FLOAT, written as HALF: narrow each sample.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                          static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP32 f32;
            f32.f = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::FP16 h16;
            h16 = float_to_half_full(f32);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));

            // line_ptr[x] = h16.u;
            tinyexr::cpy2(line_ptr + x, &(h16.u));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // Stored FLOAT, written as FLOAT: copy with byte swap.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
                  static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            float val = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::swap4(&val);

            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // UINT is always written as UINT.
      for (int y = 0; y < num_lines; y++) {
        // Assume increasing Y
        unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
            static_cast<size_t>(pixel_data_size * y * width) +
            channel_offset_list[c] * static_cast<size_t>(width)));
        for (int x = 0; x < width; x++) {
          unsigned int val = reinterpret_cast<const unsigned int * const *>(
              images)[c][(y + start_y) * x_stride + x];

          tinyexr::swap4(&val);

          // line_ptr[x] = val;
          tinyexr::cpy4(line_ptr + x, &val);
        }
      }
    }
  }

  // Stage 2: append `buf` to out_data, compressed per the requested scheme.
  if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(uncompressed)
    out_data.insert(out_data.end(), buf.begin(), buf.end());

  } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
             (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
    std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
        static_cast<unsigned long>(buf.size())));
#else
    std::vector<unsigned char> block(
        compressBound(static_cast<uLong>(buf.size())));
#endif
    tinyexr::tinyexr_uint64 outSize = block.size();

    tinyexr::CompressZip(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // (buf.size() * 3) / 2 would be enough.
    std::vector<unsigned char> block((buf.size() * 3) / 2);

    tinyexr::tinyexr_uint64 outSize = block.size();

    tinyexr::CompressRle(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
    unsigned int bufLen =
        8192 + static_cast<unsigned int>(
                   2 * static_cast<unsigned int>(
                           buf.size()));  // @fixme { compute good bound. }
    std::vector<unsigned char> block(bufLen);
    unsigned int outSize = static_cast<unsigned int>(block.size());

    CompressPiz(&block.at(0), &outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size(), channels, width, num_lines);

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

#else
    assert(0);
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
    std::vector<unsigned char> block;
    unsigned int outSize;

    tinyexr::CompressZfp(
        &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
        width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

#else
    (void)compression_param;
    assert(0);
#endif
  } else {
    assert(0);
    return false;
  }

  return true;
}
// Encodes every tile of a single tiled level (one mip/rip level) into
// `data_list[start_index .. start_index + num_x_tiles*num_y_tiles - 1]`.
// Each entry is prefixed with a 20-byte header of five big-endian ints:
// tileX, tileY, levelX, levelY, pixel-data byte length.
// Work is distributed over std::thread workers (C++11 + TINYEXR_USE_THREAD)
// or an OpenMP `parallel for` / plain loop otherwise.
// `compression_param` must be non-NULL when ZFP compression is enabled.
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_DATA with a message
// appended to `err` (if non-NULL) on failure.
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
const std::vector<tinyexr::ChannelInfo>& channels,
std::vector<std::vector<unsigned char> >& data_list,
size_t start_index, // for data_list
int num_x_tiles, int num_y_tiles,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const void* compression_param, // must be set if zfp compression is enabled
std::string* err) {
int num_tiles = num_x_tiles * num_y_tiles;
assert(num_tiles == level_image->num_tiles);
// A base (level 0) image smaller than one tile is malformed input.
if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
level_image->level_x == 0 && level_image->level_y == 0) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Error flag shared by all workers; atomic when real threads are used.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
// Each worker claims tile indices from the shared atomic counter.
while ((i = tile_count++) < num_tiles) {
#else
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_tiles; i++) {
#endif
size_t tile_idx = static_cast<size_t>(i);
size_t data_idx = tile_idx + start_index;
// Tiles are stored in row-major order within the level.
int x_tile = i % num_x_tiles;
int y_tile = i / num_x_tiles;
EXRTile& tile = level_image->tiles[tile_idx];
const unsigned char* const* images =
static_cast<const unsigned char* const*>(tile.images);
// Reserve room for the 5-int tile header written below.
data_list[data_idx].resize(5*sizeof(int));
size_t data_header_size = data_list[data_idx].size();
bool ret = EncodePixelData(data_list[data_idx],
images,
exr_header->compression_type,
0, // increasing y
tile.width,
exr_header->tile_size_y,
exr_header->tile_size_x,
0,
tile.height,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
// `break` is not allowed inside an OpenMP loop; record and continue.
invalid_data = true;
continue;
}
assert(data_list[data_idx].size() > data_header_size);
int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
//tileX, tileY, levelX, levelY // pixel_data_size(int)
memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
// EXR stores these fields little-endian; swap in place where needed.
swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
return TINYEXR_SUCCESS;
}
// Returns how many scanlines one chunk (block) covers for the given
// compression type: 16 for ZIP/ZFP, 32 for PIZ, otherwise 1
// (NONE / RLE / ZIPS are stored one scanline per chunk).
static int NumScanlines(int compression_type) {
  switch (compression_type) {
    case TINYEXR_COMPRESSIONTYPE_ZIP:
    case TINYEXR_COMPRESSIONTYPE_ZFP:
      return 16;
    case TINYEXR_COMPRESSIONTYPE_PIZ:
      return 32;
    default:
      return 1;
  }
}
// Encodes all chunks (scanline blocks or tiles, possibly multi-level) of one
// EXR part into `data_list`, and fills `offset_data` with the byte-swapped
// file offset of every chunk. `chunk_offset` is the file position of the
// first chunk; `is_multipart` adds 4 bytes per chunk for the part-number
// prefix written later by the caller. On success `total_size` holds the file
// offset just past this part's last chunk.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message appended to
// `err` when non-NULL).
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
const std::vector<ChannelInfo>& channels,
int num_blocks,
tinyexr_uint64 chunk_offset, // starting offset of current chunk
bool is_multipart,
OffsetData& offset_data, // output block offsets, must be initialized
std::vector<std::vector<unsigned char> >& data_list, // output
tinyexr_uint64& total_size, // output: ending offset of current chunk
std::string* err) {
int num_scanlines = NumScanlines(exr_header->compression_type);
data_list.resize(num_blocks);
// Byte offset of each channel within one interleaved scanline, plus the
// total bytes per pixel across all channels (pixel_data_size).
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
{
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (channels[c].requested_pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
}
const void* compression_param = 0;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
std::string e;
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes, &e);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
compression_param = &zfp_compression_param;
}
#endif
tinyexr_uint64 offset = chunk_offset;
// Multi-part files prepend a 4-byte part number to every chunk.
tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
if (exr_image->tiles) {
// Tiled path: walk the chain of level images (mipmap/ripmap levels).
const EXRImage* level_image = exr_image;
size_t block_idx = 0;
tinyexr::tinyexr_uint64 block_data_size = 0;
int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
if (!level_image) {
if (err) {
(*err) += "Invalid number of tiled levels for EncodeChunk\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// The level chain must be ordered exactly as the offset table expects.
int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
exr_header->tile_level_mode, offset_data.num_x_levels);
if (level_index_from_image != level_index) {
if (err) {
(*err) += "Incorrect level ordering in tiled image\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
std::string e;
int ret = EncodeTiledLevel(level_image,
exr_header,
channels,
data_list,
block_idx,
num_x_tiles,
num_y_tiles,
channel_offset_list,
pixel_data_size,
compression_param,
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty() && err) {
(*err) += e;
}
return ret;
}
// Record (byte-swapped) file offsets for every tile of this level.
for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
offset_data.offsets[level_index][j][i] = offset;
swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
offset += data_list[block_idx].size() + doffset;
block_data_size += data_list[block_idx].size();
++block_idx;
}
level_image = level_image->next_level;
}
assert(static_cast<int>(block_idx) == num_blocks);
total_size = offset;
} else { // scanlines
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
std::vector<std::thread> workers;
std::atomic<int> block_count(0);
int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
// Workers claim scanline-block indices from the shared counter.
while ((i = block_count++) < num_blocks) {
#else
bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
#endif
// The last block may cover fewer than `num_scanlines` lines.
int start_y = num_scanlines * i;
int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
int num_lines = end_Y - start_y;
const unsigned char* const* images =
static_cast<const unsigned char* const*>(exr_image->images);
// Reserve room for the scanline chunk header: y coordinate + data size.
data_list[i].resize(2*sizeof(int));
size_t data_header_size = data_list[i].size();
bool ret = EncodePixelData(data_list[i],
images,
exr_header->compression_type,
0, // increasing y
exr_image->width,
exr_image->height,
exr_image->width,
start_y,
num_lines,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue; // "break" cannot be used with OpenMP
}
assert(data_list[i].size() > data_header_size);
int data_len = static_cast<int>(data_list[i].size() - data_header_size);
memcpy(&data_list[i][0], &start_y, sizeof(int));
memcpy(&data_list[i][4], &data_len, sizeof(int));
// Header ints are stored little-endian in the file.
swap4(reinterpret_cast<int*>(&data_list[i][0]));
swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode scanline data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Offset table entries are byte-swapped here, ready to be memcpy'd out.
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size() + doffset;
}
total_size = static_cast<size_t>(offset);
}
return TINYEXR_SUCCESS;
}
// can save a single or multi-part image (no deep* formats)
//
// Serializes `num_parts` EXR parts (scanline or tiled) into a single
// malloc()ed buffer written to `*memory_out`. The caller owns and must
// free() the buffer. Returns the buffer size in bytes, or 0 on error
// (with a message in `*err` when `err` is non-NULL).
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out, const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                    err);
    return 0;
  }
  {
    // Validate each part's compression settings against this build's
    // compiled-in feature set.
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                        err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build",
                        err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build",
                        err);
        return 0;
      }
#else
      // Fix: the original read `exr_header->num_channels`, which is not
      // declared in this function (the loop variable is `exr_headers[i]`),
      // and it rejected FLOAT-less parts even when the part did not use ZFP.
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
          if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
            SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                            err);
            return 0;
          }
        }
      }
#endif
    }
  }
  std::vector<unsigned char> memory;
  // Header: EXR magic number.
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    memory.insert(memory.end(), header, header + 4);
  }
  // Version field.
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled bit (only valid for single-part files)
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name bit
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart bit
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }
  // Pre-compute chunk counts and (empty) offset tables per part.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] =
            InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }
  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;
    for (unsigned int i = 0; i < num_parts; ++i) {
      //channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->pixel_types[c];
          info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                        static_cast<int>(data.size()));
      }
      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
            &memory, "compression", "compression",
            reinterpret_cast<const unsigned char*>(&comp), 1);
      }
      {
        int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
            &memory, "dataWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
        int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);
        // Note: must be the same across parts (currently, using value from the first header)
        WriteAttributeToMemory(
            &memory, "displayWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
      }
      {
        unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }
      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
            &memory, "pixelAspectRatio", "float",
            reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
      }
      {
        float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
            &memory, "screenWindowCenter", "v2f",
            reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
      }
      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }
      if (exr_images[i].tiles) {
        // tiledesc: xSize(4) + ySize(4) + mode(1) = 9 bytes; mode packs the
        // level mode in the low 2 bits and the rounding mode in bit 4.
        unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
            &memory, "tiles", "tiledesc",
            reinterpret_cast<const unsigned char*>(data), 9);
      }
      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name (must be unique across parts)
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            partnames.insert(std::string(exr_headers[i]->name));
            if (partnames.size() != i + 1) {
              SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
              return 0;
            }
            WriteAttributeToMemory(
                &memory, "name", "string",
                reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
                static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
              &memory, "type", "string",
              reinterpret_cast<const unsigned char*>(type),
              static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
              &memory, "chunkCount", "int",
              reinterpret_cast<const unsigned char*>(&chunk_count[i]),
              4);
        }
      }
      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
              &memory, exr_headers[i]->custom_attributes[j].name,
              exr_headers[i]->custom_attributes[j].type,
              reinterpret_cast<const unsigned char*>(
                  exr_headers[i]->custom_attributes[j].value),
              exr_headers[i]->custom_attributes[j].size);
        }
      }
      { // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }
  // First chunk starts right after the headers and all offset tables.
  tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i],  // output: block offsets, must be initialized
                          data_lists[i],   // output
                          total_size,      // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return 0;
    }
    chunk_offset = total_size;
  }
  // Allocating required memory
  if (total_size == 0) {  // something went wrong
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  (*memory_out) = static_cast<unsigned char*>(malloc(total_size));
  // Fix: guard against allocation failure before the memcpy below.
  if ((*memory_out) == NULL) {
    tinyexr::SetErrorMessage("Failed to allocate memory for EXR image", err);
    return 0;
  }
  // Writing header
  memcpy((*memory_out), &memory[0], memory.size());
  unsigned char* memory_ptr = *memory_out + memory.size();
  size_t sum = memory.size();
  // Writing offset data for chunks (already byte-swapped by EncodeChunk).
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (exr_images[i].tiles) {
      const EXRImage* level_image = &exr_images[i];
      int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
          offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
        level_image = level_image->next_level;
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
      memory_ptr += num_bytes;
    }
  }
  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        // Multi-part chunks are prefixed with the 4-byte part number.
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
  assert(sum == total_size);
  return total_size;  // OK
}
} // tinyexr
// Serializes a single-part EXR image into a malloc()ed buffer (caller
// frees). Thin wrapper over the N-part writer with exactly one header.
// Returns the buffer size in bytes, or 0 on error.
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
                            const EXRHeader* exr_header,
                            unsigned char** memory_out, const char** err) {
  const unsigned int kSinglePart = 1;
  return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header,
                                            kSinglePart, memory_out, err);
}
// Saves a single-part EXR image to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; `*err` receives a
// heap-allocated message on failure (free with FreeEXRErrorMessage).
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  // Fix: also reject a NULL header — the original dereferenced
  // exr_header->compression_type before any NULL check.
  if (exr_image == NULL || exr_header == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  // Use the wide-char open so UTF-8 filenames work on Windows.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    // Fix: the original leaked `fp` (and left an empty file open) when
    // serialization failed.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
// Serializes a multi-part EXR image (>= 2 parts) into a malloc()ed buffer
// (caller frees). Returns the buffer size in bytes, or 0 on error.
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
                                     const EXRHeader** exr_headers,
                                     unsigned int num_parts,
                                     unsigned char** memory_out, const char** err) {
  const bool args_ok = (exr_images != NULL) && (exr_headers != NULL) &&
                       (num_parts >= 2) && (memory_out != NULL);
  if (!args_ok) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                             err);
    return 0;
  }
  return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers,
                                            num_parts, memory_out, err);
}
// Saves a multi-part EXR image (>= 2 parts) to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; `*err` receives a
// heap-allocated message on failure (free with FreeEXRErrorMessage).
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
                                const EXRHeader** exr_headers,
                                unsigned int num_parts,
                                const char* filename,
                                const char** err) {
  // Fix: also reject a NULL filename — the original passed it straight to
  // fopen/_wfopen_s (the sibling SaveEXRImageToFile does check it).
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
      filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  // Use the wide-char open so UTF-8 filenames work on Windows.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers,
                                                  num_parts, &mem, err);
  if (mem_size == 0) {
    // Fix: the original leaked `fp` (and left an empty file open) when
    // serialization failed.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 0, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(&dx);
tinyexr::swap4(&dy);
tinyexr::swap4(&dw);
tinyexr::swap4(&dh);
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(&x);
tinyexr::swap4(&y);
tinyexr::swap4(&w);
tinyexr::swap4(&h);
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
}
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(&line_no);
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
return false;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return false;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
// Resets an EXRImage to a well-defined empty state so it can safely be
// passed to the loader functions. A NULL pointer is silently ignored.
void InitEXRImage(EXRImage *exr_image) {
  if (!exr_image) {
    return;
  }

  // Geometry / channel bookkeeping.
  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  // Pixel storage (scanline and tiled variants).
  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->num_tiles = 0;

  // Mip/rip level chain.
  exr_image->next_level = NULL;
  exr_image->level_x = 0;
  exr_image->level_y = 0;
}
// Releases an error message allocated by the tinyexr API (via *err out
// parameters). Passing NULL is allowed and does nothing.
void FreeEXRErrorMessage(const char *msg) {
  if (msg == NULL) {
    return;
  }
  // The string was heap-allocated by the library; cast away const to free it.
  free(const_cast<char *>(msg));
}
// Zero-initializes an EXRHeader so it can be passed to the parser functions.
// A NULL pointer is silently ignored.
void InitEXRHeader(EXRHeader *exr_header) {
  if (exr_header != NULL) {
    memset(exr_header, 0, sizeof(*exr_header));
  }
}
// Releases every heap allocation owned by an EXRHeader that was filled in by
// the parser (channel list, pixel type arrays, custom attributes, name).
// The EXRHeader struct itself is not freed.
// Returns TINYEXR_ERROR_INVALID_ARGUMENT for a NULL header, else
// TINYEXR_SUCCESS.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // free(NULL) is well defined, so per-pointer guards are unnecessary.
  free(exr_header->channels);
  free(exr_header->pixel_types);
  free(exr_header->requested_pixel_types);

  // Each custom attribute owns its value buffer.
  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    free(exr_header->custom_attributes[i].value);
  }
  free(exr_header->custom_attributes);

  // Clear the stored part name.
  EXRSetNameAttr(exr_header, NULL);

  return TINYEXR_SUCCESS;
}
// Stores `name` into the header's fixed 256-byte name field, truncating to
// 255 characters so the result is always NUL-terminated. Passing NULL for
// `name` clears the field; a NULL header is silently ignored.
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  if (!exr_header) {
    return;
  }

  // Zero the whole field first: guarantees NUL termination for any length.
  memset(exr_header->name, 0, 256);

  if (name) {
    size_t len = strlen(name);
    if (len > 255) {
      len = 255;  // reserve one byte for the terminator
    }
    if (len > 0) {
      memcpy(exr_header->name, name, len);
    }
  }
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
// Releases all pixel storage owned by an EXRImage: per-channel scanline
// buffers, per-tile buffers, and the linked chain of lower-resolution
// levels. The top-level EXRImage struct itself is NOT freed.
// NOTE(review): `next_level` is released with `delete`, so it is presumably
// allocated with `new` by the loader, while pixel buffers use malloc/free --
// confirm against the allocation sites before changing.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Recursively free lower-resolution levels first, then the node itself.
  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
  }

  // Scanline layout: one malloc'ed buffer per channel.
  for (int i = 0; i < exr_image->num_channels; i++) {
    if (exr_image->images && exr_image->images[i]) {
      free(exr_image->images[i]);
    }
  }

  if (exr_image->images) {
    free(exr_image->images);
  }

  // Tiled layout: per-tile, per-channel buffers.
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      for (int i = 0; i < exr_image->num_channels; i++) {
        if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
          free(exr_image->tiles[tid].images[i]);
        }
      }

      if (exr_image->tiles[tid].images) {
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}
// Parses a single-part EXR header from `filename`.
// Reads the whole file into memory and delegates to
// ParseEXRHeaderFromMemory(). Returns a TINYEXR_* status code; on failure a
// heap-allocated message may be stored in *err (release it with
// FreeEXRErrorMessage).
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  // Wide-char open so UTF-8 filenames work on Windows.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    // Short read is also reported in release builds (assert compiles away).
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parses all part headers of a multi-part EXR image held in `memory`.
// On success *exr_headers receives a malloc'ed array of *num_headers
// malloc'ed EXRHeader pointers (free each with FreeEXRHeader plus free(),
// then free the array). Returns a TINYEXR_* status code; on failure a
// message may be stored in *err (release with FreeEXRErrorMessage).
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  std::vector<tinyexr::HeaderInfo> infos;

  // Headers follow each other; the list is terminated by an empty header
  // (a single '\0' byte).
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();

    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);

    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }

    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }

    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // BUGFIX: shrink the remaining byte count that is handed to
    // ParseEXRHeader. The previous code decremented the unused local copy of
    // `size`, so every iteration passed a stale, too-large `marker_size`.
    if (info.header_len > marker_size) {
      tinyexr::SetErrorMessage("Invalid header size.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += info.header_len;
    marker_size -= info.header_len;
  }

  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));

  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
    memset(exr_header, 0, sizeof(EXRHeader));

    ConvertHeader(exr_header, infos[i]);

    // `multipart` flag is propagated from the version block.
    exr_header->multipart = exr_version->multipart ? 1 : 0;

    (*exr_headers)[i] = exr_header;
  }

  (*num_headers) = static_cast<int>(infos.size());

  return TINYEXR_SUCCESS;
}
// Parses the headers of a multi-part EXR file on disk.
// Reads the whole file into memory and delegates to
// ParseEXRMultipartHeaderFromMemory(); see that function for the ownership
// of the returned header array. Returns a TINYEXR_* status code.
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  // Wide-char open so UTF-8 filenames work on Windows.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    // Short read is also reported in release builds (assert compiles away).
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parses the 8-byte EXR version block at the start of `memory`:
// 4-byte magic number followed by a version byte (must be 2) and flag bytes.
// Fills *version (tiled / long_name / non_image / multipart flags) and
// returns TINYEXR_SUCCESS, or a TINYEXR_ERROR_* code for NULL arguments,
// short input, bad magic, or an unsupported version.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse version header.
  {
    // must be 2
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    // NOTE: a redundant `version == NULL` re-check used to sit here; it was
    // dead code (NULL is already rejected at the top) and has been removed.

    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {        // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}
// Parses the EXR version block (first 8 bytes) of the file at `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; *version is filled by
// ParseEXRVersionFromMemory() on success.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (err != 0) {
    // TODO(syoyo): return wfopen_s error code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    // BUGFIX: the stream used to leak on this early return.
    fclose(fp);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes every part of a multi-part EXR image held in `memory`.
// `exr_headers` (length `num_parts`) must come from
// ParseEXRMultipartHeaderFromMemory(); decoded pixels are written into the
// caller-provided `exr_images` array (one EXRImage per part).
// Returns a TINYEXR_* status code; on failure a message may be stored in
// *err (release with FreeEXRErrorMessage).
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+ : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  std::vector<tinyexr::OffsetData> chunk_offset_table_list;
  chunk_offset_table_list.reserve(num_parts);
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
    tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
    if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
      // Single-resolution part: a flat table of `chunk_count` 8-byte offsets.
      tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
      std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];

      for (size_t c = 0; c < offset_table.size(); c++) {
        tinyexr::tinyexr_uint64 offset;
        memcpy(&offset, marker, 8);
        tinyexr::swap8(&offset);  // file byte order -> host byte order

        // Reject offsets that point past the end of the buffer.
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }

        offset_table[c] = offset + 4;  // +4 to skip 'part number'
        marker += 8;
      }
    } else {
      // Tiled multi-level part: offsets are indexed [level][tile_y][tile_x].
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
        // The tile layout must account for exactly `chunk_count` chunks.
        if (num_blocks != exr_headers[i]->chunk_count) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }

      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
            tinyexr::tinyexr_uint64 offset;
            memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
            tinyexr::swap8(&offset);  // file byte order -> host byte order
            if (offset >= size) {
              tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                       err);
              return TINYEXR_ERROR_INVALID_DATA;
            }
            offset_data.offsets[l][dy][dx] = offset + 4;  // +4 to skip 'part number'
            marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
          }
        }
      }
    }
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];

    // First check 'part number' is identitical to 'i'
    for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
      for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
        for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
          const unsigned char *part_number_addr =
              memory + offset_data.offsets[l][dy][dx] - 4;  // -4 to move to 'part number' field.
          unsigned int part_no;
          memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
          tinyexr::swap4(&part_no);

          if (part_no != i) {
            tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                     err);
            return TINYEXR_ERROR_INVALID_DATA;
          }
        }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
                                   memory, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}
// Loads all parts of a multi-part EXR file from disk.
// `exr_headers` (length `num_parts`) must come from
// ParseEXRMultipartHeaderFromFile(); decoded pixels are written into the
// caller-provided `exr_images` array. Returns a TINYEXR_* status code.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // BUGFIX: also reject a NULL filename (it was previously passed straight
  // to fopen/UTF8ToWchar), matching the sibling *FromFile functions.
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    // BUGFIX: a short read is now reported instead of decoding a partially
    // filled buffer (the old code only asserted, which release builds strip);
    // matches the behavior of the other *FromFile functions.
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Saves `data` (interleaved float pixels, `components` channels per pixel,
// row-major, width x height) as an EXR file.
//
// components: 1 (A), 3 (RGB) or 4 (RGBA) interleaved input; other values are
//             rejected. Channels are written in (A)BGR order as most EXR
//             viewers expect.
// save_as_fp16: > 0 stores half floats, otherwise full 32-bit floats.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; on failure a message
// may be stored in *err (release with FreeEXRErrorMessage).
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  // BUGFIX: widen before multiplying so large images do not overflow `int`
  // (the old code computed `width * height` in int and then cast).
  const size_t pixel_count =
      static_cast<size_t>(width) * static_cast<size_t>(height);

  if (components == 1) {
    images[0].resize(pixel_count);
    memcpy(images[0].data(), data, sizeof(float) * pixel_count);
  } else {
    images[0].resize(pixel_count);
    images[1].resize(pixel_count);
    images[2].resize(pixel_count);
    images[3].resize(pixel_count);

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < pixel_count; i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image

    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);

  // BUGFIX: release header allocations on the failure path too (the old code
  // returned early on error and leaked channels/pixel_types buffers).
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
GxB_Desc_get.c | //------------------------------------------------------------------------------
// GxB_Desc_get: get a field in a descriptor
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
// GxB_Desc_get: read a single field out of a GrB_Descriptor.
// Variadic: the trailing argument is an out-pointer whose type depends on
// `field` (GrB_Desc_Value* for the GrB_* fields and GxB_AxB_METHOD; int* for
// NTHREADS / SORT / COMPRESSION / IMPORT; double* for CHUNK).
// A NULL descriptor is legal: every field then reports GxB_DEFAULT.
GrB_Info GxB_Desc_get           // get a parameter from a descriptor
(
    GrB_Descriptor desc,        // descriptor to query; NULL is ok
    GrB_Desc_Field field,       // parameter to query
    ...                         // return value of the descriptor
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_Desc_get (desc, field, &value)") ;
    GB_RETURN_IF_FAULTY (desc) ;

    //--------------------------------------------------------------------------
    // get the parameter
    //--------------------------------------------------------------------------

    va_list ap ;

    switch (field)
    {

        // output-mask handling
        case GrB_OUTP : 

            {
                va_start (ap, field) ;
                GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (value) ;
                (*value) = (desc == NULL) ? GxB_DEFAULT : desc->out ;
            }
            break ;

        // mask handling
        case GrB_MASK : 

            {
                va_start (ap, field) ;
                GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (value) ;
                (*value) = (desc == NULL) ? GxB_DEFAULT : desc->mask ;
            }
            break ;

        // first input descriptor (e.g. transpose A)
        case GrB_INP0 : 

            {
                va_start (ap, field) ;
                GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (value) ;
                (*value) = (desc == NULL) ? GxB_DEFAULT : desc->in0 ;
            }
            break ;

        // second input descriptor (e.g. transpose B)
        case GrB_INP1 : 

            {
                va_start (ap, field) ;
                GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (value) ;
                (*value) = (desc == NULL) ? GxB_DEFAULT : desc->in1 ;
            }
            break ;

        // max number of threads for this operation (int out-parameter)
        case GxB_DESCRIPTOR_NTHREADS :      // same as GxB_NTHREADS

            {
                va_start (ap, field) ;
                int *nthreads = va_arg (ap, int *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (nthreads) ;
                int nth = (desc == NULL) ? GxB_DEFAULT : desc->nthreads_max ;
                (*nthreads) = nth ;
            }
            break ;

        // chunk size for parallel task scheduling (double out-parameter)
        case GxB_DESCRIPTOR_CHUNK :         // same as GxB_CHUNK

            {
                va_start (ap, field) ;
                double *chunk = va_arg (ap, double *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (chunk) ;
                (*chunk) = (desc == NULL) ? GxB_DEFAULT : desc->chunk ;
            }
            break ;

        // matrix-multiply algorithm selection
        case GxB_AxB_METHOD : 

            {
                va_start (ap, field) ;
                GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (value) ;
                (*value) = (desc == NULL) ? GxB_DEFAULT : desc->axb ;
            }
            break ;

        case GxB_SORT : 

            {
                va_start (ap, field) ;
                int *do_sort = va_arg (ap, int *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (do_sort) ;
                int s = (desc == NULL) ? GxB_DEFAULT : desc->do_sort ;
                (*do_sort) = s ;
            }
            break ;

        case GxB_COMPRESSION : 

            {
                va_start (ap, field) ;
                int *compression = va_arg (ap, int *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (compression) ;
                int s = (desc == NULL) ? GxB_DEFAULT : desc->compression ;
                (*compression) = s ;
            }
            break ;

        case GxB_IMPORT : 

            {
                va_start (ap, field) ;
                int *method = va_arg (ap, int *) ;
                va_end (ap) ;

                GB_RETURN_IF_NULL (method) ;
                int s = (desc == NULL) ? GxB_DEFAULT : desc->import ;
                // any non-default import mode is reported as secure import
                if (s != GxB_DEFAULT) s = GxB_SECURE_IMPORT ;
                (*method) = s ;
            }
            break ;

        default : 

            return (GrB_INVALID_VALUE) ;
    }

    // NOTE(review): presumably makes the stored out-value visible to other
    // OpenMP threads, per the library's convention -- confirm.
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
ResultHandler.h | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
/*
* Structures that collect search results from distance computations
*/
#pragma once
#include <faiss/impl/AuxIndexStructures.h>
#include <faiss/utils/Heap.h>
#include <faiss/utils/partitioning.h>
namespace faiss {
/*****************************************************************
* Heap based result handler
*****************************************************************/
/// Collects top-k results per query into caller-owned arrays
/// (heap_dis_tab / heap_ids_tab, each nq * k entries, laid out contiguously
/// per query). C is the heap comparator class (see Heap.h); C::cmp(a, b)
/// decides whether b should displace a.
template <class C>
struct HeapResultHandler {
    using T = typename C::T;
    using TI = typename C::TI;

    int nq;            // number of queries
    T* heap_dis_tab;   // distances, nq * k, caller-owned
    TI* heap_ids_tab;  // ids, nq * k, caller-owned

    int64_t k; // number of results to keep

    HeapResultHandler(size_t nq, T* heap_dis_tab, TI* heap_ids_tab, size_t k)
            : nq(nq),
              heap_dis_tab(heap_dis_tab),
              heap_ids_tab(heap_ids_tab),
              k(k) {}

    /******************************************************
     * API for 1 result at a time (each SingleResultHandler is
     * called from 1 thread)
     */

    struct SingleResultHandler {
        HeapResultHandler& hr;
        size_t k;

        T* heap_dis;  // heap slice of the current query
        TI* heap_ids;
        T thresh;     // current heap top; candidates must beat it to enter

        SingleResultHandler(HeapResultHandler& hr) : hr(hr), k(hr.k) {}

        /// begin results for query # i
        void begin(size_t i) {
            heap_dis = hr.heap_dis_tab + i * k;
            heap_ids = hr.heap_ids_tab + i * k;
            heap_heapify<C>(k, heap_dis, heap_ids);
            thresh = heap_dis[0];
        }

        /// add one result for query i
        void add_result(T dis, TI idx) {
            if (C::cmp(heap_dis[0], dis)) {
                heap_replace_top<C>(k, heap_dis, heap_ids, dis, idx);
                thresh = heap_dis[0];
            }
        }

        /// series of results for query i is done
        void end() {
            // convert heap order into sorted result order
            heap_reorder<C>(k, heap_dis, heap_ids);
        }
    };

    /******************************************************
     * API for multiple results (called from 1 thread)
     */

    size_t i0, i1; // current query range [i0, i1)

    /// begin
    void begin_multiple(size_t i0, size_t i1) {
        this->i0 = i0;
        this->i1 = i1;
        for (size_t i = i0; i < i1; i++) {
            heap_heapify<C>(k, heap_dis_tab + i * k, heap_ids_tab + i * k);
        }
    }

    /// add results for query i0..i1 and j0..j1
    /// dis_tab is (i1 - i0) rows of (j1 - j0) distances, row-major.
    void add_results(size_t j0, size_t j1, const T* dis_tab) {
#pragma omp parallel for
        for (int64_t i = i0; i < i1; i++) {
            T* heap_dis = heap_dis_tab + i * k;
            TI* heap_ids = heap_ids_tab + i * k;
            // shifted by -j0 so the inner loop can index with the absolute j
            const T* dis_tab_i = dis_tab + (j1 - j0) * (i - i0) - j0;
            T thresh = heap_dis[0];
            for (size_t j = j0; j < j1; j++) {
                T dis = dis_tab_i[j];
                if (C::cmp(thresh, dis)) {
                    heap_replace_top<C>(k, heap_dis, heap_ids, dis, j);
                    thresh = heap_dis[0];
                }
            }
        }
    }

    /// series of results for queries i0..i1 is done
    void end_multiple() {
        // maybe parallel for
        for (size_t i = i0; i < i1; i++) {
            heap_reorder<C>(k, heap_dis_tab + i * k, heap_ids_tab + i * k);
        }
    }
};
/*****************************************************************
* Reservoir result handler
*
* A reservoir is a result array of size capacity > n (number of requested
* results) all results below a threshold are stored in an arbitrary order. When
* the capacity is reached, a new threshold is chosen by partitionning the
* distance array.
*****************************************************************/
/// Reservoir for a single query
template <class C>
struct ReservoirTopN {
    using T = typename C::T;
    using TI = typename C::TI;

    T* vals;  // caller-owned storage, `capacity` entries each
    TI* ids;

    size_t i; // number of stored elements
    size_t n; // number of requested elements
    size_t capacity; // size of storage

    T threshold; // current threshold

    // Default constructor leaves all fields uninitialized; only used so the
    // struct can live in containers before being assigned.
    ReservoirTopN() {}

    ReservoirTopN(size_t n, size_t capacity, T* vals, TI* ids)
            : vals(vals), ids(ids), i(0), n(n), capacity(capacity) {
        assert(n < capacity);
        // neutral() accepts every candidate until the reservoir first fills
        threshold = C::neutral();
    }

    // Append (val, id) if it beats the current threshold; compact the
    // storage first when it is full.
    void add(T val, TI id) {
        if (C::cmp(threshold, val)) {
            if (i == capacity) {
                shrink_fuzzy();
            }
            vals[i] = val;
            ids[i] = id;
            i++;
        }
    }

    // reduce storage from capacity to anything
    // between n and (capacity + n) / 2
    void shrink_fuzzy() {
        assert(i == capacity);
        // partition_fuzzy returns the new threshold and writes the new
        // element count into &i
        threshold = partition_fuzzy<C>(
                vals, ids, capacity, n, (capacity + n) / 2, &i);
    }

    // Emit the reservoir contents as a sorted top-n result into the
    // caller-provided heap arrays (each of size n).
    void to_result(T* heap_dis, TI* heap_ids) const {
        // NOTE(review): `j` is int compared against size_t std::min(i, n) --
        // fine while counts fit in int.
        for (int j = 0; j < std::min(i, n); j++) {
            heap_push<C>(j + 1, heap_dis, heap_ids, vals[j], ids[j]);
        }

        if (i < n) {
            // fewer results than requested: sort what we have...
            heap_reorder<C>(i, heap_dis, heap_ids);
            // add empty results
            heap_heapify<C>(n - i, heap_dis + i, heap_ids + i);
        } else {
            // add remaining elements
            heap_addn<C>(n, heap_dis, heap_ids, vals + n, ids + n, i - n);
            heap_reorder<C>(n, heap_dis, heap_ids);
        }
    }
};
/// Result handler that accumulates candidates per query into a
/// ReservoirTopN (capacity ~2k, rounded up for SIMD alignment) and converts
/// each reservoir into a sorted top-k block of heap_dis_tab / heap_ids_tab
/// (nq * k entries, caller-owned) at the end.
template <class C>
struct ReservoirResultHandler {
    using T = typename C::T;
    using TI = typename C::TI;

    int nq;            // number of queries
    T* heap_dis_tab;   // final distances, nq * k
    TI* heap_ids_tab;  // final ids, nq * k

    int64_t k;       // number of results to keep
    size_t capacity; // capacity of the reservoirs

    ReservoirResultHandler(
            size_t nq,
            T* heap_dis_tab,
            TI* heap_ids_tab,
            size_t k)
            : nq(nq),
              heap_dis_tab(heap_dis_tab),
              heap_ids_tab(heap_ids_tab),
              k(k) {
        // double then round up to multiple of 16 (for SIMD alignment)
        capacity = (2 * k + 15) & ~15;
    }

    /******************************************************
     * API for 1 result at a time (each SingleResultHandler is
     * called from 1 thread)
     */

    struct SingleResultHandler {
        ReservoirResultHandler& hr;

        // scratch storage backing the reservoir for the current query
        std::vector<T> reservoir_dis;
        std::vector<TI> reservoir_ids;
        ReservoirTopN<C> res1;

        SingleResultHandler(ReservoirResultHandler& hr)
                : hr(hr),
                  reservoir_dis(hr.capacity),
                  reservoir_ids(hr.capacity) {}

        size_t i; // current query index

        /// begin results for query # i
        void begin(size_t i) {
            res1 = ReservoirTopN<C>(
                    hr.k,
                    hr.capacity,
                    reservoir_dis.data(),
                    reservoir_ids.data());
            this->i = i;
        }

        /// add one result for query i
        void add_result(T dis, TI idx) {
            res1.add(dis, idx);
        }

        /// series of results for query i is done
        void end() {
            // flush the reservoir into this query's slice of the output
            T* heap_dis = hr.heap_dis_tab + i * hr.k;
            TI* heap_ids = hr.heap_ids_tab + i * hr.k;
            res1.to_result(heap_dis, heap_ids);
        }
    };

    /******************************************************
     * API for multiple results (called from 1 thread)
     */

    size_t i0, i1; // current query range [i0, i1)

    // one reservoir per query, backed by the flat scratch vectors below
    std::vector<T> reservoir_dis;
    std::vector<TI> reservoir_ids;
    std::vector<ReservoirTopN<C>> reservoirs;

    /// begin
    void begin_multiple(size_t i0, size_t i1) {
        this->i0 = i0;
        this->i1 = i1;
        reservoir_dis.resize((i1 - i0) * capacity);
        reservoir_ids.resize((i1 - i0) * capacity);
        reservoirs.clear();
        for (size_t i = i0; i < i1; i++) {
            reservoirs.emplace_back(
                    k,
                    capacity,
                    reservoir_dis.data() + (i - i0) * capacity,
                    reservoir_ids.data() + (i - i0) * capacity);
        }
    }

    /// add results for query i0..i1 and j0..j1
    /// dis_tab is (i1 - i0) rows of (j1 - j0) distances, row-major.
    void add_results(size_t j0, size_t j1, const T* dis_tab) {
        // maybe parallel for
#pragma omp parallel for
        for (int64_t i = i0; i < i1; i++) {
            ReservoirTopN<C>& reservoir = reservoirs[i - i0];
            // shifted by -j0 so the inner loop can index with the absolute j
            const T* dis_tab_i = dis_tab + (j1 - j0) * (i - i0) - j0;
            for (size_t j = j0; j < j1; j++) {
                T dis = dis_tab_i[j];
                reservoir.add(dis, j);
            }
        }
    }

    /// series of results for queries i0..i1 is done
    void end_multiple() {
        // maybe parallel for
        for (size_t i = i0; i < i1; i++) {
            reservoirs[i - i0].to_result(
                    heap_dis_tab + i * k, heap_ids_tab + i * k);
        }
    }
};
/*****************************************************************
* Result handler for range searches
*****************************************************************/
/// Result handler for range searches: keeps every result whose distance
/// passes the radius test (C::cmp) and merges per-block partial results
/// into the final RangeSearchResult on destruction.
template <class C>
struct RangeSearchResultHandler {
    using T = typename C::T;
    using TI = typename C::TI;
    RangeSearchResult* res;   // final result structure; not owned
    float radius;             // inclusion threshold, compared via C::cmp
    RangeSearchResultHandler(RangeSearchResult* res, float radius)
            : res(res), radius(radius) {}
    /******************************************************
     * API for 1 result at a time (each SingleResultHandler is
     * called from 1 thread)
     ******************************************************/
    struct SingleResultHandler {
        // almost the same interface as RangeSearchResultHandler
        RangeSearchPartialResult pres;   // thread-local partial result
        float radius;
        RangeQueryResult* qr = nullptr;  // result bucket for current query
        SingleResultHandler(RangeSearchResultHandler& rh)
                : pres(rh.res), radius(rh.radius) {}
        /// begin results for query # i
        void begin(size_t i) {
            qr = &pres.new_result(i);
        }
        /// add one result for query i
        void add_result(T dis, TI idx) {
            if (C::cmp(radius, dis)) {
                qr->add(dis, idx);
            }
        }
        /// series of results for query i is done
        void end() {}
        ~SingleResultHandler() {
            // flush this thread's results into the shared RangeSearchResult
            pres.finalize();
        }
    };
    /******************************************************
     * API for multiple results (called from 1 thread)
     ******************************************************/
    size_t i0, i1;   // current query range [i0, i1)
    std::vector<RangeSearchPartialResult*> partial_results;
    std::vector<size_t> j0s;   // j0 value each partial result belongs to
    int pr = 0;                // cursor into partial_results/j0s
    /// begin
    void begin_multiple(size_t i0, size_t i1) {
        this->i0 = i0;
        this->i1 = i1;
    }
    /// add results for query i0..i1 and j0..j1
    void add_results(size_t j0, size_t j1, const T* dis_tab) {
        RangeSearchPartialResult* pres;
        // there is one RangeSearchPartialResult structure per j0
        // (= block of columns of the large distance matrix)
        // it is a bit tricky to find the proper PartialResult structure
        // because the inner loop is on db not on queries.
        if (pr < j0s.size() && j0 == j0s[pr]) {
            // next expected block: reuse its partial result
            pres = partial_results[pr];
            pr++;
        } else if (j0 == 0 && j0s.size() > 0) {
            // wrapped around to the first column block
            pr = 0;
            pres = partial_results[pr];
            pr++;
        } else { // did not find this j0
            pres = new RangeSearchPartialResult(res);
            partial_results.push_back(pres);
            j0s.push_back(j0);
            pr = partial_results.size();
        }
        for (size_t i = i0; i < i1; i++) {
            // row of the (i1-i0) x (j1-j0) distance block for query i
            const float* ip_line = dis_tab + (i - i0) * (j1 - j0);
            RangeQueryResult& qres = pres->new_result(i);
            for (size_t j = j0; j < j1; j++) {
                float dis = *ip_line++;
                if (C::cmp(radius, dis)) {
                    qres.add(dis, j);
                }
            }
        }
    }
    void end_multiple() {}
    ~RangeSearchResultHandler() {
        // merge all column-block partial results into the final result
        if (partial_results.size() > 0) {
            RangeSearchPartialResult::merge(partial_results);
        }
    }
};
} // namespace faiss
|
schedule.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
#include <time.h>
struct timespec start, end;
void time_start();
double time_end();
int extra_work(int n);
/*
 * Benchmarks OpenMP loop scheduling (schedule(runtime)) on two
 * imbalanced workloads: first a slow index range, then a slow thread 0.
 * argv[1] optionally overrides the iteration count.
 */
int main(int argc, char *argv[])
{
    const int NX = 1000000;
    int nx, low, high, bias;
    if (argc > 1) { nx = atoi(argv[1]); } else nx = NX;
    /* NOTE(review): omp_get_num_threads() outside a parallel region
     * always returns 1 -- omp_get_max_threads() would give the team
     * size. The value was never used, so it is silenced here. */
    int nthreads = omp_get_num_threads();
    (void)nthreads;
    bias = 82320;
    low = nx/10 + bias;
    high = nx/5 + bias;
    printf("%i < i and i < %i are slow indices out of %i:\n", low, high, nx);
    time_start();
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int b = 0;
        long long m = 0;
        #pragma omp for schedule(runtime)
        for (int i = 0; i < nx; i++) {
            b++;
            /* was bitwise '&': the result was the same for 0/1 operands,
             * but '&&' states the intent and short-circuits */
            if (low < i && i < high) { m = m + extra_work(nx/100); }
        }
        /* fix: %lld matches long long; the previous %li was undefined behavior */
        printf("%i outer additions in thread %i, did %lld extra work\n", b, tid, m);
    }
    time_end();
    printf("\n%i is slow thread:\n", 0);
    time_start();
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int b = 0;
        long long m = 0;
        #pragma omp for schedule(runtime)
        for (int i = 0; i < nx; i++) {
            b++;
            if (tid == 0) { m = m + extra_work(nx/100); }
        }
        printf("%i outer additions in thread %i, did %lld extra work\n", b, tid, m);
    }
    time_end();
    return 0;
}
/* Record the wall-clock start time in the file-scope 'start' timespec. */
void time_start() {
    clock_gettime(CLOCK_REALTIME, &start);
}

/* Stop the wall-clock timer, print the elapsed seconds, and return them.
 * Fix: the function is declared to return double but previously fell off
 * the end without a return statement -- undefined behavior for any caller
 * that reads the result. Unused 'ierr' locals removed in both helpers. */
double time_end() {
    double time_used;
    clock_gettime(CLOCK_REALTIME, &end);
    time_used = ((double) (end.tv_nsec - start.tv_nsec)) / 1e9 + (double) (end.tv_sec - start.tv_sec);
    printf("time: %f s\n", time_used);
    return time_used;
}
void frobnicate(int m) { }
|
Matrix.h | #pragma once
#include <algorithm>
#include <exception>
#include <functional>
#include <iostream>
#include <omp.h>
#include <stdexcept>
#include <type_traits>
#include <vector>
namespace cppmath
{
/// Dense rows-by-cols matrix of a floating-point element type, backed by a
/// vector of row vectors. Supports element-wise +, -, scalar * and /, and
/// matrix multiplication (serial below a 250x250 threshold, OpenMP above).
template <typename T>
class Matrix
{
    // Fix: the diagnostic text was garbled ("An specilization ... has be of").
    static_assert(std::is_floating_point_v<T>, "A specialization of the matrix class has to be of a floating point type!");

public:
    using MatrixDataType = std::vector<std::vector<T>>;

    Matrix() = delete;                       // dimensions are mandatory
    Matrix(std::size_t rows, std::size_t cols);                  // zero-filled
    Matrix(std::size_t rows, std::size_t cols, const T &value);  // value-filled
    ~Matrix() noexcept = default;
    Matrix(const Matrix &other) = default;
    Matrix &operator=(const Matrix &other) = default;
    Matrix(Matrix &&other) noexcept = default;
    Matrix &operator=(Matrix &&other) noexcept = default;

    // Element-wise arithmetic; throw std::invalid_argument on dimension mismatch.
    Matrix operator+(const Matrix &rhs);
    Matrix &operator+=(const Matrix &rhs);
    Matrix operator-(const Matrix &rhs);
    Matrix &operator-=(const Matrix &rhs);
    // Scalar arithmetic; operator/ throws std::overflow_error for scalar == 0.
    Matrix operator*(const T &scalar);
    Matrix &operator*=(const T &scalar);
    Matrix operator/(const T &scalar);
    Matrix &operator/=(const T &scalar);
    // Matrix product; throws std::invalid_argument if inner dimensions differ.
    Matrix operator*(const Matrix &rhs);
    Matrix &operator*=(const Matrix &rhs);
    // result is accumulated into; callers pass a zero-initialized matrix.
    void dot(const Matrix &matrixA, const Matrix &matrixB, Matrix &result);
    void parallel_dot(const Matrix &matrixA, const Matrix &matrixB, Matrix &result);
    void print_matrix() const;
    std::size_t num_rows() const;
    std::size_t num_cols() const;

private:
    std::size_t m_rows;
    std::size_t m_cols;
    MatrixDataType m_data;   // m_rows vectors of m_cols elements each
};
// Constructs a rows-by-cols matrix with every element zero-initialized.
template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols)
    : m_rows(rows), m_cols(cols), m_data(m_rows, std::vector<T>(m_cols, 0))
{
}

// Constructs a rows-by-cols matrix with every element set to 'value'.
template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols, const T &value)
    : m_rows(rows), m_cols(cols), m_data(m_rows, std::vector<T>(m_cols, value))
{
}
// Element-wise sum of two equally-sized matrices; returns a new matrix.
// Throws std::invalid_argument when the dimensions disagree.
template <typename T>
Matrix<T> Matrix<T>::operator+(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    Matrix<T> sum(m_rows, m_cols);
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            sum.m_data[row][col] = m_data[row][col] + rhs.m_data[row][col];
        }
    }
    return sum;
}

// In-place element-wise sum; dimensions must match exactly.
template <typename T>
Matrix<T> &Matrix<T>::operator+=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            m_data[row][col] += rhs.m_data[row][col];
        }
    }
    return *this;
}
// Element-wise difference of two equally-sized matrices; returns a new matrix.
// Throws std::invalid_argument when the dimensions disagree.
template <typename T>
Matrix<T> Matrix<T>::operator-(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    Matrix<T> difference(m_rows, m_cols);
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            difference.m_data[row][col] = m_data[row][col] - rhs.m_data[row][col];
        }
    }
    return difference;
}

// In-place element-wise difference; dimensions must match exactly.
template <typename T>
Matrix<T> &Matrix<T>::operator-=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            m_data[row][col] -= rhs.m_data[row][col];
        }
    }
    return *this;
}
// Returns a copy of this matrix with every element scaled by 'scalar'.
template <typename T>
Matrix<T> Matrix<T>::operator*(const T &scalar)
{
    Matrix<T> scaled(m_rows, m_cols);
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            scaled.m_data[row][col] = m_data[row][col] * scalar;
        }
    }
    return scaled;
}

// Scales every element of this matrix in place by 'scalar'.
template <typename T>
Matrix<T> &Matrix<T>::operator*=(const T &scalar)
{
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            m_data[row][col] *= scalar;
        }
    }
    return *this;
}

// Returns a copy of this matrix with every element divided by 'scalar'.
// Throws std::overflow_error for a zero divisor.
template <typename T>
Matrix<T> Matrix<T>::operator/(const T &scalar)
{
    if (scalar == 0)
    {
        throw(std::overflow_error("You cannot divide by a scalar value of zero!"));
    }
    Matrix<T> quotient(m_rows, m_cols);
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            quotient.m_data[row][col] = m_data[row][col] / scalar;
        }
    }
    return quotient;
}

// Divides every element of this matrix in place by 'scalar'.
// NOTE: unlike operator/, this variant performs no zero-divisor check
// (kept as-is to preserve the original behavior).
template <typename T>
Matrix<T> &Matrix<T>::operator/=(const T &scalar)
{
    for (std::size_t row = 0; row != m_rows; ++row)
    {
        for (std::size_t col = 0; col != m_cols; ++col)
        {
            m_data[row][col] /= scalar;
        }
    }
    return *this;
}
// Matrix product this * rhs. Requires this->m_cols == rhs.m_rows.
// Chooses the serial kernel for small operands and the OpenMP kernel
// otherwise (250 is an untuned heuristic threshold).
template <typename T>
Matrix<T> Matrix<T>::operator*(const Matrix<T> &rhs)
{
    // NOTE(review): the condition checks cols-vs-rows compatibility, but the
    // message text says "cols are not equal" -- slightly misleading to users.
    if (m_cols != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    Matrix<T> result(m_rows, rhs.m_cols);
    if (m_rows < 250 && m_cols < 250)
    {
        dot(*this, rhs, result);
    }
    else
    {
        parallel_dot(*this, rhs, result);
    }
    return result;
}

// In-place matrix product: *this = *this * rhs (delegates to operator*).
template <typename T>
Matrix<T> &Matrix<T>::operator*=(const Matrix<T> &rhs)
{
    if (m_cols != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    *this = (*this) * rhs;
    return *this;
}
// Serial triple-loop matrix product: result += matrixA * matrixB.
// 'result' is expected to arrive zero-filled (as operator* provides);
// values are accumulated onto whatever it already contains.
template <typename T>
void Matrix<T>::dot(const Matrix<T> &matrixA, const Matrix<T> &matrixB, Matrix<T> &result)
{
    for (std::size_t row = 0; row != matrixA.m_rows; ++row)
    {
        for (std::size_t col = 0; col != matrixB.m_cols; ++col)
        {
            T acc = result.m_data[row][col];
            for (std::size_t inner = 0; inner != matrixB.m_rows; ++inner)
            {
                acc = acc + matrixA.m_data[row][inner] * matrixB.m_data[inner][col];
            }
            result.m_data[row][col] = acc;
        }
    }
}
// OpenMP-parallel triple-loop product: result += matrixA * matrixB.
// Rows are distributed across threads, so each thread writes disjoint rows
// of 'result' and no synchronization is needed.
// Fix: an OpenMP worksharing loop must use a relational test (<, <=, >, >=);
// the previous 'i != matrixA.m_rows' is not in canonical form and is
// rejected by conforming compilers. The loop variables are now declared in
// the for statements themselves, making them implicitly private (the old
// shared-then-private(i,j,k) declarations were unnecessary).
template <typename T>
void Matrix<T>::parallel_dot(const Matrix<T> &matrixA, const Matrix<T> &matrixB, Matrix<T> &result)
{
    #pragma omp parallel for shared(result) num_threads(4)
    for (std::size_t i = 0; i < matrixA.m_rows; ++i)
    {
        for (std::size_t j = 0; j < matrixB.m_cols; ++j)
        {
            for (std::size_t k = 0; k < matrixB.m_rows; ++k)
            {
                result.m_data[i][j] = result.m_data[i][j] + matrixA.m_data[i][k] * matrixB.m_data[k][j];
            }
        }
    }
}
// Writes the matrix to stdout: one row per line, values separated by a
// single space (with a trailing space per row), then one blank line.
template <typename T>
void Matrix<T>::print_matrix() const
{
    for (const auto &row : m_data)
    {
        for (const auto &value : row)
        {
            std::cout << value << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}

// Number of rows in the matrix.
template <typename T>
std::size_t Matrix<T>::num_rows() const
{
    return m_rows;
}

// Number of columns in the matrix.
template <typename T>
std::size_t Matrix<T>::num_cols() const
{
    return m_cols;
}
} // namespace cppmath
|
convolution_7x7_pack1to8_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Stride-2 7x7 int8 convolution, pack1 input to pack8 output.
// Rearranges the input into im2col layout (one row of 'size' samples per
// (input channel, kernel tap) pair), then delegates to the packed int8 GEMM.
// Assumes top_blob dimensions already reflect the stride-2 output size.
static void conv7x7s2_pack1to8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;   // output positions per channel
    const int maxk = 49;            // 7x7 = 49 kernel taps
    // im2col
    Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
    {
        // bytes left over in each input row after consuming outw stride-2
        // samples; added to sptr to advance to the next output row
        const int gap = w * 2 - outw * 2;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);
            // one pass per kernel tap (u, v)
            for (int u = 0; u < 7; u++)
            {
                for (int v = 0; v < 7; v++)
                {
                    // first input sample covered by this tap
                    const signed char* sptr = img.row<const signed char>(u) + v;
                    for (int i = 0; i < outh; i++)
                    {
                        // gather every second sample (stride 2),
                        // unrolled 4-wide, then 2-wide, then scalar tail
                        int j = 0;
                        for (; j + 3 < outw; j += 4)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[2];
                            ptr[2] = sptr[4];
                            ptr[3] = sptr[6];
                            sptr += 8;
                            ptr += 4;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[2];
                            sptr += 4;
                            ptr += 2;
                        }
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];
                            sptr += 2;
                            ptr += 1;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_pack1to8_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
|
15_blur_parallel.c | #include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <omp.h>
#define NX 1002
#define NY 1002
/*
 * Iteratively blurs the interior of an szx-by-szy image in place.
 * Each pass replaces every interior pixel with the rounded mean of its
 * four edge neighbours; the one-pixel border is left untouched.
 *
 * image: row-major buffer of szx*szy ints (row ix, column iy -> image[iy + ix*szy])
 * iters: number of blur passes to apply.
 *
 * Fix: the initial copy previously iterated over NX*NY (the compile-time
 * demo size) instead of szx*szy, so calling blur() with any other
 * dimensions read and wrote out of bounds.
 */
void blur(int *image, size_t szx, size_t szy, size_t iters){
    int *temp = malloc(sizeof(int) * szx * szy);
    if (temp == NULL) return;   /* allocation failed: leave image unchanged */
    for (size_t i = 0; i < szx * szy; ++i) temp[i] = image[i];
    for (size_t iit = 0; iit < iters; ++iit){
        /* rows are independent, so the outer loop parallelises safely */
        #pragma omp parallel for
        for (size_t ix = 1; ix < szx-1; ++ix){
            for (size_t iy = 1; iy < szy-1; ++iy){
                temp[iy + ix * szy] = (int)(0.25 * (float)(image[iy + (ix+1) * szy] +
                    image[iy + (ix-1) * szy] + image[(iy-1) + ix * szy] +
                    image[(iy+1) + ix * szy]) + 0.5);
            }
        }
        for (size_t i = 0; i < (szx * szy); ++i){
            image[i] = temp[i];
        }
    }
    free(temp);
}
/* Driver: blurs a constant NX-by-NY image 10000 times and reports timing.
 * Fix: NX*NY ints is roughly 4 MB -- too large for a typical thread
 * stack -- so the demo image is now 'static' instead of automatic. */
int main(){
    static int image[(NX)*(NY)];
    struct timespec t1, t2;
    float dtime;
    for (size_t i = 0; i < NX*NY; ++i) image[i] = 5;
    printf("OpenMP code running on %i threads\n", omp_get_max_threads());
    clock_gettime(CLOCK_REALTIME, &t1);
    blur(image, NX, NY, 10000);
    clock_gettime(CLOCK_REALTIME, &t2);
    dtime = (float)(t2.tv_sec - t1.tv_sec) + ((float)(t2.tv_nsec - t1.tv_nsec)
        /1.0e9);
    printf("Time taken was %f seconds\n", dtime);
    printf("Arbitrary value from image %i\n", image[100]);
    printf("Arbitrary value printed to avoid compiler optimising the blur out\n");
    return 0;
}
|
kmpp.h | /**
* Copyright (c) 2015, Jozef Stefan Institute, Quintelligence d.o.o. and contributors
* All rights reserved.
*
* This source code is licensed under the FreeBSD license found in the
* LICENSE file in the root directory of this source tree.
*/
/*
* kmpp.h
*
* Created on: Jan 26, 2011
* Author: tadej
*/
/**
* Initialize k-means in a smart way that gives us some theoretical guarantee
* about clustering quality.
*
* See: David Arthur, Sergei Vassilvitskii: k-means++: The advantages of careful seeding,
* Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms, 2007
*
* We parametrize by vector type and LinAlg package used.
* If using a custom vector/matrix type that is not available in LinAlg,
* Make sure it implements the TMatrix interface and LA:AddVec and LA:DotProduct.
*/
// k-means clustering with k-means++ seeding.
// V = document vector type, LA = linear-algebra package, M = matrix type.
template<class V, class LA, class M>
class TKMeans {
protected:
    TCRef CRef;   // reference counter for TPt smart pointers
public:
    friend class TPt<TKMeans<V,LA,M> >;
protected:
    const M *DocVV;         // document matrix, one column per document; not owned
    TVec<TFltV> DCSim;      // per-cluster document-centroid similarities
    int Dim;                // NOTE(review): never initialized or used in this file
    TFullColMatrix Centroids;   // one column per cluster centroid
    TIntV Assignment;       // cluster id per document (-1 = unassigned)
    int k;                  // number of clusters
    int maxItr;             // iteration cap for Apply()
    PNotify Notify;         // progress/status callback
    TRnd Rnd;               // random source used by the k-means++ seeding
    // k-means++ seeding helpers (see ChooseSmartCenters below)
    void ChooseSmartCenters(int numLocalTries, TIntV & centers);
    double GetCandidatePotential(const TFltV & sqDistances, int index) const;
    int SelectCentroidCenter(double potential, const TFltV & sqDistances);
    // scale a vector to unit 2-norm (no-op for the zero vector)
    void PutUnitNorm(TFltV & Vec);
    void PutUnitNorm(TIntFltKdV & Vec);
    // reassignment predicate; override to change the similarity criterion
    virtual bool ShouldReassign(double sim, double bestSim, int docIndex) const {
        return sim > bestSim;
    }
    void MakeCentroids(TVec<TIntV> & ClusterDocs, TIntV & CentroidSize);
    TFltV& Centroid(int ClusterId);
public:
    TKMeans(const M *Docs, int K, int maxItr, PNotify Not, const TRnd& _Rnd = TRnd(1));
    ~TKMeans() {}
    void Init();
    void Apply();
    int GetDocs() const;
    /** Implement this for row access for custom matrix implementations */
    /*const V& GetDoc(int DocId) const;*/
    int GetK() const;
    int GetDim() const;
    const TFltV& GetCentroid(int ClusterId) const;
    const TIntV& GetAssignment() const;
    // clustering-quality diagnostics (see definitions below)
    double GetClusterCompactness(const TFltV & Cluster) const;
    double GetClusteringCompactness() const;
    double GetClusteringQualityBySize(const TIntV & ClusterSizes) const;
    void GetClustersQualityByDCSim(TFltV& ClusterDCQ) const;
    double GetClusteringQualityByDCSim() const;
    double GetClusteringSSE() const;
};
// Dispatches column (document) access for the supported matrix types so
// TKMeans can fetch document vectors without knowing the concrete class.
class TDefaultMatrixAccess {
public:
    static const TIntFltKdV& GetDoc(const TSparseColMatrix *DocVV, int DocId) {
        return DocVV->ColSpVV[DocId];
    }
    static const TFltV& GetDoc(const TFullColMatrix *DocVV, int DocId) {
        return DocVV->ColV[DocId];
    }
    static const PBowSpV& GetDoc(const TBowMatrix *DocVV, int DocId) {
        return DocVV->ColSpVV[DocId];
    }
};
// Explicit instantiations of the three supported configurations.
template class TKMeans<TIntFltKdV, TLinAlg, TSparseColMatrix>;
template class TKMeans<TFltV, TLinAlg, TFullColMatrix>;
template class TKMeans<PBowSpV, TBowLinAlg, TBowMatrix>;
// Convenience aliases for the instantiations above.
typedef TKMeans<TIntFltKdV, TLinAlg, TSparseColMatrix> TSparseKMeans;
typedef TKMeans<TFltV, TLinAlg, TFullColMatrix> TDenseKMeans;
typedef TKMeans<PBowSpV, TBowLinAlg, TBowMatrix> TBowKMeans;
/*
template TSparseKMeans::TSparseKMeans(const TSparseColMatrix *Docs, int K, int maxItr, PNotify Not);
template<TIntFltKdV, TLinAlg, TSparseColMatrix> TSparseKMeans::~TSparseKMeans();
template void TSparseKMeans::Init();
template void TSparseKMeans::Apply();
template int TSparseKMeans::GetDocs() const;
template int TSparseKMeans::GetK() const;
template int TSparseKMeans::GetDim() const;
template const TFltV& TSparseKMeans::GetCentroid(int ClusterId) const;
template const TIntV& TSparseKMeans::GetAssignment() const;
template double TSparseKMeans::GetClusterCompactness(const TFltV & Cluster) const;
template double TSparseKMeans::GetClusteringCompactness() const;
template double TSparseKMeans::GetClusteringQualityBySize(const TIntV & ClusterSizes) const;
template void TSparseKMeans::GetClustersQualityByDCSim(TFltV& ClusterDCQ) const;
template double TSparseKMeans::GetClusteringQualityByDCSim() const;
template double TSparseKMeans::GetClusteringSSE() const;*/
/**
* Vectors are sparse 64-bit sparse doubles. Also look at TBowKMeans
*/
/**
* Vectors are dense 64-bit doubles.
*/
/**
* An interface of TKMeans to the TextGarden tmine package.
* Should look and act the same as TBowClust.
*/
// Adapter exposing TKMeans through the TBowClust-style interface of the
// TextGarden tmine package.
class TBowKMeansUtils {
public:
    /** Compatibility layer */
    // Runs TBowKMeans on the weighted document set and packages the outcome
    // as a PBowDocPart (one cluster entry per centroid).
    // NOTE(review): BowSim, ConvergEps, MnDocsPerClust and DocIdWgtPrV are
    // accepted for interface compatibility but ignored by this implementation.
    static PBowDocPart GetKMeansPartForDocWgtBs(
            const PNotify& Notify,
            const PBowDocWgtBs& Wgt,
            const PBowDocBs& Bow, const PBowSim& BowSim, TRnd& Rnd,
            const int& Clusts, const int& ClustTrials,
            const double& ConvergEps, const int& MnDocsPerClust,
            const TIntFltPrV& DocIdWgtPrV=TIntFltPrV()) {
        TBowMatrix DocMtx(Wgt);
        TBowKMeans KMeans(&DocMtx, Clusts, ClustTrials, Notify, Rnd);
        KMeans.Init();
        KMeans.Apply();
        PBowDocPart Part = TBowDocPart::New();
        // group document ids by their assigned cluster
        TVec<TIntV> ClusterDIdV(Clusts);
        const TIntV& Assignment = KMeans.GetAssignment();
        for (int i = 0; i < KMeans.GetDocs(); i++) {
            ClusterDIdV[Assignment[i]].Add(i);
        }
        // build one TBowDocPartClust per centroid
        for (int CId = 0; CId < Clusts; CId++) {
            const TFltV& Cluster = KMeans.GetCentroid(CId);
            TStr CNm = "Cluster " + TInt::GetStr(CId);
            double Qual = 0.0;   // quality is not computed here
            PBowSpV ConceptSpV = TBowSpV::New(CId, Cluster, 0.0);
            PBowDocPart SubPart = TBowDocPart::New();
            PBowDocPartClust Clust = TBowDocPartClust::New(Bow, CNm, Qual,
                ClusterDIdV[CId], ConceptSpV, SubPart);
            Part->AddClust(Clust);
        }
        return Part;
    }
};
// Allocates the centroid matrix (Dim x k), the per-document assignment
// vector and the per-cluster similarity buffers. Does not cluster yet.
template<class V, class LA, class M>
TKMeans<V, LA, M>::TKMeans(const M *Docs, int K, int MaxItr, PNotify Not, const TRnd& _Rnd) :
    DocVV(Docs), DCSim(K), k(K), maxItr(MaxItr), Notify(Not), Rnd(_Rnd) {
    Centroids.ColV.Gen(k);
    Centroids.ColN = k;
    Centroids.RowN = DocVV->GetRows();
    Assignment.Gen(GetDocs());
    for (int CId = 0; CId < k; CId++) {
        DCSim[CId].Gen(GetDocs());
    }
}

// Seeds the k centroids via k-means++ and unit-normalizes them.
template<class V, class LA, class M>
void TKMeans<V, LA, M>::Init() {
    // Initially, no nodes belong to any cluster
    Assignment.PutAll(-1);
    TIntV Centers;
    // Select good centers for clusters (k-means++, with the customary
    // 3 + log(k) local tries per center)
    ChooseSmartCenters(3 + (int) log((double) k), Centers);
    for (int i = 0; i < Centers.Len(); i++) {
        Assignment[Centers[i]] = i;
    }
    for (int i = 0; i < k; i++) {
        Centroids.ColV[i].Gen(GetDim());
    }
    // each seed document forms the initial (singleton) cluster content
    TVec<TIntV> ClusterDocs(k);
    for (int d = 0; d < GetDocs(); d++) {
        if (Assignment[d] != -1) {
            ClusterDocs[Assignment[d]].Add(d);
        }
    }
    TIntV CentroidSize(k);
    MakeCentroids(ClusterDocs, CentroidSize);
    for (int c = 0; c < k; c++) {
        PutUnitNorm(Centroid(c));
    }
}
// Number of documents = number of columns of the document matrix.
template<class V, class LA, class M>
int TKMeans<V, LA, M>::GetDocs() const {
    return DocVV->GetCols();
}

// Vector dimensionality = number of rows of the document matrix.
template<class V, class LA, class M>
int TKMeans<V, LA, M>::GetDim() const {
    return DocVV->GetRows();
}

// Number of clusters.
template<class V, class LA, class M>
int TKMeans<V, LA, M>::GetK() const {
    return k;
}

// Read-only access to a cluster centroid (dense vector).
template<class V, class LA, class M>
const TFltV& TKMeans<V, LA, M>::GetCentroid(int ClusterId) const {
    return Centroids.ColV[ClusterId];
}

// Compactness of one cluster = squared 2-norm of its centroid.
template<class V, class LA, class M>
double TKMeans<V, LA, M>::GetClusterCompactness(const TFltV& Cluster) const {
    return TLinAlg::DotProduct(Cluster, Cluster);
}

// Sum of compactness over all clusters.
template<class V, class LA, class M>
double TKMeans<V, LA, M>::GetClusteringCompactness() const {
    double q = 0.0;
    for (int i = 0; i < k; i++) {
        q += GetClusterCompactness(GetCentroid(i));
    }
    return q;
}

// Size-weighted compactness, averaged over all documents.
template<class V, class LA, class M>
double TKMeans<V, LA, M>::GetClusteringQualityBySize(
        const TIntV& ClusterSizes) const {
    double q = 0.0;
    for (int i = 0; i < k; i++) {
        q += GetClusterCompactness(GetCentroid(i)) * ClusterSizes[i];
    }
    return q / GetDocs();
}
// Accumulates, per cluster, the similarity of each member document to its
// centroid (reads the DCSim table filled in by Apply()).
template<class V, class LA, class M>
void TKMeans<V, LA, M>::GetClustersQualityByDCSim(TFltV& ClusterDCQ) const {
    for (int DocId = 0; DocId < Assignment.Len(); DocId++) {
        double Sim = DCSim[Assignment[DocId]][DocId];
        ClusterDCQ[Assignment[DocId]] += Sim;
    }
}

// Total document-centroid similarity over all clusters.
template<class V, class LA, class M>
double TKMeans<V, LA, M>::GetClusteringQualityByDCSim() const {
    TFltV ClusterDCQ(k);
    GetClustersQualityByDCSim(ClusterDCQ);
    return TLinAlg::SumVec(ClusterDCQ);
}

// Sum of squared errors, expressed through (1 - similarity) per document.
template<class V, class LA, class M>
double TKMeans<V, LA, M>::GetClusteringSSE() const {
    double Qual = 0.0;
    for (int DocId = 0; DocId < Assignment.Len(); DocId++) {
        double Sim = DCSim[Assignment[DocId]][DocId];
        Qual += (1.0 - Sim) * (1.0 - Sim);
    }
    return Qual;
}

// Scales a dense vector to unit 2-norm in place; the zero vector is left
// unchanged.
template<class V, class LA, class M>
void TKMeans<V, LA, M>::PutUnitNorm(TFltV& Vec) {
    int WIds = Vec.Len();
    // get square-weight-sum
    double SqWgtSum = 0;
    for (int WIdN = 0; WIdN < WIds; WIdN++) {
        SqWgtSum += TMath::Sqr(Vec[WIdN]);
    }
    if (SqWgtSum > 0) {
        // normalize weights
        // NOTE(review): sqrt(x^2 / s) preserves each component's magnitude
        // ratio but drops its sign; assumes non-negative weights -- confirm.
        for (int WIdN = 0; WIdN < WIds; WIdN++) {
            Vec[WIdN] = (sdouble) sqrt(TMath::Sqr(Vec[WIdN]) / SqWgtSum);
        }
    }
}

// Sparse-vector overload of the unit-norm scaling above.
template<class V, class LA, class M>
void TKMeans<V, LA, M>::PutUnitNorm(TIntFltKdV& Vec) {
    int WIds = Vec.Len();
    // get square-weight-sum
    double SqWgtSum = 0;
    for (int WIdN = 0; WIdN < WIds; WIdN++) {
        SqWgtSum += TMath::Sqr(Vec[WIdN].Dat);
    }
    if (SqWgtSum > 0) {
        // normalize weights
        for (int WIdN = 0; WIdN < WIds; WIdN++) {
            Vec[WIdN].Dat = (sdouble) sqrt(
                TMath::Sqr(Vec[WIdN].Dat) / SqWgtSum);
        }
    }
}

// Mutable access to a cluster centroid.
template<class V, class LA, class M>
TFltV& TKMeans<V, LA, M>::Centroid(int ClusterId) {
    return Centroids.ColV[ClusterId];
}
// Rebuilds each centroid as the (un-normalized) sum of its member document
// vectors and records the member count. Clusters are independent, so the
// loop parallelizes without synchronization.
template<class V, class LA, class M>
void TKMeans<V, LA, M>::MakeCentroids(TVec<TIntV> & ClusterDocs,
        TIntV & CentroidSize) {
    // sum centroids
#pragma omp parallel for
    for (int CId = 0; CId < k; CId++) {
        const TIntV & DocIdV = ClusterDocs[CId];
        TFltV & Cen = Centroid(CId);
        for (int i = 0; i < DocIdV.Len(); i++) {
            const V& Vec = TDefaultMatrixAccess::GetDoc(DocVV, DocIdV[i]);
            LA::AddVec(1.0, Vec, Cen, Cen);
        }
        CentroidSize[CId] = ClusterDocs[CId].Len();
    }
}

// Read-only access to the per-document cluster assignment.
template<class V, class LA, class M>
const TIntV& TKMeans<V, LA, M>::GetAssignment() const {
    return Assignment;
}
// Main k-means loop: alternates similarity computation, document
// reassignment, and centroid recomputation until no assignment changes
// or maxItr iterations have run. Init() must have been called first.
template<class V, class LA, class M>
void TKMeans<V, LA, M>::Apply() {
    bool stable = false;
    TIntV CentroidSize(k);
    //TFltVV DCSims(GetDocs(), k);
    for (int Iter = 0; (Iter < maxItr) && !stable; Iter++) {
        stable = true;
        // calculate
        // DCSim[CId][d] = similarity of document d to centroid CId
        // (one matrix-vector product per cluster, clusters independent)
#pragma omp parallel for
        for (int CId = 0; CId < k; CId++) {
            DocVV->MultiplyT(GetCentroid(CId), DCSim[CId]);
        }
        //DocVV.MultiplyT()
        /*#pragma omp parallel for
        for (int d = 0; d < GetDocs(); d++) {
            const TIntFltKdV& Doc = GetDoc(d);
            for (int c = 0; c < k; c++) {
                const TFltV& Cluster = GetCentroid(c);
                double Sim = TLinAlg::DotProduct(Cluster, Doc);
                //printf("%2.4f\t", Sim);
                DCSims(d,c) = Sim;
            }
            //printf("\n");
        }*/
        // reassign
        // NOTE(review): several threads may write 'stable = false'
        // concurrently. All writers store the same value, but this is
        // formally a data race -- consider reduction(&&:stable) or atomic.
#pragma omp parallel for
        for (int DId = 0; DId < GetDocs(); DId++) {
            double BestSim = 0.0;
            int BestClust = 0;
            for (int c = 0; c < k; c++) {
                double Sim = DCSim[c][DId];
                // similarities must not exceed 1 (unit-norm centroids)
                AssertR(Sim <= 1 + 1e-6, TFlt::GetStr(Sim));
                // just in case
                if (ShouldReassign(Sim, BestSim, DId)) {
                    BestSim = Sim;
                    BestClust = c;
                }
            }
            //printf("%2.4f\n", BestSim);
            int OldAssignment = Assignment[DId];
            Assignment[DId] = BestClust;
            if (BestClust != OldAssignment) {
                stable = false;
            }
        }
        if (stable)
            break;
        // reset
        for (int CId = 0; CId < k; CId++) {
            Centroid(CId).PutAll(0.0);
        }
        CentroidSize.PutAll(0);
        // gather docs
        TVec<TIntV> ClusterDocs(k);
        for (int DId = 0; DId < GetDocs(); DId++) {
            ClusterDocs[Assignment[DId]].Add(DId);
        }
        MakeCentroids(ClusterDocs, CentroidSize);
        // renormalize to actual centroids (mean of member vectors)
        for (int CId = 0; CId < k; CId++) {
            const int _CentroidSize = CentroidSize[CId];
            if (_CentroidSize > 0) {
                TFltV & Cen = Centroid(CId);
                TLinAlg::MultiplyScalar(1.0 / (double)_CentroidSize, Cen, Cen);
            }
        }
        // per-iteration quality diagnostics
        double Qual = GetClusteringCompactness();
        double QualSiz = GetClusteringQualityBySize(CentroidSize);
        double QualDc = GetClusteringQualityByDCSim();
        double Sse = GetClusteringSSE();
        Notify->OnStatusFmt(
            "Iteration %d, qc:%2.4f, qs:%2.4f, qdc:%2.4f, sse:%2.4f ",
            Iter, Qual, QualSiz, QualDc, Sse);
        // renormalize to 2-norm to have fast calculation of DCSims
        for (int CId = 0; CId < k; CId++) {
            PutUnitNorm(Centroid(CId));
        }
    }
}
/**
 * KMeans++ magic
 * Chooses a number of Centers from the data set as follows:
 * - One center is chosen randomly.
 * - Now repeat k-1 times:
 * - Repeat numLocalTries times:
 * - Add a point x with probability proportional to the distance squared from x
 * to the closest existing center
 * - Add the point chosen above that results in the smallest potential.
 *
 * @return indices of seed centroids
 **/
template<class V, class LA, class M>
void TKMeans<V, LA, M>::ChooseSmartCenters(int numLocalTries, TIntV& Centers) {
    IAssert(GetDocs() > 0);
    IAssert(GetDocs() > k);
    int center = 0;
    Centers.Gen(k);
    // SqDistances[i] = squared distance of doc i to its nearest chosen center;
    // distance is defined as (1 - dot product), i.e. cosine dissimilarity
    TFltV SqDistances(GetDocs());
    // Choose one random center and initialize closest sq. distances
    int Index = (int) (Rnd.GetUniDev() * GetDocs());
    Centers[center++] = Index;
    // Get initial potential (sum of all squared distances)
    double potential = 0.0;
    const V& FirstCenter = TDefaultMatrixAccess::GetDoc(DocVV, Index);
    for (int i = 0; i < GetDocs(); i++) {
        const V& Doc = TDefaultMatrixAccess::GetDoc(DocVV, i);
        double Dist = 1.0 - LA::DotProduct(Doc, FirstCenter);
        double SqDist = Dist * Dist;
        SqDistances[i] = SqDist;
        potential += SqDist;
    }
    // Choose each center
    for (int i = 1; i < k; i++) {
        // Repeat several trials and keep the candidate with the lowest
        // resulting potential
        double bestNewCandidate = -1.0;
        int bestNewIndex = 0;
        for (int j = 0; j < numLocalTries; j++) {
            // Choose our center - have to be slightly careful to return a valid answer even accounting
            // for possible rounding errors
            Index = SelectCentroidCenter(potential, SqDistances);
            // Compute the new potential
            double newCandidate = GetCandidatePotential(SqDistances, Index);
            // Store the best result
            if (bestNewCandidate < 0 || newCandidate < bestNewCandidate) {
                bestNewCandidate = newCandidate;
                bestNewIndex = Index;
            }
        }
        // Add the appropriate center
        Centers[center++] = bestNewIndex;
        potential = bestNewCandidate;
        // refresh nearest-center distances against the newly added center
        const V& BestCand = TDefaultMatrixAccess::GetDoc(DocVV, bestNewIndex);
        for (int j = 0; j < GetDocs(); j++) {
            const V& Doc = TDefaultMatrixAccess::GetDoc(DocVV, j);
            double Dist = 1.0 - LA::DotProduct(Doc, BestCand);
            const TFlt SqDist = Dist * Dist;
            SqDistances[j] = TMath::Mn(SqDist, SqDistances[j]);
        }
    }
}
/**
 * Get sum of nearest distances to other seeds.
 *
 * @param SqDistances, D(x_i)^2 for each x_i
 * @param Index, Index of candidate data point
 * @return data point potential
 */
// i.e. the total potential if the document at Index were added as a center.
template<class V, class LA, class M>
double TKMeans<V, LA, M>::GetCandidatePotential(const TFltV& SqDistances,
        int Index) const {
    double newCandidate = 0;
    const V& BestCand = TDefaultMatrixAccess::GetDoc(DocVV, Index);
    for (int i = 0; i < GetDocs(); i++) {
        const V& Doc = TDefaultMatrixAccess::GetDoc(DocVV, i);
        double Dist = 1.0 - LA::DotProduct(Doc, BestCand);
        const TFlt SqDist = Dist * Dist;
        // each point contributes its distance to the nearer of (existing
        // centers, candidate)
        newCandidate += TMath::Mn(SqDist, SqDistances[i]);
    }
    return newCandidate;
}

/**
 * Returns data point x' with probability D(x')^2 / sum{D(x)^2)
 * @param potential, equal to sum{D(x)^2)
 * @param SqDistances, D(x_i)^2 for each x_i
 * @param rnd, randomizer
 * @return Index of chosen data point
 */
// Roulette-wheel selection; falls through to the last index on rounding error.
template<class V, class LA, class M>
int TKMeans<V, LA, M>::SelectCentroidCenter(double potential,
        const TFltV& SqDistances) {
    int i;
    // Get random value
    double r = Rnd.GetUniDev() * potential;
    // Subtract distances from this value and see where we end up
    for (i = 0; i < GetDocs() - 1; i++) {
        if (r <= SqDistances[i]) {
            break;
        }
        r -= SqDistances[i];
    }
    // i is the lucky winner
    return i;
}
|
distributiongenerator.h | /**
* @file distributiongenerator.h This code provides basic structure for distribution generators. This should be inherited by all other distribution generators.
* @author TPOC: palisade@njit.edu
*
* @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#define LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
//used to define a thread-safe generator
#if defined (_MSC_VER) // Visual studio
//#define thread_local __declspec( thread )
#elif defined (__GCC__) // GCC
#define thread_local __thread
#endif
#include <chrono>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include "backend.h"
//#define FIXED_SEED // if defined, then uses a fixed seed number for reproducible results during debug. Use only one OMP thread to ensure reproducibility
namespace lbcrypto {
/**
* @brief Abstract class describing generator requirements.
*
* The Distribution Generator defines the methods that must be implemented by a real generator.
* It also holds the single PRNG, which should be called by all child class when generating a random number is required.
*
*/
// Holder of the process-wide (or, with OpenMP, per-thread -- see the
// threadprivate pragmas below) Mersenne Twister PRNG used by all
// distribution generators.  Access only through GetPRNG().
class PseudoRandomNumberGenerator {
public:
// Returns the lazily-initialized PRNG.  Initialization is guarded by
// std::call_once on m_flag; since m_flag/m_prng are threadprivate under
// OpenMP (when FIXED_SEED is not defined), each OpenMP thread seeds and
// owns its own engine.
static std::mt19937 &GetPRNG () {
std::call_once(m_flag, [] () {
// NOTE(review): 'rd' is only used by the commented-out seeding line
// below; it is unused in both active branches.
std::random_device rd;
#if defined(FIXED_SEED)
//TP: Need reproducibility to debug NTL.
// Debug-only path: fixed seed of 1 for reproducible runs.  Only valid
// with a single thread.
std::cerr << "**FOR DEBUGGING ONLY!!!! Using fixed initializer for PRNG. Use a single thread only!" << std::endl;
std::mt19937 *gen;
gen = new std::mt19937(1);
gen->seed(1);
m_prng.reset(gen);
#else
//m_prng.reset(new std::mt19937(rd()));
// Seed mixes the high-resolution clock with a hash of the thread id so
// that threads initialized at (nearly) the same instant still get
// distinct seeds.
m_prng.reset(new std::mt19937(std::chrono::high_resolution_clock::now().time_since_epoch().count()+std::hash<std::thread::id>{}(std::this_thread::get_id())));
#endif
});
return *m_prng;
}
private:
// One-time-initialization guard for m_prng (per thread under OpenMP).
static std::once_flag m_flag;
#if !defined(FIXED_SEED)
// avoid contention on m_flag
#pragma omp threadprivate(m_flag)
#endif
// The shared engine instance; owned via shared_ptr so it is released at
// thread/program teardown.  Definitions of both statics live in a .cpp file.
static std::shared_ptr<std::mt19937> m_prng;
#if !defined(FIXED_SEED)
// avoid contention on m_prng
#pragma omp threadprivate(m_prng)
#endif
};
// Base class for Distribution Generator by type
// Root of the distribution-generator hierarchy, parameterized by the
// vector type the concrete generators produce.  Carries no state of its
// own; it exists so generators can be handled polymorphically.
template <typename VecType>
class DistributionGenerator {
public:
  // Nothing to initialize.
  DistributionGenerator() = default;
  // Virtual so derived generators destroy correctly through a base pointer.
  virtual ~DistributionGenerator() = default;
};
} // namespace lbcrypto
#endif // LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
|
countSorting.c | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>
#define SIZE 100000
/*
 * Sorts a[0..n-1] in ascending order using a parallel counting (rank) sort.
 *
 * For each element, its final position is the number of elements that must
 * precede it: strictly smaller ones, plus equal ones appearing earlier
 * (which also makes the sort stable).  Ranks are unique, so the writes to
 * temp[] never collide and the loop parallelizes without synchronization.
 *
 * Fixes vs. the original:
 *  - original called memcpy() without including <string.h> (implicit
 *    declaration; undefined behavior in C99+) -- replaced with a copy loop;
 *  - temp was leaked -- now freed;
 *  - the "#pragma omp atomic" on count was removed: count is private, so
 *    each thread increments its own copy and atomics are pure overhead;
 *  - malloc failure is now checked (input left untouched on failure).
 */
void count_sort(int a[], int n)
{
    int i, j, count;
    int *temp = malloc(n * sizeof(int));
    if (temp == NULL)
        return;
    /* NOTE(review): num_threads(100) is kept from the original; it
       oversubscribes most machines and is worth revisiting. */
    #pragma omp parallel for num_threads(100) private(i, j, count)
    for (i = 0; i < n; i++) {
        count = 0;
        for (j = 0; j < n; j++) {
            /* elements that precede a[i] in the sorted output */
            if (a[j] < a[i] || (a[j] == a[i] && j < i))
                count++;
        }
        temp[count] = a[i];  /* rank is unique: no write conflicts */
    }
    /* copy the sorted result back (plain loop; no <string.h> needed) */
    for (i = 0; i < n; i++)
        a[i] = temp[i];
    free(temp);
}
/* Allocates an n-element int array filled with pseudo-random digits in
 * [0, 9].  Reseeds the RNG from the wall clock on every call.  The caller
 * owns the returned buffer and must free() it. */
int *create_array(int n){
    int *buf = malloc(n * sizeof(int));
    srand(time(NULL));
    for (int idx = 0; idx < n; idx++)
        buf[idx] = rand() % 10;
    return buf;
}
/* Prints the n elements of a to stdout, each followed by a space, then a
 * blank line (two newlines).  Does not modify the array. */
void print_array(int *a, int n){
    for (int k = 0; k < n; k++)
        printf("%d ", a[k]);
    printf("\n\n");
}
/*
 * Driver: builds a SIZE-element random array, sorts it with count_sort,
 * prints it, and reports the elapsed time.
 *
 * Fixes vs. the original:
 *  - clock() measures CPU time summed over every OpenMP thread, which
 *    grossly over-reports the runtime of a parallel sort; omp_get_wtime()
 *    measures wall-clock time (omp.h is already included in this file);
 *  - the array returned by create_array() is now freed.
 */
int main()
{
    double begin, end, runtime;
    // Wall-clock start (meaningful across OpenMP threads, unlike clock()).
    begin = omp_get_wtime();
    // Program runs
    int *a = create_array(SIZE);
    count_sort(a, SIZE);
    print_array(a, SIZE);
    // Wall-clock end
    end = omp_get_wtime();
    runtime = end - begin;
    printf("\n\nRuntime : %f\n\n", runtime);
    free(a);  // fix: original leaked the array
    return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
/// NOTE(review): stored as a raw integer; the encoding of the kinds is not
/// visible in this header -- confirm against the code that assigns it.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
/// Defaults to false; the SourceLocation members default to invalid.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
/// Map from file IDs to per-file nullability records, fronted by a
/// single-entry most-recently-used cache.  The cached entry lives outside
/// the map and is written back only when evicted.
class FileNullabilityMap {
  /// Backing store for every file except the currently cached one.
  llvm::DenseMap<FileID, FileNullability> Map;
  /// The most-recently-accessed entry.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;
public:
  FileNullabilityMap &operator=(const FileNullabilityMap &) = delete;
  FileNullabilityMap() = default;
  FileNullabilityMap(const FileNullabilityMap &) = default;
  FileNullability &operator[](FileID file) {
    // Fast path: the request hits the one-entry cache.
    if (file == Cache.File)
      return Cache.Nullability;
    // Miss: write the previously cached entry back before replacing it
    // (an invalid FileID means the cache has never held anything).
    if (!Cache.File.isInvalid())
      Map[Cache.File] = Cache.Nullability;
    // Pull the requested entry (default-constructing it if absent) into
    // the cache and hand out a reference to the cached copy.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
/// Keeps track of the expected type during expression parsing.  The type is
/// tied to one particular token: every producer/consumer passes the start
/// location of the token it is looking at, which lets hot parser paths skip
/// updates that could not be observed.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; clients must ensure every call to get() with the same
  /// location happens while the function_ref is still alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);
  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);
  /// Returns the expected type for the token starting at \p Tok, or a null
  /// QualType when no expectation was recorded for that location.
  QualType get(SourceLocation Tok) const {
    // Only the token the expectation was recorded for may consume it.
    if (Tok != ExpectedLoc)
      return QualType();
    // An eagerly stored type takes precedence over the deferred computation.
    if (!Type.isNull())
      return Type;
    // Fall back to the lazy computation, if one was registered.
    return ComputeType ? ComputeType() : QualType();
  }
private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only
  /// considered if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration is always linked against.
  if (isVisible(Old))
    return true;
  // See comment in below overload for why it's safe to compute the linkage
  // of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;
  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
// Section name currently in effect for this section kind.
// NOTE(review): presumably set/cleared by Act() -- its definition is not
// in this header; confirm in the implementation file.
std::string SectionName;
// Whether a "#pragma clang section" is currently active for this kind.
bool Valid = false;
// Location of the pragma that last changed this state (for diagnostics).
SourceLocation PragmaLocation;
// Applies a PCSA_Set/PCSA_Clear action from "#pragma clang section";
// defined out of line.
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// Generic stack used to model MSVC-style push/pop #pragma state
// (e.g. #pragma pack, vtordisp, code_seg).  Holds a default value, the
// current value, and a stack of labeled saved slots.
template<typename ValueType>
struct PragmaStack {
// One saved entry on the pragma stack: an optional label, the saved
// value, and the source locations of the acting and pushing pragmas.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
// Performs a PragmaMsStackAction (set/push/pop/reset...) on this stack;
// defined out of line.
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
// True when the current value differs from the default (i.e. some pragma
// has taken effect).
bool hasValue() const { return CurrentValue != DefaultValue; }
// Saved entries, oldest first.
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
// Value currently in effect.
ValueType CurrentValue;
// Location of the pragma that produced CurrentValue.
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Constructed with ShouldAct=true on entering a C++ method body and the
// destructor undoes the push; methods are defined out of line.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
// The Sema whose pragma stacks are being guarded.
Sema &S;
// Label identifying the sentinel slots pushed by the constructor.
StringRef SlotLabel;
// Whether the constructor pushed (and the destructor must pop) sentinels.
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encoutered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
// Registers the parser callbacks used to parse late-parsed (templated)
// function bodies on demand, plus the opaque parser pointer passed back
// to those callbacks.  Either callback may be null to clear it.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
// Opaque saved state for DelayedDiagnostics push/pop operations: remembers
// the diagnostic pool that was current before a push, so the matching pop
// can restore it.  Only DelayedDiagnostics may touch the contents.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
/// Encapsulates the logic for delaying access/deprecation diagnostics
/// during parsing and other processing: diagnostics are routed into the
/// current pool (when one is installed) instead of being emitted.
class DelayedDiagnostics {
  /// Pool that currently collects delayed diagnostics; null means
  /// diagnostics are emitted immediately.
  sema::DelayedDiagnosticPool *CurPool;
public:
  DelayedDiagnostics() : CurPool(nullptr) {}
  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }
  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; }
  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = &pool;
    return Saved;
  }
  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }
  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = nullptr;
    return Saved;
  }
  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
/// A RAII object to temporarily push a declaration context.
/// On construction it switches Sema::CurContext (optionally clearing the
/// CXXThisTypeOverride) and suppresses delayed diagnostics; on pop() or
/// destruction it restores all of the saved state.
class ContextRAII {
private:
Sema &S;
// Context to restore; null once pop() has run (makes pop() idempotent).
DeclContext *SavedContext;
// Saved delayed-diagnostics state from pushUndelayed().
ProcessingContextState SavedContextState;
// 'this' type override in effect before the push.
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
// Restores the saved state early; safe to call more than once.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
/// True when the innermost expression-evaluation context is constant
/// evaluated, or the lightweight override flag is set.
bool isConstantEvaluated() {
  if (ExprEvalContexts.back().isConstantEvaluated())
    return true;
  return isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
/// RAII object to handle the state changes required to synthesize
/// a function body: pushes a declaration context, a function scope, and a
/// PotentiallyEvaluated expression-evaluation context, and undoes all of
/// them (in reverse order) on destruction.
class SynthesizedFunctionScope {
Sema &S;
// Restores CurContext (and friends) when this scope ends.
Sema::ContextRAII SavedContext;
// Whether addContextNote() pushed a code-synthesis context we must pop.
bool PushedCodeSynthesisContext = false;
public:
// DC must be either a FunctionDecl or an ObjCMethodDecl.
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
// Mark the function as about to receive a body so it is not treated
// as merely declared while synthesis is in progress.
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
// Records a "while defining the synthesized function" note so that any
// diagnostics emitted during synthesis point back at UseLoc.  May be
// called at most once per scope.
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
// Clear the in-progress marker set in the constructor.
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, e.g. the operand of a SIZE operator in MS-style
/// inline assembly (see ExpressionEvaluationContext::UnevaluatedAbstract).
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
/// NOTE(review): not initialized by the constructor below — presumably
/// set separately by the code that pushes this record; confirm at the
/// push site.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
/// Maybe-ODR-used expressions saved from the enclosing context.
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
/// Expressions recorded as possible dereferences of 'noderef' pointers
/// in this context (see CheckAddressOfNoDeref and friends).
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
/// True for all three unevaluated-operand flavors.
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
// Pointer: the selected special member (may be null).
// Int (2 bits): the Kind of the overload-resolution outcome.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
// Convenience: wrap a resolved method, deriving the kind from whether
// the method is deleted.
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
// A SpecialMemberOverloadResult that can live in a FoldingSet keyed by a
// precomputed FoldingSetNodeID (see SpecialMemberCache below).
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// RAII object that captures Sema's floating-point feature state on
/// entry to a scope (e.g. a compound statement) and restores it on exit.
class FPFeaturesStateRAII {
  // Order matters: S must be initialized before OldFPFeaturesState,
  // which reads from it.
  Sema &S;
  FPOptions OldFPFeaturesState;

public:
  FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPFeaturesStateRAII() { S.FPFeatures = OldFPFeaturesState; }
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
// Destructor is where the diagnostic is actually emitted, routed
// through Sema so template-instantiation notes can be attached.
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
// Stream through the base class so DiagnosticBuilder's operator<<
// overloads apply, then hand back the Sema-flavored wrapper so
// chained << keeps producing SemaDiagnosticBuilder.
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic with ID \p DiagID at \p Loc, wrapped so that
/// template-instantiation notes can be attached when it is flushed.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  // Build the underlying diagnostic first; the wrapper takes it over
  // and emits it (through EmitCurrentDiagnostic) in its destructor.
  DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
// Back-pointer to the owning Sema; the deleter needs it to dispose of
// the scope (operator() is defined out of line).
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Return the innermost function scope being analyzed, or null when no
/// function scope is currently active.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract callback interface used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() = default;

  /// Emit the diagnostic for type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;

  virtual ~TypeDiagnoser() = default;
};
// getPrintable: overload set that normalizes heterogeneous argument types
// into forms the diagnostic streaming operator can print (used by
// BoundTypeDiagnoser to forward its stored arguments). Note that
// SourceLocation and Expr arguments are converted to SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
// A TypeDiagnoser that captures a diagnostic ID plus a tuple of extra
// arguments at construction, and streams them (followed by the offending
// type) into the diagnostic when diagnose() is invoked.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
// References to the caller's arguments; the caller must keep them alive
// for the lifetime of this diagnoser.
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
// The braced-init-list guarantees left-to-right evaluation of the
// pack expansion; the leading 'false' keeps the array non-empty for
// an empty pack.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
// State recorded for each module currently being parsed (see
// ModuleScopes below).
struct ModuleScope {
// Location where this module scope began.
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
// Whether this scope is a module interface (as opposed to an
// implementation unit).
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
// NOTE(review): presumably the enclosing scope's visible-module set,
// saved so it can be restored when this scope is popped — confirm at
// the pop site.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Return the module whose scope we are currently within, or null when
/// we are not inside any module scope.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Return the module that owns \p Entity (may be null).
Module *getOwningModule(const Decl *Entity) {
  // Thin forwarding wrapper around Decl's own owning-module query.
  Module *Owner = Entity->getOwningModule();
  return Owner;
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: anything not hidden is trivially visible; only hidden
  // declarations need the slow module-visibility computation.
  if (!D->isHidden())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Cheap visibility check on \p D itself first; otherwise defer to the
  // slow path, which can also report the modules involved via \p Modules.
  if (isVisible(D))
    return true;
  return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Determine whether \p T is complete at \p Loc.
/// Passes a null diagnoser — presumably this suppresses any diagnostic;
/// confirm against RequireCompleteTypeImpl.
bool isCompleteType(SourceLocation Loc, QualType T) {
  if (RequireCompleteTypeImpl(Loc, T, /*Diagnoser=*/nullptr))
    return false;
  return true;
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
/// Require that \p T be complete, emitting \p DiagID with the given extra
/// arguments on failure. Packages the arguments into a BoundTypeDiagnoser
/// and delegates to the TypeDiagnoser overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BTD(DiagID, Args...);
  return RequireCompleteType(Loc, T, BTD);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
/// Require that the type of \p E be complete, emitting \p DiagID with the
/// given extra arguments on failure. Packages the arguments into a
/// BoundTypeDiagnoser and delegates to the TypeDiagnoser overload.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BTD(DiagID, Args...);
  return RequireCompleteExprType(E, BTD);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
/// Require that \p T be a literal type, emitting \p DiagID with the given
/// extra arguments on failure. Packages the arguments into a
/// BoundTypeDiagnoser and delegates to the TypeDiagnoser overload.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BTD(DiagID, Args...);
  return RequireLiteralType(Loc, T, BTD);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Describes whether a definition's body should be skipped, and the prior
/// and new declarations involved when checking it against a previous
/// definition.
struct SkipBodyInfo {
  SkipBodyInfo() = default;
  // Whether the parser should skip this body.
  bool ShouldSkip = false;
  // Whether the body must be checked for structural equality with Previous.
  bool CheckSameAsPrevious = false;
  // The previously-seen declaration, if any.
  NamedDecl *Previous = nullptr;
  // The newly-parsed declaration, if any.
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
/// The kind-specific payload for each value is carried by the
/// NameClassification class below.
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
/// Produced by getTemplateNameKindForDiagnostics() below.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name. Sets \p Dependent to true when the decision is based on
/// a dependent-scope expression.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Only meaningful for valid C++ expressions.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Expr *Ex = E.get();
  Dependent = false;
  // Non-dependent references: plausible unless explicit template
  // arguments were already written.
  if (auto *Ref = dyn_cast<DeclRefExpr>(Ex))
    return !Ref->hasExplicitTemplateArgs();
  if (auto *Member = dyn_cast<MemberExpr>(Ex))
    return !Member->hasExplicitTemplateArgs();
  // From here on, any match is inherently dependent.
  Dependent = true;
  if (auto *DepRef = dyn_cast<DependentScopeDeclRefExpr>(Ex))
    return !DepRef->hasExplicitTemplateArgs();
  if (auto *DepMember = dyn_cast<CXXDependentScopeMemberExpr>(Ex))
    return !DepMember->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How CheckConstexprFunctionDefinition (below) should report problems.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
// NOTE(review): the enumerator order presumably must match the %select in
// that diagnostic — confirm before reordering.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
// Bitmask values (0x1/0x2/0x4): combinable via bitwise-or into the
// NonTrivialKind argument.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Determine whether \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
/// The form of module-declaration being acted on (see ActOnModuleDecl).
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parsed has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
/// Consumed by the diagnoseMissingImport overloads below.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
// Convenience overload: forwards this Sema's ASTContext and Preprocessor
// to the static overload declared below.
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
/// Produced by getNonTagTypeDeclKind() below.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag (struct/class/union/enum) name is being used at the point of
/// reference (see ActOnTag below).
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute should be taken into account when
/// determining a special member's triviality (see SpecialMemberIsTrivial).
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is:
/// either a C++ special member or a defaulted comparison (or neither).
class DefaultedFunctionKind {
  // At most one of these holds a real kind; the other carries its "none"
  // sentinel (CXXInvalid / DefaultedComparisonKind::None). Packed into
  // 8-bit bitfields.
  CXXSpecialMember SpecialMember : 8;
  DefaultedComparisonKind Comparison : 8;

public:
  DefaultedFunctionKind()
      : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
  }
  DefaultedFunctionKind(CXXSpecialMember CSM)
      : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
  DefaultedFunctionKind(DefaultedComparisonKind Comp)
      : SpecialMember(CXXInvalid), Comparison(Comp) {}

  bool isSpecialMember() const {
    return SpecialMember != CXXInvalid;
  }
  bool isComparison() const {
    return Comparison != DefaultedComparisonKind::None;
  }

  /// True when this describes either kind of defaulted function.
  explicit operator bool() const {
    return isSpecialMember() || isComparison();
  }

  CXXSpecialMember asSpecialMember() const {
    return SpecialMember;
  }
  DefaultedComparisonKind asComparison() const {
    return Comparison;
  }

  /// Get the index of this function kind for use in diagnostics.
  unsigned getDiagnosticIndex() const {
    static_assert(CXXInvalid > CXXDestructor,
                  "invalid should have highest index");
    static_assert((unsigned)DefaultedComparisonKind::None == 0,
                  "none should be equal to zero");
    // Since at most one field is set and the other is zero-valued/None,
    // this sum yields the active kind's index.
    return SpecialMember + (unsigned)Comparison;
  }
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Convenience wrapper: the special-member half of
/// getDefaultedFunctionKind(\p MD).
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
/// Convenience wrapper: the comparison half of
/// getDefaultedFunctionKind(\p FD).
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
/// ObjC: invoked once the last ivar in a list has been parsed
/// (presumably to finish bit-field layout — confirm in implementation).
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
/// ObjC: act on a single parsed instance variable (possibly a bit-field).
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
/// Opaque token returned when entering a skipped tag definition and
/// handed back when leaving it.
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
/// Check and create a single enumerator from its parsed pieces.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
/// Validate the written fixed underlying type of an enumeration.
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
/// Check a redeclaration of an enum against a previous declaration \p Prev.
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
/// Act on one parsed enumerator of \p EnumDecl; \p LastEnumConstant is the
/// previously processed enumerator, if any.
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
/// Act on the complete, parsed body of an enumeration.
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
/// Determine whether typedef \p New conflicts with prior type decl \p Old.
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Each returns the resulting merged attribute
/// when a new attribute was added, and null otherwise (the declarations
/// return attribute pointers, not bool, so "added" is signalled by non-null).
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
/// Merge the attributes of \p Old into redeclaration \p New, using the
/// given availability-merge policy.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
/// Merge a typedef redeclaration with previous lookup results.
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
/// Note (in a diagnostic) the location of the previous definition \p Old.
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
/// Classify whether \p New overloads, matches, or conflicts with the
/// declarations in \p OldDecls; \p OldDecl receives the relevant prior decl.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
/// Which kinds of explicit functions may participate in a conversion.
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
/// Compare the parameter lists of two function prototypes. \p ArgPos, when
/// non-null, presumably receives the position of the first mismatching
/// parameter — confirm in the implementation.
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
/// Initialize \p Entity from \p Value, preferring move semantics for the
/// NRVO candidate when permitted (see the NRVOCandidate/AllowNRVO params).
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
/// Convert the implicit object argument \p From for a call to \p Method.
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// Diagnostic-suppression flags consumed by the conversion driver.
// NOTE(review): exact semantics of each flag live in
// PerformContextualImplicitConversion — confirm there.
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
// Virtual destructor: converters are used polymorphically.
virtual ~ContextualImplicitConverter() {}
};
/// Converter whose filter accepts integral and (optionally scoped)
/// enumeration types; used where an integer-like value is required.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether scoped enumeration types are also accepted by match().
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
// Funnel the generic "no match" case into the more specific
// "not an integer" diagnostic supplied by subclasses.
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// Kind of ObjC container being subscripted (or an error).
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
/// Add a single function as an overload candidate for a call with the
/// given arguments.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
/// Add every function in \p Functions as an overload candidate.
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// Strip pointer/reference/member-pointer layers off a function type:
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
// NOTE(review): the parameter name below has a typo ("Converion");
// harmless in a declaration, but worth fixing alongside the definition.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
/// Rewrite a reference to an overload set into a reference to the
/// specific function \p Fn that overload resolution selected.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
/// Build a call whose callee is an unresolved overload set
/// (\p ExecConfig carries a CUDA kernel launch configuration, if any).
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
///
/// Passed as the NameKind/LookupKind argument to LookupName,
/// LookupSingleName, LookupVisibleDecls, and CorrectTypo below; each kind
/// restricts which declarations the lookup can find.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up a name following ~ in a destructor name. This is an ordinary
  /// lookup, but prefers tags to typedefs.
  LookupDestructorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
///
/// Passed as the Redecl argument to LookupSingleName and LookupProtocol;
/// see also forRedeclarationInCurContext below.
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};
/// Choose the RedeclarationKind appropriate for a redeclaration lookup
/// performed in the current declaration context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  Decl *ContextDecl = cast<Decl>(CurContext);
  bool HasOwningModule =
      ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true) != nullptr;
  return HasOwningModule ? ForVisibleRedeclaration : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
/// Returned by LookupLiteralOperator below.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// State tracked for one delayed TypoExpr: the consumer that produces
/// candidate corrections plus the diagnostic and recovery callbacks to run
/// for it. Stored per TypoExpr in DelayedTypos.
struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  // Move-only: Consumer is a unique_ptr, so copying is implicitly deleted.
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// The outcome of deciding whether a function will be emitted, based on its
/// CUDA/HIP/OpenMP host/device attributes. Returned by getEmissionStatus
/// below.
enum class FunctionEmissionStatus {
  Emitted,
  CUDADiscarded, // Discarded due to CUDA/HIP hostness
  OMPDiscarded, // Discarded due to OpenMP hostness
  TemplateDiscarded, // Discarded due to uninstantiated templates
  Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
/// The situation in which typo correction is being invoked; passed as the
/// Mode argument to CorrectTypo and CorrectTypoDelayed below.
enum CorrectTypoKind {
  CTK_NonError, // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: correct typos in \p E with no initializer VarDecl
/// to avoid, applying \p Filter to each rebuilt candidate expression.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, /*InitDecl=*/nullptr, Filter);
}
/// ExprResult variant of CorrectDelayedTyposInExpr: propagates an invalid
/// result unchanged, otherwise corrects the wrapped expression.
///
/// \param ER The (possibly invalid) expression result to check for TypoExprs.
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
/// \param Filter Applied to each rebuilt candidate; see the Expr* overload.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Forward InitDecl to the Expr* overload. Previously it was silently
  // dropped here, so callers supplying an initializer's VarDecl got no
  // self-reference avoidance during correction.
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload: correct typos in \p ER with no initializer VarDecl
/// to avoid, applying \p Filter to each rebuilt candidate expression.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, /*InitDecl=*/nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method declarations must agree when matched;
/// passed as the strategy argument to MatchTwoMethodDeclarations below.
enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
///
/// \param Typo the identifier for which correction failed.
/// \param TypoLoc the location of the failed correction attempt.
/// \param RecordFailure when false, nothing is recorded; an empty
/// correction is still returned.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    // Remember the (identifier, location) pair of this failure.
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Forwards to AddMethodToGlobalPool with instance == true.
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Forwards to AddMethodToGlobalPool with instance == false.
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  // Forwards to LookupMethodInGlobalPool with instance == true.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  // Forwards to LookupMethodInGlobalPool with instance == false.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Wrapper for an expression that has been processed as a "full expression"
/// via Sema::MakeFullExpr; gives statement-building actions a distinct
/// argument type for such expressions. The wrapped Expr may be null.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }
  // Hands the wrapped expression back as an ExprResult.
  ExprResult release() {
    return E;
  }
  Expr *get() const { return E; }
  Expr *operator->() {
    return E;
  }
private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E; // The wrapped expression; may be null.
};
/// Wrap \p Arg as a full expression, using the expression's own location
/// (or an invalid SourceLocation when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// Wrap \p Arg as a full expression at location \p CC; the value is not
/// treated as discarded (see MakeFullDiscardedValueExpr for that case).
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
/// Wrap \p Arg as a full expression whose value is discarded, using the
/// expression's own location (or an invalid location when \p Arg is null).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation ExprLoc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, ExprLoc, /*DiscardedValue*/ true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
/// Calls ActOnStartOfCompoundStmt on construction and
/// ActOnFinishOfCompoundStmt on destruction.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }
private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disabled.
struct FunctionScopeRAII {
  Sema &S;
  bool Active; // When false, the destructor does nothing (see disable()).
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  // Cancel the scope pop performed by the destructor.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The phase in which a C++ for-range statement is being built; passed to
/// ActOnCXXForRangeStmt and BuildCXXForRangeStmt below.
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bit-flag values controlling which variables may be considered as
/// copy-elision candidates; combined values below are presets. Passed as
/// CESK to getCopyElisionCandidate / isCopyElisionCandidate below.
enum CopyElisionSemanticsKind {
  CES_Strict = 0,
  CES_AllowParameters = 1,
  CES_AllowDifferentTypes = 2,
  CES_AllowExceptionVariables = 4,
  CES_FormerDefault = (CES_AllowParameters),
  CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
  CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                       CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
// --- Unused-entity and suspicious-construct diagnostics.
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Begin a parsed-declaration scope whose diagnostics are collected into
/// \p pool instead of being emitted immediately; ended by the matching
/// PopParsingDeclaration call.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Enter a class-parsing context: bumps the nesting depth and pushes an
/// undelayed diagnostic state; must be balanced by PopParsingClass.
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
/// Leave a class-parsing context, restoring the state saved by the matching
/// PushParsingClass (note: increment/decrement must stay balanced).
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
// --- Availability diagnostics (deprecated/unavailable/partial declarations).
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
// Expression evaluation contexts are pushed/popped as a stack; the two
// overloads differ only in how the lambda context declaration is chosen.
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
// Kind of a block/lambda variable capture being attempted.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
// Main entry point for an identifier appearing in expression context;
// performs lookup (with optional typo correction via CCC) and builds the
// appropriate reference expression.
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
// DeclRefExpr builders: the overloads differ in how the qualifier and found
// declaration are supplied (bare scope spec vs. full name info vs.
// nested-name-specifier location).
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
// C11 _Generic selection: ActOn* takes parsed types, Create* takes resolved
// TypeSourceInfos.
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
// sizeof/alignof/vec_step and friends (UnaryExprOrTypeTrait).
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
// Member-access expression building: the first overload starts from a name,
// the second from an already-performed lookup result.
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
// CUDA kernel-launch execution configuration; LLLLoc/GGGLoc are the
// locations of the '<<<' and '>>>' tokens.
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
// C-style casts, compound literals, and braced initializer lists.
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
// Binary operators and the conditional operator.
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
// Microsoft __if_exists / __if_not_exists support.
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
// Lazily-created handles to well-known standard-library entities.
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
// Using-directives, namespace aliases, and using-declarations.
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
/// Check and convert the argument list \p ArgsPtr for a call to
/// \p Constructor, placing the converted expressions in \p ConvertedArgs.
///
/// \returns true on error.
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
/// Handle the dependent case of a constructor name named via an inheriting
/// constructor ('using Base::Base;').
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
/// Resolve an identifier used where a constructor name is expected.
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
/// Resolve a '~identifier' used where a destructor name is expected.
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
/// Resolve a destructor named via '~decltype(...)'.
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
/// Build a named cast once the target type has been resolved.
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
/// Handle a '__builtin_bit_cast(T, expr)' expression.
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
/// Build a typeid expression for a type operand.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
/// Build a typeid expression for an expression operand.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Build a __uuidof expression for a type operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
/// Build a __uuidof expression for an expression operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
/// Build a fold-expression once the operator kind has been determined.
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
/// Build the value of a fold-expression over an empty pack.
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
/// Mark the given CXXThisExpr as referenced in the current context.
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride; // saved value, restored on destruction
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// Handle an Objective-C '@available(...)' availability-check expression.
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
/// Build a throw-expression; \p IsThrownVarInScope indicates whether the
/// thrown variable is in scope (relevant to copy elision).
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
/// Check that \p E is a valid operand for a throw of type \p ThrowTy.
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
/// Build a functional-style cast / type-construction expression once the
/// type has been resolved.
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
/// Build a 'new' expression once the allocated type has been determined.
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
/// Check that \p AllocType is a type that may legally be allocated with
/// 'new'.
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
/// Declare the implicit global forms of operator new and operator delete.
void DeclareGlobalNewDelete();
/// Declare one implicit global allocation function with the given name,
/// return type, and parameter types.
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
/// Find an 'operator delete' named \p Name in the scope of \p RD,
/// returning it through \p Operator. \returns true on error.
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
/// Find the usual (non-placement) deallocation function with the
/// given name.
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
/// Find the deallocation function to be used from the destructor of \p RD.
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
/// Diagnose problems with a call to the given destructor, e.g. a
/// virtual call through a pointer to a base with a non-virtual destructor.
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
/// Parsed a 'noexcept(expression)' expression.
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
/// Build a 'noexcept(expression)' expression from a checked operand.
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
/// Build a type-trait expression from already-resolved type arguments.
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
/// Begin analysis of a member access ('.' or '->') on \p Base, determining
/// the object type and whether this may be a pseudo-destructor call.
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
/// Build a pseudo-destructor expression ('p->~T()' on a non-class type).
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
/// Parsed a pseudo-destructor expression whose destroyed type is named
/// by identifiers.
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
/// Parsed a pseudo-destructor expression whose destroyed type is named
/// by a decl-specifier sequence (e.g. '~decltype(...)').
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
/// Create a MaterializeTemporaryExpr that binds \p Temporary to a
/// reference of type \p T.
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Finish a full-expression, taking the source location from the
/// expression itself (or an invalid location when \p Expr is null).
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation Loc;
  if (Expr)
    Loc = Expr->getExprLoc();
  return ActOnFinishFullExpr(Expr, Loc, DiscardedValue);
}
/// Finish a full-expression at the given location, performing any
/// required cleanups and checks.
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
/// Finish a full statement, wrapping it with cleanups if needed.
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
/// Compute the DeclContext designated by the given type, if any.
DeclContext *computeDeclContext(QualType T);
/// Compute the DeclContext designated by the given scope specifier.
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
/// Determine whether the given scope specifier is dependent.
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
/// If \p NNS names the current instantiation, return its CXXRecordDecl.
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
/// Determine whether \p SD can appear in a nested-name-specifier.
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
/// Find the declaration, if any, that the first qualifier of \p NNS refers
/// to in scope \p S.
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {}

  /// Convenience form taking a QualType: wraps it as a ParsedType and
  /// delegates to the primary constructor.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : NestedNameSpecInfo(II, IdLoc, ColonColonLoc,
                           ParsedType::make(ObjectType)) {}
};
/// Determine whether the identifier in \p IdInfo names a non-type entity
/// (and so cannot form a valid nested-name-specifier).
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
/// Build a nested-name-specifier component from \p IdInfo, extending \p SS.
/// \p ScopeLookupResult supplies a pre-computed lookup result, if any.
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// Check and finish the operand of a 'decltype(...)' expression.
ExprResult ActOnDecltypeExpression(Expr *E);
/// The parser has parsed a nested-name-specifier 'decltype(...)::'.
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
/// Determine whether the identifier in \p IdInfo would be valid only if
/// followed by '::' (i.e. it must be a nested-name-specifier).
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
/// Determine whether parsing a declarator with scope specifier \p SS should
/// enter that scope.
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Every init-capture form except plain copy-init is treated as
  // direct-initialization.
  bool IsDirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id, IsDirectInit, Init);
  return ParsedType::make(CaptureType);
}
/// Perform the initialization analysis for a lambda init-capture and
/// compute its deduced type; \p Init may be rewritten with any implicit
/// conversions that are applied.
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
/// Build the block object produced by converting the lambda expression
/// \p Src to a block pointer via the conversion function \p Conv.
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
/// Check whether the given type-dependent expression will be the name of a
/// function or another callable function-like entity (e.g. a function
/// template or overload set) for any substitution.
bool IsDependentFunctionNameExpr(Expr *E);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
/// Caches the results of constraint satisfaction checks.
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
/// Get the normalized form of the associated constraints of the given
/// declaration, computing and caching it (in NormalizationCache) on first
/// use. Returns nullptr if an error occurred during normalization.
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
/// \brief Diagnose a mismatch between the constraints of two declarations of
/// the same entity, pointing at the previous (\p Old) and current (\p New)
/// declaration locations.
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
/// Build the AST node for an Objective-C string literal ('@' followed by a
/// string literal).
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
/// Build an Objective-C boolean literal expression (\@YES / \@NO).
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
/// Build an Objective-C array literal (\@[...]) from its element
/// expressions.
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
/// Build an Objective-C subscript expression, using the given getter
/// and/or setter methods.
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
/// Build an Objective-C dictionary literal (\@{...}) from its key/value
/// elements.
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
/// Build an \@encode expression for the given (already resolved) type.
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
/// Build a call to the conversion member function \p Method on \p Exp.
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
/// ParseObjCEncodeExpression - Build an \@encode expression from the
/// parser-supplied pieces.
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
/// Parsed the start of a C++ linkage-specification; \p LangStr is the
/// language string literal (e.g. "C").
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
/// Parsed the end of a braced C++ linkage-specification.
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
/// Get the class whose members are currently being declared, taking the
/// scope specifier into account if provided.
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
/// Determine whether \p II names the class type that is currently being
/// defined.
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
/// Determine whether \p II is a plausible misspelling of the current class
/// name; if so, \p II is updated to the corrected identifier.
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
/// Parsed an access specifier ('public:'/'protected:'/'private:') within a
/// class definition.
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
/// Parsed a C++ class member declarator; create and return the
/// corresponding member declaration.
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
/// Invoked when the parser is about to parse a C++ in-class member
/// initializer.
void ActOnStartCXXInClassMemberInitializer();
/// Invoked after a C++ in-class member initializer has been parsed.
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
/// Handle a C++ mem-initializer with a parenthesized argument list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
/// Handle a C++ mem-initializer with a braced-init-list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
/// Build a mem-initializer from the parsed pieces (see the more specific
/// BuildMemberInitializer / BuildBaseInitializer / BuildDelegatingInitializer
/// below).
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
/// Build a mem-initializer for the non-static data member \p Member.
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
/// Build a mem-initializer for the base class \p BaseType.
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
/// Build a delegating-constructor initializer.
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
/// Set the ivar initializers for an Objective-C implementation declaration.
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
/// Add C++'s implicitly-declared special member functions to the given
/// class, as needed.
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
/// Act on the complete set of mem-initializers of a constructor
/// definition.
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
/// Check a class-level 'code_seg' attribute.
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
/// Propagate the DLL attribute \p ClassAttr of \p Class to the base class
/// template specialization \p BaseTemplateSpec.
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
/// Perform the checks that apply once a C++ class definition is complete.
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
/// Parsed the closing brace and attribute list of a C++ class
/// member-specification.
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
/// Record the tokens of a late-parsed template so it can be parsed at the
/// end of the translation unit.
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
/// Parsed a C++ static_assert declaration.
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
/// Check a friend type declaration and build the corresponding FriendDecl.
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
/// Check a constructor declarator, computing the adjusted function type
/// and storage class.
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
/// Check a destructor declarator, computing the adjusted function type
/// and storage class.
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier. CheckBaseSpecifier
/// performs the semantic checks and builds the CXXBaseSpecifier;
/// ActOnBaseSpecifier (below) is the parser-facing entry point.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
/// Check the given base specifiers and attach them to \p Class.
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
/// Determine whether \p Derived is derived from \p Base.
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// Check the validity of a pure-specifier ('= 0') on \p Method.
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The result of an access-control check.
enum AccessResult {
/// The access is permitted.
AR_accessible,
/// The access is not permitted.
AR_inaccessible,
/// The result cannot be determined yet (dependent context).
AR_dependent,
/// The access check has been delayed.
AR_delayed
};
/// Set the access specifier of \p MemberDecl, checking it for consistency
/// with any previous declaration of the same member.
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
/// Check access to all the declarations named by the given lookup result.
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload of isMemberAccessibleForDeletion that supplies an
/// invalid source location and an empty diagnostic to the primary overload.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found,
                                   QualType ObjectType) {
  SourceLocation InvalidLoc;
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                       InvalidLoc, PDiag());
}
/// Perform a previously-delayed dependent access check, substituting the
/// given template arguments.
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Emit the dependent diagnostics recorded in \p Pattern, substituting the
/// given template arguments.
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Perform the delayed access check recorded in \p DD.
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects the diagnostic wording for the context in which an abstract
/// type is being used.
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
/// Determine whether \p T is an abstract class type.
bool isAbstractType(SourceLocation Loc, QualType T);
/// Require that \p T not be an abstract class type, using \p Diagnoser to
/// report a violation.
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Require that \p T not be an abstract class type, reporting any violation
/// with diagnostic \p DiagID and the supplied arguments.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  // Bundle the diagnostic ID with its arguments and defer to the
  // TypeDiagnoser-based overload.
  BoundTypeDiagnoser<Ts...> Diag(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diag);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
/// Check the declaration of an overloaded operator function.
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
/// Check the declaration of a literal operator.
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
/// Remove from the lookup result \p R any declarations that are not
/// acceptable as template names.
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
/// Look up a name that might name a template, in the given scope or in the
/// type of an object expression.
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
/// Determine whether the given name refers to a template; on success,
/// \p Template is set and the kind of template is returned.
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
/// Diagnose a declaration that shadows the template parameter \p PrevDecl.
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
/// If \p Decl is a template declaration, replace it with the declaration it
/// templates and return the enclosing template declaration.
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
/// Parsed a template type parameter (possibly a pack, possibly with a
/// default argument or type-constraint).
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
/// Parsed a non-type template parameter.
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
/// Parsed a template template parameter.
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
/// Build a template parameter list from the parsed parameters and the
/// optional requires-clause.
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
/// Check a template parameter list against the parameter list of any
/// previous declaration, in the given context.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
/// Match the given template parameter lists to the nested-name-specifier of
/// a declaration, determining which list (if any) applies to the
/// declaration itself.
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
/// Parsed a class template declaration or redeclaration.
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
/// Translate the parsed template arguments in \p In into AST template
/// argument info in \p Out.
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
/// Emit notes for each of the templates found for \p Name.
void NoteAllFoundTemplates(TemplateName Name);
/// Check a template-id and form the type it names.
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
/// Parsed a template-id that names a type.
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
///
/// Passed (defaulted to CTAK_Specified) to the CheckTemplateArgument
/// overloads declared below to indicate the argument's provenance.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
///
/// Consumed by TemplateParameterListsAreEqual (declared below) to pick
/// the appropriate matching rules.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// A lambda expression.
UPPC_Lambda,
/// A block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
/// Deduce the template arguments of a class template partial
/// specialization from the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Deduce the template arguments of a variable template partial
/// specialization from the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Substitute the explicitly-specified template arguments into the
/// signature of the given function template, producing the substituted
/// parameter types and (optionally) the substituted function type.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
/// The parameter type from which deduction was performed.
QualType OriginalParamType;
/// NOTE(review): presumably whether the parameter was decomposed before
/// deduction was performed; confirm against the deduction call sites.
bool DecomposedParam;
/// The index of the corresponding call argument.
unsigned ArgIdx;
/// The type of the call argument used for deduction.
QualType OriginalArgType;
};
/// Complete function template argument deduction after the per-argument
/// steps, forming the resulting function \p Specialization.
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
/// Deduce template arguments for a function template from a set of
/// call arguments (plus any explicitly-specified template arguments).
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
/// Deduce template arguments for a function template against a target
/// function type (e.g., when taking the address of a function template).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Deduce template arguments for a conversion function template against
/// the type being converted to.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// Deduce template arguments when only explicitly-specified template
/// arguments (no function type or call arguments) are available.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed.
DAR_Failed,
/// Deduction failed, and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
/// Deduce the type of \c auto from an initializer expression.
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
/// Diagnose a failure to deduce the type of the given variable from its
/// initializer.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// Deduce the (deferred) return type of the given function.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
/// Deduce a class template specialization type (class template argument
/// deduction) from an initializer.
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
/// Deduce the type of a variable declaration from its initializer.
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
/// Deduce a function's return type from a return statement's expression.
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// Determine which of two function templates is more specialized
/// under the partial ordering rules.
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
/// Find the most specialized declaration in [SBegin, SEnd), emitting the
/// given diagnostics when none exists or the choice is ambiguous.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
/// Determine which of two class template partial specializations is
/// more specialized.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
/// Determine which of two variable template partial specializations is
/// more specialized.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
/// Mark the template parameters at depth \p Depth that are used (or, when
/// \p OnlyDeduced is set, deduced) within the given expression.
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Convenience overload that marks the deduced template parameters of
/// \p FunctionTemplate using this semantic-analysis object's ASTContext.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
// Forward to the static overload with our own context.
MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
/// Mark which template parameters of \p FunctionTemplate are deduced,
/// recording them in \p Deduced.
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
/// Retrieve the template argument lists to use when instantiating the
/// declaration \p D.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
/// Retrieve the template arguments as an array. Only valid for kinds
/// that store template arguments (asserts on DeclaringSpecialMember).
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
// The substitution index to restore when this object is destroyed.
int PrevSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), PrevSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
// Put back whatever index was active before this scope.
Self.ArgumentPackSubstitutionIndex = PrevSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and the object evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
/// Tag type used to select the exception-specification constructor.
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Tag type used to select the constraints-check constructor.
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Tag type used to select the constraint-substitution constructor.
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
/// Tag type used to select the constraint-normalization constructor.
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
/// Tag type used to select the parameter-mapping constructor.
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
/// Internal constructor taking the synthesis kind directly; the public
/// constructors delegate to this one.
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
// Not copyable: each object owns one entry on the context stack.
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
/// Push a new context onto the stack of active code-synthesis contexts.
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
/// Pop the innermost code-synthesis context off the stack.
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
/// Print the active code-synthesis context stack, unless it was already
/// emitted at this same depth, then report the pragma-attribute
/// instantiation point when one is active.
void PrintContextStack() {
const auto Depth = CodeSynthesisContexts.size();
if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
// Remember the depth so repeated diagnostics don't re-print the stack.
LastEmittedCodeSynthesisContextDepth = Depth;
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
/// Print the stack of active template instantiations to the diagnostics.
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
// Saved state, restored verbatim in the destructor.
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
// When no SFINAE context is active, mark that this trap was set up
// outside of any template instantiation or deduction.
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
// Restore all of the saved state on scope exit.
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
// Saved typo-correction setting, restored in the destructor.
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
/// Cache used by the thread-safety analysis.
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, takes over the pending-instantiation
/// and vtable-use queues so that work required by the enclosed code can
/// be flushed via perform(); the destructor restores the previously
/// pending work.
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
// Stash the currently pending work; it is restored in the destructor.
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
/// Define any used vtables and perform the instantiations queued while
/// this scope has been active.
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that isolates the queue of pending local implicit
/// instantiations; perform() flushes the local queue, and the destructor
/// restores the saved queue.
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
/// Perform only the local pending instantiations.
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
// Restore the queue that was saved at construction time.
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
// True once any entry differs from the default-constructed value.
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
///
/// Indices must be set in strictly increasing order (enforced by the
/// assert below); skipped entries are default-filled by the resize.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
///
/// Returns null when every entry equals the default value, so callers
/// can omit the array entirely in that case.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute whose instantiation has been deferred until after the
/// enclosing declaration was instantiated (handed to InstantiateAttrs via a
/// LateInstantiatedAttrVec and processed later).
struct LateInstantiatedAttribute {
// The attribute as written on the template pattern.
const Attr *TmplAttr;
// Local instantiation scope captured when the attribute was deferred;
// presumably restored when the attribute is finally instantiated — confirm
// at the use site.
LocalInstantiationScope *Scope;
// The already-instantiated declaration the attribute will be attached to.
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container the current context corresponds to;
/// OCK_None indicates the context is not an Objective-C container at all.
enum ObjCContainerKind {
OCK_None = -1,             // not inside any Objective-C container
OCK_Interface = 0,         // @interface Class
OCK_Protocol,              // @protocol
OCK_Category,              // @interface Class (Category)
OCK_ClassExtension,        // @interface Class ()
OCK_Implementation,        // @implementation Class
OCK_CategoryImplementation // @implementation Class (Category)
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Objective-C method families that receive special treatment.
/// NOTE(review): the precise semantics of each kind are defined at the use
/// sites — the names suggest the alloc/new/copy and init families; confirm
/// against the consumers before relying on this.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Per-selector-argument information gathered while parsing an Objective-C
/// method declaration; consumed by ActOnMethodDeclaration below.
struct ObjCArgInfo {
// Name of the argument and the location where it was written.
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,   // the result type is compatible with the method
RTC_Incompatible, // the result type is known to be incompatible
RTC_Unknown       // compatibility could not be determined
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// Distinguishes the two situations in which a non-default #pragma pack
/// state is diagnosed (see DiagnoseNonDefaultPragmaPack).
enum class PragmaPackDiagnoseKind {
// The pragma is in a non-default state at the point an include is entered.
NonDefaultStateAtInclude,
// The pragma state was changed and not restored by the point of exit.
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// The kind of section named by an MS section pragma
/// (see ActOnPragmaMSSeg below).
enum PragmaSectionKind {
PSK_DataSeg,  // #pragma data_seg
PSK_BSSSeg,   // #pragma bss_seg
PSK_ConstSeg, // #pragma const_seg
PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(LangOptions::FPRoundingModeKind);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on",
/// i.e. no range-based optnone region is currently active.
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation; // invalid location <=> pragma is "on"
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
/// Families of retain-ownership ("consumed") attributes handled by
/// AddXConsumedAttr below. NOTE(review): names suggest NS = Foundation,
/// CF = Core Foundation, OS = libkern OSObject — confirm at the use sites.
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Return the name of the OpenCL extension currently being processed
/// (empty if none has been set via setCurrentOpenCLExtension).
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Record \p Ext as the OpenCL extension currently being processed;
/// it is returned by getCurrentOpenCLExtension until replaced.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext.str();
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Checks if the variant/multiversion functions are compatible, emitting the
/// supplied diagnostics when they are not.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  // A nonzero nesting level means we are lexically inside at least one
  // '#pragma omp declare target' ... 'end declare target' region.
  return DeclareTargetNestingLevel != 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation DepLinMapLastLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true when \p CCK names one of the explicit cast forms
/// (C-style, functional, or other), and false for implicit conversions
/// and builtin-operator operand conversions.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  case CCK_ImplicitConversion:
  case CCK_ForBuiltinOverloadedOp:
    return false;
  }
  return false; // Unreachable for in-range enumerators.
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,    ///< Call to a variadic function.
  VariadicBlock,       ///< Call to a variadic block.
  VariadicMethod,      ///< Call to a variadic method.
  VariadicConstructor, ///< Call to a variadic constructor.
  VariadicDoesNotApply ///< The callee is not variadic.
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,         ///< Passing an argument of this type is valid.
  VAK_ValidInCXX11,  ///< Valid in C++11 and later.
  VAK_Undefined,     ///< Passing this type has undefined behavior.
  VAK_MSVCUndefined, ///< Undefined, but name suggests it is tolerated in
                     ///< MSVC-compatibility mode — confirm at use sites.
  VAK_Invalid        ///< Passing this type is invalid.
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that accepts
/// ExprResults: unwraps both operands, delegates to the Expr*& overload,
/// and writes any rewritten expressions back into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *First = E1.get();
  Expr *Second = E2.get();
  // The Expr*& overload may replace either operand with a converted form.
  QualType Result = FindCompositePointerType(Loc, First, Second, ConvertArgs);
  E1 = First;
  E2 = Second;
  return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// Holds the result of processing a condition expression (together with any
/// condition variable it declares), plus a statically known value when one
/// can be computed.
class ConditionResult {
  // The variable declared in the condition, if any (may be null).
  Decl *ConditionVar;
  // The converted condition expression itself.
  FullExprArg Condition;
  // True if processing the condition failed; see ConditionError().
  bool Invalid;
  // True when the condition is constexpr, non-null, and not value-dependent,
  // so its constant value could be evaluated.
  // NOTE: declaration order matters — KnownValue's initializer below reads
  // HasKnownValue, so HasKnownValue must be declared (hence initialized)
  // first.
  bool HasKnownValue;
  // The evaluated constant value; only meaningful when HasKnownValue is true.
  bool KnownValue;

  friend class Sema;

  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}

  /// Returns true if condition processing failed.
  bool isInvalid() const { return Invalid; }
  /// Returns the condition variable (null if none) paired with the condition
  /// expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  /// Returns the statically known boolean value of the condition, or None
  /// when no constant value is known.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
/// Returns a ConditionResult in the invalid state, for use when condition
/// processing fails.
static ConditionResult ConditionError() { return ConditionResult(true); }

/// The syntactic context in which a condition expression appears; see the
/// per-enumerator notes.
enum class ConditionKind {
  Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  // If true, diagnostics should be suppressed rather than emitted —
  // presumably honored by VerifyIntegerConstantExpression; confirm at call
  // sites.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Diagnose that the expression is not an integer constant expression.
  /// Pure virtual; every concrete diagnoser must provide this.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Diagnose the case where the expression is not an ICE but was folded to
  /// a constant (the AllowFold path of VerifyIntegerConstantExpression);
  /// overridable, with a default implementation defined elsewhere.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
// Nesting depth of the pragma forcing __host__ __device__; see
// Push/PopForceCUDAHostDevice below.
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
// Streams Value either into the live immediate diagnostic, or into the
// deferred PartialDiagnostic stored on the attached function; a K_Nop
// builder has neither and silently discards the value.
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
// Sema instance used to emit or record the diagnostic.
Sema &S;
SourceLocation Loc;
unsigned DiagID;
// Function the deferred diagnostic (if any) is attached to.
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
// Index into DeviceDeferredDiags[Fn] for the deferred diagnostic.
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
// Dispatches to the CUDA or OpenMP variant based on the current language
// mode / compilation target.
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
// Classification of a function with respect to CUDA host/device targeting.
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
/// Consumed by the CodeComplete* entry points declared in this section.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
// --- Objective-C code completion ---
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
// --- Preprocessor code completion ---
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
// Index of the format-string argument (per the format attribute).
unsigned FormatIdx;
// Index of the first data argument consumed by the format string.
unsigned FirstDataArg;
// True when the callee takes a va_list instead of variadic arguments.
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
// Per-target builtin call checkers, dispatched by CheckBuiltinFunctionCall.
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
// Checkers for individual builtin call forms; each validates arguments of
// one builtin (or builtin family) and returns true on error, except the
// ExprResult-returning overloaded forms which rebuild the call.
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
// Helpers for validating integer-constant builtin arguments (value,
// range, multiple-of, power-of-two, shifted-byte encodings).
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
// Family of a format string, as determined from the format attribute;
// controls which conversion-specifier rules apply during checking.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
// Type information associated with a registered type-tag magic value.
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// If true, the tagged argument must be a null pointer constant.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
// Lazily-initialized identifier caches (mutable so const getters can fill
// them in on first use).
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
// Prefers the lexical context recorded before template instantiation, if
// one was saved; otherwise the current semantic context.
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
/// \param NumParams number of parameters the callee declares.
/// \param NumArgs number of arguments parsed so far.
/// \param PartialOverloading true during code completion, when the
///        argument list may still be incomplete.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// In code completion, being just after a comma means one more argument is
// coming, so count it as if it had already been written.
const size_t EffectiveArgs =
(PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
/// RAII helper that stashes Sema's delayed exception-spec check lists on
/// construction (by swapping them into empty local copies) and restores
/// them on destruction.  The destructor asserts that no new delayed
/// checks accumulated while the state was saved.
class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    swapSavedState();
  }

private:
  Sema &S;

  // Local storage mirroring Sema's lists; holds the saved state between
  // construction and destruction.
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;

  /// Exchange the saved lists with Sema's live ones (used for both save
  /// and restore, since swap is its own inverse).
  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
  }
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
/// Helper record describing one access to a potentially misaligned
/// member (due to the packed attribute), collected for delayed
/// diagnostics.
struct MisalignedMember {
  Expr *E;             // the member-access expression
  RecordDecl *RD;      // record that owns the member
  ValueDecl *MD;       // the member being accessed
  CharUnits Alignment; // the (reduced) alignment of the access

  MisalignedMember() : E(), RD(), MD(), Alignment() {}

  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}

  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  /// Entries compare equal when they refer to the same expression.
  /// Marked const (the original was a non-const member function, which
  /// prevented comparisons on const operands).
  bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
/// RAII object that enters a new expression evaluation context on
/// construction and pops it on destruction.  Some constructors enter
/// conditionally; the Entered flag tracks whether a matching pop is owed.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true; // true iff the destructor must pop a context

public:
  /// Enter NewContext (unless ShouldEnter is false, in which case this is
  /// a no-op wrapper).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Enter NewContext, reusing the enclosing lambda context declaration.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };

  /// Variant for braced-init-lists: only enters a context when currently
  /// inside an unevaluated operand in C++11 or later.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  /// Buffered tokens of the function body, replayed when the template is
  /// finally parsed.
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
/// DenseMap key traits for FunctionDeclAndLoc: delegates the empty and
/// tombstone keys to the canonical-FunctionDecl traits and hashes/compares
/// both the declaration and the source location.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    // Combine both components so entries differing only in location do
    // not collide systematically.
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm
#endif
|
MicroMlpAffine.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <cstdint>
#include <random>
#include "bb/Manager.h"
#include "bb/SparseModel.h"
#include "bb/ShuffleSet.h"
namespace bb {
// Mini-MLP Affine
template <int N = 6, int M = 16, typename FXT = float, typename T = float>
class MicroMlpAffine : public SparseModel
{
using _super = SparseModel;
public:
// Model name encodes the template geometry, e.g. "MicroMlpAffine6_16".
static inline std::string ModelName(void) { return "MicroMlpAffine" + std::to_string(N) + "_" + std::to_string(M); }
// Object name additionally encodes the input and weight data types.
static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<FXT>::Name() + "_" + DataType<T>::Name(); }

std::string GetModelName(void) const override { return ModelName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
public: // debug
bool m_binary_mode = false;
bool m_host_only = false;
bool m_host_simd = false;
std::string m_connection;
T m_initialize_std = (T)0.01;
std::string m_initializer = "he";
std::mt19937_64 m_mt;
index_t m_input_node_size = 0;
index_t m_output_node_size = 0;
indices_t m_input_shape;
indices_t m_output_shape;
Tensor_<std::int32_t> m_input_index;
std::shared_ptr<Tensor> m_W0;
std::shared_ptr<Tensor> m_b0;
std::shared_ptr<Tensor> m_dW0;
std::shared_ptr<Tensor> m_db0;
std::shared_ptr<Tensor> m_W1;
std::shared_ptr<Tensor> m_b1;
std::shared_ptr<Tensor> m_dW1;
std::shared_ptr<Tensor> m_db1;
public:
FrameBuffer m_x_buf;
protected:
MicroMlpAffine() {
m_W0 = std::make_shared<Tensor>();
m_b0 = std::make_shared<Tensor>();
m_dW0 = std::make_shared<Tensor>();
m_db0 = std::make_shared<Tensor>();
m_W1 = std::make_shared<Tensor>();
m_b1 = std::make_shared<Tensor>();
m_dW1 = std::make_shared<Tensor>();
m_db1 = std::make_shared<Tensor>();
}
// Apply a runtime command of the form "<key> <value>".
// Recognized keys: "binary" (binary mode), "host_only" (force host
// execution), "host_simd" (enable the host SIMD path).  Anything else
// is silently ignored.
void CommandProc(std::vector<std::string> args)
{
    // Expect exactly a key/value pair.
    if (args.size() != 2) {
        return;
    }

    if (args[0] == "binary") {
        m_binary_mode = EvalBool(args[1]);
    }
    else if (args[0] == "host_only") {
        m_host_only = EvalBool(args[1]);
    }
    else if (args[0] == "host_simd") {
        m_host_simd = EvalBool(args[1]);
    }
}
public:
~MicroMlpAffine() {}
// Aggregate of construction parameters for Create().
struct create_t
{
    indices_t output_shape;           // shape of the output layer (required)
    std::string connection;           // connection policy, forwarded to InitializeNodeInput()
    T initialize_std = (T)0.01;       // std-dev for the "normal"/"uniform" initializers
    std::string initializer = "";     // "he", "xavier", "normal", "uniform", or "" for the default
    std::uint64_t seed = 1;           // RNG seed for connections and weight init
};
// Factory: build a layer from a create_t parameter pack.
// Seeds the RNG and fixes the output shape; input-side state is set up
// lazily by SetInputShape() on the first Forward().
static std::shared_ptr<MicroMlpAffine> Create(create_t const &create)
{
    auto self = std::shared_ptr<MicroMlpAffine>(new MicroMlpAffine);

    BB_ASSERT(!create.output_shape.empty());

    self->m_initialize_std = create.initialize_std;
    self->m_initializer = create.initializer;
    self->m_mt.seed(create.seed);

    self->m_output_shape = create.output_shape;
    self->m_output_node_size = CalcShapeSize(self->m_output_shape);

    self->m_connection = create.connection;

    return self;
}
// Convenience factory taking the output shape directly.
static std::shared_ptr<MicroMlpAffine> Create(indices_t const &output_shape, std::string connection = "", std::uint64_t seed = 1)
{
    // Fill a create_t and delegate to the main factory.
    create_t cfg;
    cfg.output_shape = output_shape;
    cfg.connection   = connection;
    cfg.seed         = seed;
    return Create(cfg);
}
// Convenience factory taking a flat output node count; wraps it in a
// rank-1 shape and delegates to the main factory.
static std::shared_ptr<MicroMlpAffine> Create(index_t output_node_size, std::string connection = "", std::uint64_t seed = 1)
{
    create_t cfg;
    cfg.output_shape.resize(1);
    cfg.output_shape[0] = output_node_size;
    cfg.connection      = connection;
    cfg.seed            = seed;
    return Create(cfg);
}
// Factory with all-default parameters.
// NOTE(review): create_t{}.output_shape is empty, so the BB_ASSERT in
// Create(create_t const&) will fire — presumably this overload exists
// only for deserialization paths; confirm against callers.
static std::shared_ptr<MicroMlpAffine> Create(void)
{
    return Create(create_t());
}
// シリアライズ
protected:
// Serialize persistent state to a stream.
// Layout: version tag, base-class data, flags/config, shapes, the
// connection table, then the trainable tensors.  Gradients are not
// persisted (they are resized and zeroed on load).
// The field order here is the format contract and must match
// LoadObjectData() exactly.
void DumpObjectData(std::ostream &os) const override
{
    // format version
    std::int64_t ver = 1;
    bb::SaveValue(os, ver);

    // base class
    _super::DumpObjectData(os);

    // members
    bb::SaveValue(os, m_binary_mode);
    bb::SaveValue(os, m_host_simd);
    bb::SaveValue(os, m_host_only);
    bb::SaveValue(os, m_connection);
    bb::SaveValue(os, m_initialize_std);
    bb::SaveValue(os, m_initializer);
    bb::SaveValue(os, m_input_shape);
    bb::SaveValue(os, m_output_shape);
    m_input_index.DumpObject(os);
    m_W0->DumpObject(os);
    m_b0->DumpObject(os);
    m_W1->DumpObject(os);
    m_b1->DumpObject(os);
}
// Deserialize state written by DumpObjectData() (field order must match)
// and rebuild the derived members: node sizes from the shapes and
// gradient tensors sized to mirror the loaded weights.
void LoadObjectData(std::istream &is) override
{
    // format version
    std::int64_t ver;
    bb::LoadValue(is, ver);

    BB_ASSERT(ver == 1);

    // base class
    _super::LoadObjectData(is);

    // members
    bb::LoadValue(is, m_binary_mode);
    bb::LoadValue(is, m_host_simd);
    bb::LoadValue(is, m_host_only);
    bb::LoadValue(is, m_connection);
    bb::LoadValue(is, m_initialize_std);
    bb::LoadValue(is, m_initializer);
    bb::LoadValue(is, m_input_shape);
    bb::LoadValue(is, m_output_shape);
    m_input_index.LoadObject(is);
    m_W0->LoadObject(is);
    m_b0->LoadObject(is);
    m_W1->LoadObject(is);
    m_b1->LoadObject(is);

    // rebuild derived state
    m_input_node_size  = CalcShapeSize(m_input_shape);
    m_output_node_size = CalcShapeSize(m_output_shape);
    m_dW0->Resize(m_W0->GetShape(), m_W0->GetType());
    m_db0->Resize(m_b0->GetShape(), m_b0->GetType());
    m_dW1->Resize(m_W1->GetShape(), m_W1->GetType());
    m_db1->Resize(m_b1->GetShape(), m_b1->GetType());
}
public:
// Serialize(旧)
// Legacy serialization: write sizes, shapes, the connection table and the
// weights (no version tag, no flags; superseded by DumpObjectData).
void Save(std::ostream &os) const
{
    SaveIndex(os, m_input_node_size);
    SaveIndex(os, m_output_node_size);
    SaveIndices(os, m_input_shape);
    SaveIndices(os, m_output_shape);
    m_input_index.Save(os);
    m_W0->Save(os);
    m_b0->Save(os);
    m_W1->Save(os);
    m_b1->Save(os);
}
// Legacy deserialization matching Save(); gradient tensors are resized
// to mirror the loaded weights.
void Load(std::istream &is)
{
    m_input_node_size  = LoadIndex(is);
    m_output_node_size = LoadIndex(is);
    m_input_shape      = LoadIndices(is);
    m_output_shape     = LoadIndices(is);
    m_input_index.Load(is);
    m_W0->Load(is);
    m_b0->Load(is);
    m_W1->Load(is);
    m_b1->Load(is);
    m_dW0->Resize(m_W0->GetShape(), m_W0->GetType());
    m_db0->Resize(m_b0->GetShape(), m_b0->GetType());
    m_dW1->Resize(m_W1->GetShape(), m_W1->GetType());
    m_db1->Resize(m_b1->GetShape(), m_b1->GetType());
}
#ifdef BB_WITH_CEREAL
// cereal serialization: base class first, then sizes, shapes, the
// connection table and the trainable parameters.
template <class Archive>
void save(Archive& archive, std::uint32_t const version) const
{
    _super::save(archive, version);
    archive(cereal::make_nvp("input_node_size", m_input_node_size));
    archive(cereal::make_nvp("output_node_size", m_output_node_size));
    archive(cereal::make_nvp("input_shape", m_input_shape));
    archive(cereal::make_nvp("output_shape", m_output_shape));
    archive(cereal::make_nvp("input_index", m_input_index));
    archive(cereal::make_nvp("W0", *m_W0));
    archive(cereal::make_nvp("b0", *m_b0));
    archive(cereal::make_nvp("W1", *m_W1));
    archive(cereal::make_nvp("b1", *m_b1));
    // gradients are intentionally not serialized
    // archive(cereal::make_nvp("dW0", *m_dW0));
    // archive(cereal::make_nvp("db0", *m_db0));
    // archive(cereal::make_nvp("dW1", *m_dW1));
    // archive(cereal::make_nvp("db1", *m_db1));
}
// cereal deserialization; field order matches save().
// NOTE(review): unlike LoadObjectData(), this does not resize the
// gradient tensors afterwards — presumably done elsewhere; confirm.
template <class Archive>
void load(Archive& archive, std::uint32_t const version)
{
    _super::load(archive, version);
    archive(cereal::make_nvp("input_node_size", m_input_node_size));
    archive(cereal::make_nvp("output_node_size", m_output_node_size));
    archive(cereal::make_nvp("input_shape", m_input_shape));
    archive(cereal::make_nvp("output_shape", m_output_shape));
    archive(cereal::make_nvp("input_index", m_input_index));
    archive(cereal::make_nvp("W0", *m_W0));
    archive(cereal::make_nvp("b0", *m_b0));
    archive(cereal::make_nvp("W1", *m_W1));
    archive(cereal::make_nvp("b1", *m_b1));
    // gradients are intentionally not serialized
    // archive(cereal::make_nvp("dW0", *m_dW0));
    // archive(cereal::make_nvp("db0", *m_db0));
    // archive(cereal::make_nvp("dW1", *m_dW1));
    // archive(cereal::make_nvp("db1", *m_db1));
}
// Serialize this layer as a named JSON node.
void Save(cereal::JSONOutputArchive& archive) const
{
    archive(cereal::make_nvp("MicroMlpAffine", *this));
}

// Deserialize this layer from a named JSON node.
void Load(cereal::JSONInputArchive& archive)
{
    archive(cereal::make_nvp("MicroMlpAffine", *this));
}
#endif
// --- parameter accessors (W0/b0: first affine stage, W1/b1: second) ---
Tensor &W0(void) { return *m_W0; }
Tensor const &W0(void) const { return *m_W0; }
Tensor &b0(void) { return *m_b0; }
Tensor const &b0(void) const { return *m_b0; }
Tensor &W1(void) { return *m_W1; }
Tensor const &W1(void) const { return *m_W1; }
Tensor &b1(void) { return *m_b1; }
Tensor const &b1(void) const { return *m_b1; }

// --- gradient accessors ---
Tensor &dW0(void) { return *m_dW0; }
Tensor const &dW0(void) const { return *m_dW0; }
Tensor &db0(void) { return *m_db0; }
Tensor const &db0(void) const { return *m_db0; }
Tensor &dW1(void) { return *m_dW1; }
Tensor const &dW1(void) const { return *m_dW1; }
Tensor &db1(void) { return *m_db1; }
Tensor const &db1(void) const { return *m_db1; }

// --- lock helpers: acquire typed (or index-table) views of the tensors ---
auto lock_InputIndex(void) { return m_input_index.Lock(); }
auto lock_InputIndex_const(void) const { return m_input_index.LockConst(); }
auto lock_W0(void) { return m_W0->Lock<T>(); }
auto lock_W0_const(void) const { return m_W0->LockConst<T>(); }
auto lock_b0(void) { return m_b0->Lock<T>(); }
auto lock_b0_const(void) const { return m_b0->LockConst<T>(); }
auto lock_W1(void) { return m_W1->Lock<T>(); }
auto lock_W1_const(void) const { return m_W1->LockConst<T>(); }
auto lock_b1(void) { return m_b1->Lock<T>(); }
auto lock_b1_const(void) const { return m_b1->LockConst<T>(); }
auto lock_dW0(void) { return m_dW0->Lock<T>(); }
auto lock_dW0_const(void) const { return m_dW0->LockConst<T>(); }
auto lock_db0(void) { return m_db0->Lock<T>(); }
auto lock_db0_const(void) const { return m_db0->LockConst<T>(); }
auto lock_dW1(void) { return m_dW1->Lock<T>(); }
auto lock_dW1_const(void) const { return m_dW1->LockConst<T>(); }
auto lock_db1(void) { return m_db1->Lock<T>(); }
auto lock_db1_const(void) const { return m_db1->LockConst<T>(); }
// Each output node has exactly N inputs (fixed by the template
// parameter); the node argument exists only for the SparseModel API.
index_t GetNodeConnectionSize(index_t node) const
{
    return N;
}

// Wire input_node into slot (node, input_index) of the connection table.
void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node)
{
    auto ptr = lock_InputIndex();
    ptr(node, input_index) = (std::int32_t)input_node;
}

// Read the input node wired into slot (node, input_index).
index_t GetNodeConnectionIndex(index_t node, index_t input_index) const
{
    auto ptr = lock_InputIndex_const();
    return (index_t)ptr(node, input_index);
}
/**
 * @brief  Set the input shape.
 * @detail Resizes the connection table, (re-)wires the node inputs, sizes
 *         all parameter/gradient tensors for the N-input/M-hidden micro
 *         MLP of each output node, and initializes the weights according
 *         to m_initializer ("he"/"xavier"/"normal"/"uniform", anything
 *         else falls back to uniform +-sqrt(1/N)).  No-op when the shape
 *         is unchanged.
 * @param  shape  new input shape
 * @return the (unchanged) output shape
 */
indices_t SetInputShape(indices_t shape)
{
    // nothing to do if already configured with this shape
    if ( shape == this->GetInputShape() ) {
        return this->GetOutputShape();
    }

    // record the shape
    m_input_shape = shape;
    m_input_node_size = CalcShapeSize(shape);

    // initialize the connection table (N inputs per output node)
    m_input_index.Resize(m_output_node_size, N);
    this->InitializeNodeInput(m_mt(), m_connection);

    // allocate parameters: W0 (out,M,N), b0 (out,M), W1 (out,M), b1 (out)
    m_W0->Resize({m_output_node_size, M, N}, DataType<T>::type);
    m_b0->Resize({m_output_node_size, M}, DataType<T>::type);
    m_W1->Resize({m_output_node_size, M}, DataType<T>::type);
    m_b1->Resize({m_output_node_size}, DataType<T>::type);
    m_dW0->Resize({m_output_node_size, M, N}, DataType<T>::type);
    m_db0->Resize({m_output_node_size, M}, DataType<T>::type);
    m_dW1->Resize({m_output_node_size, M}, DataType<T>::type);
    m_db1->Resize({m_output_node_size}, DataType<T>::type);

    if (m_initializer == "he" || m_initializer == "He") {
        // He init: std = sqrt(2/N) (overwrites any user-set std)
        m_initialize_std = (T)std::sqrt((double)2.0 / (double)N);
        m_W0->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b0->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_W1->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b1->InitNormalDistribution(0.0, m_initialize_std, m_mt());
    }
    else if (m_initializer == "xavier" || m_initializer == "Xavier" ) {
        // Xavier init: std = sqrt(1/N)
        m_initialize_std = (T)std::sqrt((double)1.0 / (double)N);
        m_W0->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b0->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_W1->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b1->InitNormalDistribution(0.0, m_initialize_std, m_mt());
    }
    else if (m_initializer == "normal" || m_initializer == "Normal" ) {
        // normal init with the user-supplied std
        m_W0->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b0->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_W1->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b1->InitNormalDistribution(0.0, m_initialize_std, m_mt());
    }
    else if (m_initializer == "uniform" || m_initializer == "Uniform" ) {
        // uniform init on [-k, +k] with matching variance (k = std*sqrt(3))
        double k = (double)m_initialize_std * std::sqrt(3.0);
        m_W0->InitUniformDistribution(-k, +k, m_mt());
        m_b0->InitUniformDistribution(-k, +k, m_mt());
        m_W1->InitUniformDistribution(-k, +k, m_mt());
        m_b1->InitUniformDistribution(-k, +k, m_mt());
    }
    else {
        // default: uniform on [-sqrt(1/N), +sqrt(1/N)]
        double k = std::sqrt(1.0 / (double)N);
        m_W0->InitUniformDistribution(-k, +k, m_mt());
        m_b0->InitUniformDistribution(-k, +k, m_mt());
        m_W1->InitUniformDistribution(-k, +k, m_mt());
        m_b1->InitUniformDistribution(-k, +k, m_mt());
    }

    // start from zero gradients
    m_dW0->FillZero();
    m_db0->FillZero();
    m_dW1->FillZero();
    m_db1->FillZero();

    return m_output_shape;
}
/**
 * @brief  Set the output shape.
 * @detail The shape may be changed freely as long as the total number of
 *         output nodes stays the same.
 * @param  shape  new output shape
 * @return none
 */
void SetOutputShape(indices_t const &shape)
{
    BB_ASSERT(CalcShapeSize(shape) == m_output_node_size);
    m_output_shape = shape;
}
/**
 * @brief  Get the input shape.
 * @return the current input shape
 */
indices_t GetInputShape(void) const
{
    return m_input_shape;
}

/**
 * @brief  Get the output shape.
 * @return the current output shape
 */
indices_t GetOutputShape(void) const
{
    return m_output_shape;
}
// Collect the trainable tensors (order: W0, b0, W1, b1) for the optimizer.
Variables GetParameters(void)
{
    Variables params;
    for (auto &p : {m_W0, m_b0, m_W1, m_b1}) {
        params.PushBack(p);
    }
    return params;
}
// Collect the gradient tensors in the same order as GetParameters().
Variables GetGradients(void)
{
    Variables grads;
    for (auto &g : {m_dW0, m_db0, m_dW1, m_db1}) {
        grads.PushBack(g);
    }
    return grads;
}
// Inject / extract the retained forward-pass input (exposed for debugging).
void SetFrameBufferX(FrameBuffer x) { m_x_buf = x; }
FrameBuffer GetFrameBufferX(void) { return m_x_buf; }
// Per-node forward computation (reference/debug path).
// Runs the micro MLP for a single output node:
//   affine0 (N -> M) -> ReLU -> affine1 (M -> 1).
// node        : output node index
// input_value : the N input values already gathered for this node
// returns     : vector holding the node's single output value
std::vector<double> ForwardNode(index_t node, std::vector<double> input_value) const
{
    auto W0 = lock_W0_const();
    auto b0 = lock_b0_const();
    auto W1 = lock_W1_const();
    auto b1 = lock_b1_const();

    // affine0: hidden pre-activations
    std::vector<T> value0(M);
    for (std::size_t i = 0; i < M; ++i) {
        value0[i] = b0(node, i);
        for (std::size_t j = 0; j < N; ++j) {
            value0[i] += (T)input_value[j] * W0(node, i, j);
        }
    }

    // ReLU
    for (std::size_t i = 0; i < M; ++i) {
        value0[i] = std::max(value0[i], (T)0.0);
    }

    // affine1: reduce to the single node output
    std::vector<T> value1(1);
    value1[0] = b1(node);
    for (std::size_t i = 0; i < M; ++i) {
        value1[0] = value1[0] + value0[i] * W1(node, i);
    }

    // Convert to double.
    // BUGFIX: the result was previously sized M and the copy loop read
    // value1[1..M-1], an out-of-bounds read (value1 has exactly one
    // element).  Size the result from value1 instead.
    std::vector<double> value2(value1.size());
    for (std::size_t i = 0; i < value1.size(); ++i) {
        value2[i] = (double)value1[i];
    }

    return value2;
}
// Forward pass for one mini-batch.
// Dispatches, in priority order, to: the CUDA FP32 kernel, the CUDA
// bit-input kernel (both only for the N=6/M=16 specialization), the AVX
// host-SIMD path (when enabled via "host_simd"), and finally a generic
// scalar fallback.
// x_buf : input frame buffer (element type FXT)
// train : when true the input is retained in m_x_buf for Backward()
// returns the output frame buffer (element type T, shape m_output_shape)
FrameBuffer Forward(FrameBuffer x_buf, bool train = true)
{
    BB_ASSERT(x_buf.GetType() == DataType<FXT>::type);

    // if SetInputShape() has not been called yet, configure from the first input
    if ( x_buf.GetNodeSize() != m_input_node_size) {
        SetInputShape(x_buf.GetShape());
    }

    // keep the input for Backward()
    if ( train ) {
        m_x_buf = x_buf;
    }

    // allocate the output buffer
    FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<T>::type);

    // in binary mode, clip parameters into [-1, +1]
    if (m_binary_mode) {
        m_W0->Clamp_inplace(-1.0, +1.0);
        m_b0->Clamp_inplace(-1.0, +1.0);
        m_W1->Clamp_inplace(-1.0, +1.0);
        m_b1->Clamp_inplace(-1.0, +1.0);
    }

#ifdef BB_WITH_CUDA
    // FP32 CUDA version (only the N=6, M=16 specialization has a kernel)
    if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32
        && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
        auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
        auto x_ptr  = x_buf.LockDeviceMemoryConst();
        auto y_ptr  = y_buf.LockDeviceMemory();
        auto W0_ptr = m_W0->LockDeviceMemoryConst();
        auto b0_ptr = m_b0->LockDeviceMemoryConst();
        auto W1_ptr = m_W1->LockDeviceMemoryConst();
        auto b1_ptr = m_b1->LockDeviceMemoryConst();
        bbcu_fp32_MicroMlp6x16_Forward
            (
                (float const *)x_ptr.GetAddr(),
                (float       *)y_ptr.GetAddr(),
                (int   const *)input_index_ptr.GetAddr(),
                (float const *)W0_ptr.GetAddr(),
                (float const *)b0_ptr.GetAddr(),
                (float const *)W1_ptr.GetAddr(),
                (float const *)b1_ptr.GetAddr(),
                (int          )m_input_node_size,
                (int          )m_output_node_size,
                (int          )x_buf.GetFrameSize(),
                (int          )(x_buf.GetFrameStride() / sizeof(float))
            );
        return y_buf;
    }
#endif

#ifdef BB_WITH_CUDA
    // bit-input CUDA version (binary input, FP32 weights/output)
    if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_BIT && DataType<T>::type == BB_TYPE_FP32
        && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
        auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
        auto x_ptr  = x_buf.LockDeviceMemoryConst();
        auto y_ptr  = y_buf.LockDeviceMemory();
        auto W0_ptr = m_W0->LockDeviceMemoryConst();
        auto b0_ptr = m_b0->LockDeviceMemoryConst();
        auto W1_ptr = m_W1->LockDeviceMemoryConst();
        auto b1_ptr = m_b1->LockDeviceMemoryConst();
        bbcu_bit_fp32_MicroMlp6x16_Forward
            (
                (int   const *)x_ptr.GetAddr(),
                (float       *)y_ptr.GetAddr(),
                (int   const *)input_index_ptr.GetAddr(),
                (float const *)W0_ptr.GetAddr(),
                (float const *)b0_ptr.GetAddr(),
                (float const *)W1_ptr.GetAddr(),
                (float const *)b1_ptr.GetAddr(),
                (int          )m_input_node_size,
                (int          )m_output_node_size,
                (int          )x_buf.GetFrameSize(),
                (int          )(x_buf.GetFrameStride() / sizeof(float)),
                (int          )(y_buf.GetFrameStride() / sizeof(float))
            );
        return y_buf;
    }
#endif

    // AVX (host SIMD) version.
    // NOTE(review): frames are processed 8 at a time with aligned
    // loads/stores — assumes the frame stride is a multiple of 32 bytes;
    // confirm FrameBuffer guarantees this alignment.
    if ( DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32 && m_host_simd ) {
        // frame_size here is the stride measured in floats
        const index_t frame_size = x_buf.GetFrameStride() / sizeof(float);
        const __m256 zero = _mm256_set1_ps(0);

        auto x_ptr = x_buf.LockMemoryConst();
        auto y_ptr = y_buf.LockMemory();
        auto input_index_ptr = m_input_index.LockConst();
        auto W0_ptr = lock_W0_const();
        auto b0_ptr = lock_b0_const();
        auto W1_ptr = lock_W1_const();
        auto b1_ptr = lock_b1_const();

        auto in_sig_buf  = (float const *)x_ptr.GetAddr();
        auto out_sig_buf = (float       *)y_ptr.GetAddr();

#pragma omp parallel for
        for (index_t node = 0; node < m_output_node_size; ++node) {
            // broadcast this node's parameters into registers
            __m256 W0[M][N];
            __m256 b0[M];
            __m256 W1[M];
            __m256 b1;
            for (int i = 0; i < M; ++i) {
                for (int j = 0; j < N; ++j) {
                    W0[i][j] = _mm256_set1_ps(W0_ptr(node, i, j));
                }
                b0[i] = _mm256_set1_ps(b0_ptr(node, i));
                W1[i] = _mm256_set1_ps(W1_ptr(node, i));
            }
            b1 = _mm256_set1_ps(b1_ptr(node));

            // resolve the N input rows for this node
            float const *in_sig_ptr[N];
            float       *out_sig_ptr;
            for (int i = 0; i < N; ++i) {
                in_sig_ptr[i] = &in_sig_buf[input_index_ptr(node, i) * frame_size];
            }
            out_sig_ptr = &out_sig_buf[node * frame_size];

            // 8 frames per iteration
            for (index_t frame = 0; frame < frame_size; frame += 8) {
                __m256 in_sig[N];
                for (int i = 0; i < N; ++i) {
                    in_sig[i] = _mm256_load_ps(&in_sig_ptr[i][frame]);
                }

                __m256 sum1 = b1;
                for (int i = 0; i < M; ++i) {
                    // sub-layer0
                    __m256 sum0 = b0[i];
                    for (int j = 0; j < N; ++j) {
                        sum0 = _mm256_fmadd_ps(in_sig[j], W0[i][j], sum0);
                    }

                    // ReLU
                    sum0 = _mm256_max_ps(sum0, zero);

                    // sub-layer1
                    sum1 = _mm256_fmadd_ps(sum0, W1[i], sum1);
                }

                _mm256_store_ps(&out_sig_ptr[frame], sum1);
            }
        }
        return y_buf;
    }

    {
        // generic scalar fallback (any FXT/T combination)
        auto frame_size = x_buf.GetFrameSize();
        auto x_ptr = x_buf.LockConst<FXT>();
        auto y_ptr = y_buf.Lock<T>();
        auto input_index_ptr = m_input_index.LockConst();
        auto W0_ptr = lock_W0_const();
        auto b0_ptr = lock_b0_const();
        auto W1_ptr = lock_W1_const();
        auto b1_ptr = lock_b1_const();

#pragma omp parallel for
        for ( index_t node = 0; node < m_output_node_size; ++node ) {
            // resolve this node's N input indices once per node
            index_t in_idx[N];
            for ( int i = 0; i < N; ++i) {
                in_idx[i] = input_index_ptr(node, i);
            }
            for (index_t frame = 0; frame < frame_size; ++frame ) {
                T   in_sig[N];
                for ( int i = 0; i < N; ++i) {
                    in_sig[i] = (T)x_ptr.Get(frame, in_idx[i]);
                }

                T sum1 = b1_ptr(node);
                for (int i = 0; i < M; ++i) {
                    // sub-layer0
                    T sum0 = b0_ptr(node, i);
                    for (int j = 0; j < N; ++j) {
                        sum0 += in_sig[j] * W0_ptr(node, i, j);
                    }

                    // ReLU
                    sum0 = sum0 > (T)0 ? sum0 : (T)0;

                    // sub-layer1
                    sum1 += sum0 * W1_ptr(node, i);
                }

                y_ptr.Set(frame, node, sum1);
            }
        }
        return y_buf;
    }
}
FrameBuffer Backward(FrameBuffer dy_buf)
{
if (dy_buf.Empty()) {
return FrameBuffer();
}
BB_ASSERT(dy_buf.GetType() == DataType<T>::type);
// forward時データ取り出し
FrameBuffer x_buf = m_x_buf;
m_x_buf = FrameBuffer();
BB_ASSERT(x_buf.GetType() == DataType<FXT>::type);
// 出力設定
FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<T>::type);
// CUDA版
#ifdef BB_WITH_CUDA
if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32
&& !m_host_only && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
// CUDA版
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto dy_ptr = dy_buf.LockDeviceMemoryConst();
auto dx_ptr = dx_buf.LockDeviceMemory();
auto W0_ptr = m_W0->LockDeviceMemoryConst();
auto b0_ptr = m_b0->LockDeviceMemoryConst();
auto W1_ptr = m_W1->LockDeviceMemoryConst();
auto b1_ptr = m_b1->LockDeviceMemoryConst();
auto dW0_ptr = m_dW0->LockDeviceMemory();
auto db0_ptr = m_db0->LockDeviceMemory();
auto dW1_ptr = m_dW1->LockDeviceMemory();
auto db1_ptr = m_db1->LockDeviceMemory();
FrameBuffer dx_tmp(dy_buf.GetFrameSize(), {m_output_node_size * N}, BB_TYPE_FP32);
auto dx_tmp_ptr = dx_tmp.LockDeviceMemory();
bbcu_fp32_MicroMlp6x16_Backward
(
(float const *)x_ptr.GetAddr(),
(float *)dy_ptr.GetAddr(),
(float *)dx_ptr.GetAddr(),
(float *)dx_tmp_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(float const *)W0_ptr.GetAddr(),
(float const *)b0_ptr.GetAddr(),
(float *)dW0_ptr.GetAddr(),
(float *)db0_ptr.GetAddr(),
(float const *)W1_ptr.GetAddr(),
(float const *)b1_ptr.GetAddr(),
(float *)dW1_ptr.GetAddr(),
(float *)db1_ptr.GetAddr(),
(int )m_input_node_size,
(int )m_output_node_size,
(int )dy_buf.GetFrameSize(),
(int )dy_buf.GetFrameStride() / sizeof(float)
);
return dx_buf;
}
#endif
#ifdef BB_WITH_CUDA
if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_BIT && DataType<T>::type == BB_TYPE_FP32
&& !m_host_only && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
// CUDA版
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto dy_ptr = dy_buf.LockDeviceMemoryConst();
auto dx_ptr = dx_buf.LockDeviceMemory();
auto W0_ptr = m_W0->LockDeviceMemoryConst();
auto b0_ptr = m_b0->LockDeviceMemoryConst();
auto W1_ptr = m_W1->LockDeviceMemoryConst();
auto b1_ptr = m_b1->LockDeviceMemoryConst();
auto dW0_ptr = m_dW0->LockDeviceMemory();
auto db0_ptr = m_db0->LockDeviceMemory();
auto dW1_ptr = m_dW1->LockDeviceMemory();
auto db1_ptr = m_db1->LockDeviceMemory();
FrameBuffer dx_tmp(dy_buf.GetFrameSize(), {m_output_node_size * N}, BB_TYPE_FP32);
auto dx_tmp_ptr = dx_tmp.LockDeviceMemory();
bbcu_bit_fp32_MicroMlp6x16_Backward
(
(int const *)x_ptr.GetAddr(),
(float *)dy_ptr.GetAddr(),
(float *)dx_ptr.GetAddr(),
(float *)dx_tmp_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(float const *)W0_ptr.GetAddr(),
(float const *)b0_ptr.GetAddr(),
(float *)dW0_ptr.GetAddr(),
(float *)db0_ptr.GetAddr(),
(float const *)W1_ptr.GetAddr(),
(float const *)b1_ptr.GetAddr(),
(float *)dW1_ptr.GetAddr(),
(float *)db1_ptr.GetAddr(),
(int )m_input_node_size,
(int )m_output_node_size,
(int )dy_buf.GetFrameSize(),
(int )x_buf.GetFrameStride() / sizeof(int),
(int )dy_buf.GetFrameStride() / sizeof(float)
);
return dx_buf;
}
#endif
// m_dW0->FillZero();
// m_db0->FillZero();
// m_dW1->FillZero();
// m_db1->FillZero();
// AVX版
if ( DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32 ) {
index_t frame_size = dy_buf.GetFrameStride() / sizeof(float);
index_t node_size = m_output_node_size;
dx_buf.FillZero();
auto dy_ptr = dy_buf.LockMemoryConst();
auto dx_ptr = dx_buf.LockMemory();
auto x_ptr = x_buf.LockMemoryConst();
auto input_index_ptr = m_input_index.LockConst();
auto W0_ptr = lock_W0_const();
auto b0_ptr = lock_b0_const();
auto W1_ptr = lock_W1_const();
auto b1_ptr = lock_b1_const();
auto dW0_ptr = lock_dW0();
auto db0_ptr = lock_db0();
auto dW1_ptr = lock_dW1();
auto db1_ptr = lock_db1();
auto dy_addr = (float const *)dy_ptr.GetAddr();
auto dx_addr = (float *)dx_ptr.GetAddr();
auto x_addr = (float const *)x_ptr.GetAddr();
const __m256 zero = _mm256_set1_ps(0);
FrameBuffer dx_tmp(dy_buf.GetFrameSize(), {m_output_node_size * N}, BB_TYPE_FP32);
auto dx_tmp_ptr = dx_tmp.Lock<float>();
#pragma omp parallel for
for (int node = 0; node < (int)node_size; ++node) {
__m256 W0[M][N];
__m256 b0[M];
__m256 dW0[M][N];
__m256 db0[M];
__m256 W1[M];
__m256 dW1[M];
__m256 db1;
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
W0[i][j] = _mm256_set1_ps(W0_ptr (node, i, j));
dW0[i][j] = _mm256_set1_ps(0.0f);
}
b0[i] = _mm256_set1_ps(b0_ptr(node, i));
db0[i] = _mm256_set1_ps(0.0f);
W1[i] = _mm256_set1_ps(W1_ptr(node, i));
dW1[i] = _mm256_set1_ps(0.0f);
}
db1 = _mm256_set1_ps(0.0f);
float const *out_err_ptr;
float const *in_sig_ptr[N];
out_err_ptr = &dy_addr[frame_size * node];
for (int i = 0; i < N; ++i) {
in_sig_ptr[i] = &x_addr[frame_size * input_index_ptr(node, i)];
}
for (int frame = 0; frame < frame_size; frame += 8) {
__m256 in_sig[N];
for (int i = 0; i < N; ++i) {
in_sig[i] = _mm256_load_ps(&in_sig_ptr[i][frame]);
}
// 一層目の信号を再構成
__m256 sig0[M];
for (int i = 0; i < M; ++i) {
// sub-layer0
__m256 sum0 = b0[i];
for (int j = 0; j < N; ++j) {
sum0 = _mm256_fmadd_ps(in_sig[j], W0[i][j], sum0);
}
// ReLU
sum0 = _mm256_max_ps(sum0, zero);
sig0[i] = sum0;
}
// 逆伝播
__m256 in_err[N];
for (int i = 0; i < N; ++i) {
in_err[i] = zero;
}
__m256 out_err = _mm256_load_ps(&out_err_ptr[frame]);
db1 = _mm256_add_ps(db1, out_err);
for (int i = 0; i < M; ++i) {
__m256 err0 = _mm256_mul_ps(W1[i], out_err);
__m256 mask = _mm256_cmp_ps(sig0[i], zero, _CMP_GT_OS);
dW1[i] = _mm256_fmadd_ps(sig0[i], out_err, dW1[i]);
err0 = _mm256_and_ps(err0, mask); // ReLU
db0[i] = _mm256_add_ps(db0[i], err0);
for (int j = 0; j < N; ++j) {
in_err[j] = _mm256_fmadd_ps(err0, W0[i][j], in_err[j]);
dW0[i][j] = _mm256_fmadd_ps(err0, in_sig[j], dW0[i][j]);
}
}
for (int i = 0; i < N; ++i) {
float* tmp_dx_addr = dx_tmp_ptr.GetAddr(node * N + i);
_mm256_store_ps(&tmp_dx_addr[frame], in_err[i]);
}
}
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
dW0_ptr(node, i, j) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(dW0[i][j]));
}
db0_ptr(node, i) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(db0[i]));
dW1_ptr(node, i) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(dW1[i]));
}
db1_ptr(node) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(db1));
}
// 足しこみ
for (int node = 0; node < (int)node_size; ++node) {
float* in_err_ptr[N];
for (int i = 0; i < N; ++i) {
in_err_ptr[i] = &dx_addr[frame_size * input_index_ptr(node, i)];
}
#pragma omp parallel for
for (int frame = 0; frame < frame_size; frame += 8) {
for (int i = 0; i < N; ++i) {
__m256 in_err = _mm256_load_ps(&in_err_ptr[i][frame]);
float* tmp_dx_addr = dx_tmp_ptr.GetAddr(node * N + i);
__m256 tmp_err = _mm256_load_ps(&tmp_dx_addr[frame]);
in_err = _mm256_add_ps(in_err, tmp_err);
_mm256_store_ps(&in_err_ptr[i][frame], in_err);
}
}
}
return dx_buf;
}
{
    // Generic (scalar, templated) fallback implementation of the backward pass.
    index_t frame_size = dy_buf.GetFrameSize();
    index_t node_size  = m_output_node_size;

    dx_buf.FillZero();

    auto dy_ptr          = dy_buf.LockConst<T>();
    auto dx_ptr          = dx_buf.Lock<T>();
    auto x_ptr           = x_buf.LockConst<FXT>();
    auto input_index_ptr = m_input_index.Lock();
    auto W0_ptr          = lock_W0_const();
    auto b0_ptr          = lock_b0_const();
    auto W1_ptr          = lock_W1_const();
    auto b1_ptr          = lock_b1_const();
    auto dW0_ptr         = lock_dW0();
    auto db0_ptr         = lock_db0();
    auto dW1_ptr         = lock_dW1();
    auto db1_ptr         = lock_db1();

    // FrameBuffer dx_tmp(dy_buf.GetFrameSize(), m_output_node_size * N, BB_TYPE_FP32);
    // auto dx_tmp_ptr = dx_tmp.Lock<float>();

    // #pragma omp parallel for
    for (int node = 0; node < (int)node_size; ++node) {
        // Per-node weight copies and local gradient accumulators.
        // NOTE: declared as T (previously float) so no precision is lost when
        // T is double; values are read/written through T-typed accessors.
        T W0[M][N];
        T b0[M];
        T dW0[M][N];
        T db0[M];
        T W1[M];
        T dW1[M];
        T db1;
        for (int i = 0; i < M; ++i) {
            for (int j = 0; j < N; ++j) {
                W0[i][j]  = W0_ptr(node, i, j);
                dW0[i][j] = (T)0.0;
            }
            b0[i]  = b0_ptr(node, i);
            db0[i] = (T)0.0;
            W1[i]  = W1_ptr(node, i);
            dW1[i] = (T)0.0;
        }
        db1 = (T)0.0;

        // Process all frames of this node.
        for ( index_t frame = 0; frame < frame_size; ++frame ) {
            // Read the N selected input values.
            T x[N];
            for ( int i = 0; i < N; ++i ) {
                x[i] = x_ptr.Get(frame, input_index_ptr(node, i));
            }

            // Recompute layer-0 activations, back-propagate through layer 1.
            T grad1 = dy_ptr.Get(frame, node);
            T grad0[M];
            db1 += grad1;
            for ( int i = 0; i < M; ++i ) {
                T sig0 = b0[i];
                for ( int j = 0; j < N; ++j ) {
                    sig0 += x[j] * W0[i][j];
                }
                sig0 = std::max(sig0, (T)0); // ReLU
                dW1[i] += grad1 * sig0;
                if ( sig0 > 0 ) { // ReLU: pass the gradient only where the unit was active
                    grad0[i] = grad1 * W1[i];
                }
                else {
                    grad0[i] = 0;
                }
            }

            // Back-propagate through layer 0.
            T dx[N];
            for ( int i = 0; i < N; ++i ) {
                dx[i] = 0; // dx_ptr[frame_stride * i + frame];
            }
            for ( int i = 0; i < M; ++i ) {
                db0[i] += grad0[i];
                for ( int j = 0; j < N; ++j ) {
                    dW0[i][j] += grad0[i] * x[j];
                    dx[j]     += grad0[i] * W0[i][j];
                }
            }

            // Write the input error back (inputs may be shared between nodes,
            // hence Add rather than Set).
            for ( int i = 0; i < N; ++i ) {
                dx_ptr.Add(frame, input_index_ptr(node, i), dx[i]);
            }
        }

        // Accumulate parameter gradients.
        for ( int i = 0; i < M; ++i ) {
            for ( int j = 0; j < N; ++j ) {
                dW0_ptr(node, i, j) += dW0[i][j];
            }
            db0_ptr(node, i) += db0[i];
            dW1_ptr(node, i) += dW1[i];
        }
        // BUGFIX: accumulate (was '='), consistent with dW0/db0/dW1 above and
        // with the SIMD path, which also uses '+=' for db1.
        db1_ptr(node) += db1;
    }

    return dx_buf;
}
}
};
}
|
convolution_sgemm.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
#if __ARM_NEON
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u, 1, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
vst1q_f32(tmpptr, vld1q_f32(img0));
vst1q_f32(tmpptr + 4, vld1q_f32(img0 + 4));
img0 += size;
tmpptr += 8;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
vst1q_f32(tmpptr, vld1q_f32(img0));
img0 += size;
tmpptr += 4;
}
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
#else // __ARM_NEON
tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
float* tmpptr = tmp.channel(i);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
#endif // __ARM_NEON
#if __ARM_NEON
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
float* outptr4 = top_blob.channel(p + 4);
float* outptr5 = top_blob.channel(p + 5);
float* outptr6 = top_blob.channel(p + 6);
float* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[0] \n"
"dup v18.4s, v0.s[1] \n"
"dup v19.4s, v0.s[1] \n"
"dup v20.4s, v0.s[2] \n"
"dup v21.4s, v0.s[2] \n"
"dup v22.4s, v0.s[3] \n"
"dup v23.4s, v0.s[3] \n"
"dup v24.4s, v1.s[0] \n"
"dup v25.4s, v1.s[0] \n"
"dup v26.4s, v1.s[1] \n"
"dup v27.4s, v1.s[1] \n"
"dup v28.4s, v1.s[2] \n"
"dup v29.4s, v1.s[2] \n"
"dup v30.4s, v1.s[3] \n"
"dup v31.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(nn) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[1] \n"
"dup v18.4s, v0.s[2] \n"
"dup v19.4s, v0.s[3] \n"
"dup v20.4s, v1.s[0] \n"
"dup v21.4s, v1.s[1] \n"
"dup v22.4s, v1.s[2] \n"
"dup v23.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(nn) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v24.4s, v25.4s}, [%20] \n"
// inch loop
"lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v0.4s, v8.s[0] \n"
"fmla v17.4s, v1.4s, v8.s[0] \n"
"fmla v18.4s, v2.4s, v8.s[1] \n"
"fmla v19.4s, v3.4s, v8.s[1] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v4.4s, v8.s[2] \n"
"fmla v21.4s, v5.4s, v8.s[2] \n"
"fmla v22.4s, v6.4s, v8.s[3] \n"
"fmla v23.4s, v7.4s, v8.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"fadd v16.4s, v16.4s, v20.4s \n"
"fadd v17.4s, v17.4s, v21.4s \n"
"fadd v24.4s, v24.4s, v16.4s \n"
"fadd v25.4s, v25.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #32] \n"
"ld1r {v8.4s}, [%8], #4 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v0.4s \n"
"fmla v25.4s, v8.4s, v1.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v24.s}[0],[%0], #4 \n"
"st1 {v24.s}[1],[%1], #4 \n"
"st1 {v24.s}[2],[%2], #4 \n"
"st1 {v24.s}[3],[%3], #4 \n"
"st1 {v25.s}[0],[%4], #4 \n"
"st1 {v25.s}[1],[%5], #4 \n"
"st1 {v25.s}[2],[%6], #4 \n"
"st1 {v25.s}[3],[%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(nn) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25");
}
}
#endif // __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[0] \n"
"dup v10.4s, v0.s[1] \n"
"dup v11.4s, v0.s[1] \n"
"dup v12.4s, v0.s[2] \n"
"dup v13.4s, v0.s[2] \n"
"dup v14.4s, v0.s[3] \n"
"dup v15.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[0] \n"
"vdup.f32 q10, d0[1] \n"
"vdup.f32 q11, d0[1] \n"
"vdup.f32 q12, d1[0] \n"
"vdup.f32 q13, d1[0] \n"
"vdup.f32 q14, d1[1] \n"
"vdup.f32 q15, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n" // r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
"vst1.f32 {d24-d27}, [%2 :128]! \n"
"vst1.f32 {d28-d31}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[1] \n"
"dup v10.4s, v0.s[2] \n"
"dup v11.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[1] \n"
"vdup.f32 q10, d1[0] \n"
"vdup.f32 q11, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n" // r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v12.4s}, [%12] \n"
// inch loop
"lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v12.4s, v12.4s, v8.4s \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #32] \n"
"ld1r {v4.4s}, [%4], #4 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v12.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v12.s}[0], [%0], #4 \n"
"st1 {v12.s}[1], [%1], #4 \n"
"st1 {v12.s}[2], [%2], #4 \n"
"st1 {v12.s}[3], [%3], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12");
#else // __aarch64__
asm volatile(
"vld1.f32 {d24-d25}, [%12] \n"
// inch loop
"lsr r4, %13, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"0: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q12, q12, q8 \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n" // r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #32] \n"
"vld1.f32 {d8[],d9[]}, [%4]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d24[0]}, [%0]! \n"
"vst1.f32 {d24[1]}, [%1]! \n"
"vst1.f32 {d25[0]}, [%2]! \n"
"vst1.f32 {d25[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
"dup v9.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
"vdup.f32 q9, %6 \n"
// inch loop
"lsr r4, %7, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n" // r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n" // w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
// inch loop
"lsr r4, %7, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n" // r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8");
#endif // __aarch64__
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif
int nn = inch * maxk; // inch always > 0
float32x4_t _sum0 = vdupq_n_f32(0.f);
int q = 0;
for (; q + 3 < nn; q += 4)
{
float32x4_t _p0 = vld1q_f32(tmpptr);
tmpptr += 4;
float32x4_t _k0 = vld1q_f32(kptr);
kptr += 4;
#if __aarch64__
_sum0 = vfmaq_f32(_sum0, _p0, _k0);
#else
_sum0 = vmlaq_f32(_sum0, _p0, _k0);
#endif
}
#if __aarch64__
float sum0 = bias0 + vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0);
#endif
for (; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
#else // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
for (int i = 0; i < size; i++)
{
const float* tmpptr = tmp.channel(i);
const float* kptr = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
#endif // __ARM_NEON
}
static void convolution_im2col_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    // Number of spatial taps in one kernel plane (kw * kh).
    const int maxk = kernel_w * kernel_h;

    // Interleave the weights for the sgemm micro-kernels.
    // src layout = maxk-inch-outch
    // dst layout = 4b-4a-maxk-inch/4a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __ARM_NEON
#if __aarch64__
    // aarch64 micro-kernels consume 8 output channels per packed channel,
    // then 4, then 1 for the remainders.
    kernel_tm.create(32 * maxk, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4);
#endif

    int oc = 0;
#if __aarch64__
    // Pack 8 output channels at a time: for each (ic, k) emit one weight
    // from each of the 8 consecutive output channels.
    for (; oc + 7 < outch; oc += 8)
    {
        float* dst = kernel_tm.channel(oc / 8);

        for (int ic = 0; ic < inch; ic++)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                    *dst++ = kernel.channel(oc + j).row(ic)[k];
            }
        }
    }
#endif // __aarch64__
    // Pack 4 output channels at a time.
    for (; oc + 3 < outch; oc += 4)
    {
#if __aarch64__
        float* dst = kernel_tm.channel(oc / 8 + (oc % 8) / 4);
#else
        float* dst = kernel_tm.channel(oc / 4);
#endif

        for (int ic = 0; ic < inch; ic++)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 4; j++)
                    *dst++ = kernel.channel(oc + j).row(ic)[k];
            }
        }
    }
    // Remaining output channels are copied through one at a time.
    for (; oc < outch; oc++)
    {
#if __aarch64__
        float* dst = kernel_tm.channel(oc / 8 + (oc % 8) / 4 + oc % 4);
#else
        float* dst = kernel_tm.channel(oc / 4 + oc % 4);
#endif
        const Mat k0 = kernel.channel(oc);

        for (int ic = 0; ic < inch; ic++)
        {
            const float* src = k0.row(ic);
            for (int k = 0; k < maxk; k++)
                *dst++ = src[k];
        }
    }
#else
    // Without NEON the sgemm path consumes the kernel as-is.
    kernel_tm = kernel;
#endif // __ARM_NEON
}
static void convolution_im2col_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    // Source/destination geometry.
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;

    const int size = outw * outh;          // output pixels per channel
    const int maxk = kernel_w * kernel_h;  // taps per kernel plane

    // im2col: unfold the input into a (size x maxk x inch) matrix so the
    // convolution becomes a plain sgemm.
    Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    {
        // Elements to skip to jump from the end of one output row of samples
        // to the start of the next.
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* outptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // Start of this tap's samples within the input channel.
                    const float* pin = img.row<const float>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        // Copy 4 / 2 / 1 output columns at a time.
                        int j = 0;
                        while (j + 3 < outw)
                        {
                            outptr[0] = pin[0];
                            outptr[1] = pin[stride_w];
                            outptr[2] = pin[stride_w * 2];
                            outptr[3] = pin[stride_w * 3];
                            pin += stride_w * 4;
                            outptr += 4;
                            j += 4;
                        }
                        while (j + 1 < outw)
                        {
                            outptr[0] = pin[0];
                            outptr[1] = pin[stride_w];
                            pin += stride_w * 2;
                            outptr += 2;
                            j += 2;
                        }
                        while (j < outw)
                        {
                            *outptr++ = *pin;
                            pin += stride_w;
                            j++;
                        }
                        pin += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
contact_residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_CONTACT_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_CONTACT_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
 * @class ContactResidualBasedBlockBuilderAndSolver
 * @ingroup ContactStructuralMechanicsApplication
 * @brief Current class provides an implementation for contact builder and solving operations.
 * @details The RHS is constituted by the unbalanced loads (residual). Degrees of freedom are reordered putting the restrained degrees of freedom at the end of the system ordered in reverse order with respect to the DofSet. Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
this information. Calculation of the reactions involves a cost very similar to the calculation of the total residual
 * @author Vicente Mataix Ferrandiz
 * @tparam TSparseSpace The sparse matrix system considered
 * @tparam TDenseSpace The dense matrix system
 * @tparam TLinearSolver The type of linear solver considered
 * @tparam TBuilderAndSolver The builder and solver considered as base
 */
template<class TSparseSpace,
         class TDenseSpace, //= DenseSpace<double>,
         class TLinearSolver, //= LinearSolver<TSparseSpace,TDenseSpace>
         class TBuilderAndSolver = ResidualBasedBlockBuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
         >
class ContactResidualBasedBlockBuilderAndSolver
    : public TBuilderAndSolver
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ContactResidualBasedBlockBuilderAndSolver
    KRATOS_CLASS_POINTER_DEFINITION(ContactResidualBasedBlockBuilderAndSolver);

    /// Definitions dependent of the base class
    typedef TBuilderAndSolver BaseType;
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::TDataType TDataType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor.
     */
    ContactResidualBasedBlockBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : BaseType(pNewLinearSystemSolver)
    {
    }

    /** Destructor.
     */
    ~ContactResidualBasedBlockBuilderAndSolver() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief This method imposes the Dirichlet BC. It will fill with 0 the corresponding DoF
     * @details Isolated contact nodes are fixed beforehand so the base class treats their
     * Lagrange multiplier DoFs as constrained, and are released again afterwards
     * @param pScheme The pointer to the scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS of the system
     * @param Dx The current solution increment
     * @param b The RHS of the system
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        ) override
    {
        FixIsolatedNodes(rModelPart);

        BaseType::ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

        FreeIsolatedNodes(rModelPart);
    }

    /**
     * @brief This method builds the RHS of the system of equations
     * @details Isolated contact nodes are fixed during the build and released afterwards
     * (same pattern as ApplyDirichletConditions)
     * @param pScheme The pointer to the scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param b The RHS of the system
     */
    void BuildRHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& b
        ) override
    {
        FixIsolatedNodes(rModelPart);

        BaseType::BuildRHS(pScheme, rModelPart, b);

        FreeIsolatedNodes(rModelPart);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    /**
     * @brief This method checks the ISOLATED nodes and fixes their Lagrange multiplier DoFs
     * @details A node is flagged ISOLATED only when every condition it belongs to is ISOLATED;
     * the LM DoFs of such nodes are fixed so they do not pollute the system
     * @param rModelPart The model part to compute
     */
    void FixIsolatedNodes(ModelPart& rModelPart)
    {
        KRATOS_ERROR_IF_NOT(rModelPart.HasSubModelPart("Contact")) << "CONTACT MODEL PART NOT CREATED" << std::endl;
        KRATOS_ERROR_IF_NOT(rModelPart.HasSubModelPart("ComputingContact")) << "CONTACT COMPUTING MODEL PART NOT CREATED" << std::endl;
        ModelPart& contact_model_part = rModelPart.GetSubModelPart("Contact");
        ModelPart& computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact");

        // We reset the flags
        auto& nodes_array = contact_model_part.Nodes();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
            (nodes_array.begin() + i)->Set(VISITED, false);
            (nodes_array.begin() + i)->Set(ISOLATED, false);
        }

        // Now we set the flag in the nodes: ISOLATED stays true only if every
        // condition sharing the node is ISOLATED (AND-accumulation below)
        auto& conditions_array = computing_contact_model_part.Conditions();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(conditions_array.size()); ++i) {
            auto it_cond = conditions_array.begin() + i;
            auto& geom = it_cond->GetGeometry();
            for (std::size_t i_node = 0; i_node < geom.size(); ++i_node) {
                geom[i_node].SetLock(); // nodes are shared between conditions/threads
                if (!geom[i_node].Is(VISITED)) {
                    geom[i_node].Set(ISOLATED, it_cond->Is(ISOLATED));
                    geom[i_node].Set(VISITED, true);
                } else {
                    geom[i_node].Set(ISOLATED, geom[i_node].Is(ISOLATED) && it_cond->Is(ISOLATED));
                }
                geom[i_node].UnSetLock();
            }
        }

        // We fix the LM on the isolated nodes (scalar or vector formulation)
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
            auto it_node = nodes_array.begin() + i;
            if (it_node->Is(ISOLATED)) {
                if (it_node->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE))
                    it_node->Fix(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE);
                else if (it_node->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X)) {
                    it_node->Fix(VECTOR_LAGRANGE_MULTIPLIER_X);
                    it_node->Fix(VECTOR_LAGRANGE_MULTIPLIER_Y);
                    it_node->Fix(VECTOR_LAGRANGE_MULTIPLIER_Z);
                }
            }
        }
    }

    /**
     * @brief This method releases the Lagrange multiplier DoFs of the ISOLATED nodes
     * @details Inverse of FixIsolatedNodes; relies on the ISOLATED flag set there
     * @param rModelPart The model part to compute
     */
    void FreeIsolatedNodes(ModelPart& rModelPart)
    {
        KRATOS_ERROR_IF_NOT(rModelPart.HasSubModelPart("Contact")) << "CONTACT MODEL PART NOT CREATED" << std::endl;
        ModelPart& contact_model_part = rModelPart.GetSubModelPart("Contact");

        // We release the LM on the isolated nodes
        auto& nodes_array = contact_model_part.Nodes();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
            auto it_node = nodes_array.begin() + i;
            if (it_node->Is(ISOLATED)) {
                if (it_node->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE))
                    it_node->Free(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE);
                else if (it_node->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X)) {
                    it_node->Free(VECTOR_LAGRANGE_MULTIPLIER_X);
                    it_node->Free(VECTOR_LAGRANGE_MULTIPLIER_Y);
                    it_node->Free(VECTOR_LAGRANGE_MULTIPLIER_Z);
                }
            }
        }
    }

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; /* Class ContactResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions */
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_CONTACT_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
flux_avx512.c |
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <mathimf.h>
#include <immintrin.h>
#include <ktime.h>
#include <geometry.h>
#ifdef __USE_HW_COUNTER
#include <perf.h>
#include <kperf.h>
#endif
#include <phy.h>
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)
/*
Calculates the residual
*/
void
compute_residual(struct residual *restrict res)
{
#ifdef __USE_HW_COUNTER
const struct fd fd = res->perf_counters->fd;
struct counters start;
perf_read(fd, &start);
const uint64_t icycle = __rdtsc();
#endif
struct ktime ktime;
setktime(&ktime);
const size_t bsz = res->bsz;
const size_t nfnodes = res->nfnodes;
const size_t dofs = res->dofs;
const uint32_t snfc = res->snfc;
const double pressure = res->pressure;
const double velocity_u = res->velocity_u;
const double velocity_v = res->velocity_v;
const double velocity_w = res->velocity_w;
const double *restrict f_xyz0 = res->f_xyz0;
const double *restrict f_xyz1 = res->f_xyz1;
const double *restrict f_xyz2 = res->f_xyz2;
const double *restrict xyz0 = res->xyz0;
const double *restrict xyz1 = res->xyz1;
const double *restrict xyz2 = res->xyz2;
const uint32_t *restrict ie = res->ie;
const uint32_t *restrict part = res->part;
const uint32_t *restrict snfic = res->snfic;
const uint32_t *restrict n0 = res->n0;
const uint32_t *restrict n1 = res->n1;
const uint32_t *restrict nfptr = res->nfptr;
const uint32_t *restrict sn0 = res->sn0;
const uint32_t *restrict sn1 = res->sn1;
const uint32_t *restrict sn2 = res->sn2;
const double *restrict x0 = res->x0;
const double *restrict x1 = res->x1;
const double *restrict x2 = res->x2;
const double *restrict x3 = res->x3;
const double *restrict q = res->q;
const double *restrict w0termsx = res->w0termsx;
const double *restrict w0termsy = res->w0termsy;
const double *restrict w0termsz = res->w0termsz;
const double *restrict w1termsx = res->w1termsx;
const double *restrict w1termsy = res->w1termsy;
const double *restrict w1termsz = res->w1termsz;
double *restrict gradx0 = res->gradx0;
double *restrict gradx1 = res->gradx1;
double *restrict gradx2 = res->gradx2;
memset(gradx0, 0, dofs * sizeof(double));
memset(gradx1, 0, dofs * sizeof(double));
memset(gradx2, 0, dofs * sizeof(double));
double *restrict r = res->r;
memset(r, 0, dofs * sizeof(double));
__assume_aligned(r, 64);
/*
Calculates the gradients at the nodes using weighted least squares
This solves using Gram-Schmidt
*/
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const uint32_t idx0 = bsz * node0;
const uint32_t idx1 = bsz * node1;
double dq;
double termx;
double termy;
double termz;
if(part[node0] == t)
{
termx = w0termsx[i];
termy = w0termsy[i];
termz = w0termsz[i];
dq = q[idx1 + 0] - q[idx0 + 0];
gradx0[idx0 + 0] += termx * dq;
gradx1[idx0 + 0] += termy * dq;
gradx2[idx0 + 0] += termz * dq;
dq = q[idx1 + 1] - q[idx0 + 1];
gradx0[idx0 + 1] += termx * dq;
gradx1[idx0 + 1] += termy * dq;
gradx2[idx0 + 1] += termz * dq;
dq = q[idx1 + 2] - q[idx0 + 2];
gradx0[idx0 + 2] += termx * dq;
gradx1[idx0 + 2] += termy * dq;
gradx2[idx0 + 2] += termz * dq;
dq = q[idx1 + 3] - q[idx0 + 3];
gradx0[idx0 + 3] += termx * dq;
gradx1[idx0 + 3] += termy * dq;
gradx2[idx0 + 3] += termz * dq;
}
if(part[node1] == t)
{
termx = w1termsx[i];
termy = w1termsy[i];
termz = w1termsz[i];
dq = q[idx0 + 0] - q[idx1 + 0];
gradx0[idx1 + 0] += termx * dq;
gradx1[idx1 + 0] += termy * dq;
gradx2[idx1 + 0] += termz * dq;
dq = q[idx0 + 1] - q[idx1 + 1];
gradx0[idx1 + 1] += termx * dq;
gradx1[idx1 + 1] += termy * dq;
gradx2[idx1 + 1] += termz * dq;
dq = q[idx0 + 2] - q[idx1 + 2];
gradx0[idx1 + 2] += termx * dq;
gradx1[idx1 + 2] += termy * dq;
gradx2[idx1 + 2] += termz * dq;
dq = q[idx0 + 3] - q[idx1 + 3];
gradx0[idx1 + 3] += termx * dq;
gradx1[idx1 + 3] += termy * dq;
gradx2[idx1 + 3] += termz * dq;
}
}
}
/*
Calculates the fluxes on the face and performs the flux balance
*/
/* AVX512 Registers */
const __m512d _zero = _mm512_set1_pd(0);
const __m512d _pos1 = _mm512_set1_pd(1.0);
const __m512d _pos2 = _mm512_set1_pd(2.0);
const __m512d _half = _mm512_set1_pd(0.5);
const __m512d _nhalf = _mm512_set1_pd(-0.5);
const __m512d _nu95 = _mm512_set1_pd(0.95);
const __m512d _beta = _mm512_set1_pd(BETA);
#ifdef __USE_SKX
const __m512d _rbeta = _mm512_rcp14_pd(_beta);
#else
const __m512d _rbeta = _mm512_rcp28_pd(_beta);
#endif
const __m256i _bsz = _mm256_set1_epi32(bsz);
const __m256i _shift1 = _mm256_set1_epi32(1);
const __m256i _shift2 = _mm256_set1_epi32(2);
const __m256i _shift3 = _mm256_set1_epi32(3);
const __m512i _ng = _mm512_set1_epi32(-1);
const __m512d _und = _mm512_undefined_pd();
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
const uint32_t lim = ie1 - ((ie1-ie0) % 8);
const __m512i _t = _mm512_set1_epi32(t);
uint32_t i;
for(i = ie0; i < lim; i+=8)
{
const __m512d _xn = _mm512_load_pd((void const *) &x0[i]);
const __m512d _yn = _mm512_load_pd((void const *) &x1[i]);
const __m512d _zn = _mm512_load_pd((void const *) &x2[i]);
const __m512d _ln = _mm512_load_pd((void const *) &x3[i]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
const __m512d _fdot = _mm512_abs_pd(_xn);
__mmask _k0;
__m512d _dot, _X1, _Y1, _Z1;
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_LT_OS);
_X1 = _mm512_mask_fnmadd_pd(_xn, _k0, _xn, _pos1);
_Y1 = _mm512_mask_fnmadd_pd(_yn, _k0, _xn, _zero);
_Z1 = _mm512_mask_fnmadd_pd(_zn, _k0, _xn, _zero);
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_GE_OS);
_X1 = _mm512_mask_fnmadd_pd(_X1, _k0, _yn, _zero);
_Y1 = _mm512_mask_fnmadd_pd(_Y1, _k0, _yn, _pos1);
_Z1 = _mm512_mask_fnmadd_pd(_Z1, _k0, _yn, _zero);
/*
Normalize the first vector
*/
__m512d _size;
_size = _mm512_mul_pd(_X1, _X1);
_size = _mm512_fmadd_pd(_Y1, _Y1, _size);
_size = _mm512_fmadd_pd(_Z1, _Z1, _size);
#ifdef __USE_SKX
_size = _mm512_rsqrt14_pd(_size);
#else
_size = _mm512_rsqrt28_pd(_size);
#endif
_X1 = _mm512_mul_pd(_X1, _size);
_Y1 = _mm512_mul_pd(_Y1, _size);
_Z1 = _mm512_mul_pd(_Z1, _size);
const __m256i _n0 = _mm256_load_si256((__m256i const *) &n0[i]);
const __m256i _n1 = _mm256_load_si256((__m256i const *) &n1[i]);
const __m512d _x00 = _mm512_i32gather_pd(_n0, &xyz0[0], 8);
const __m512d _x01 = _mm512_i32gather_pd(_n0, &xyz1[0], 8);
const __m512d _x02 = _mm512_i32gather_pd(_n0, &xyz2[0], 8);
const __m512d _x10 = _mm512_i32gather_pd(_n1, &xyz0[0], 8);
const __m512d _x11 = _mm512_i32gather_pd(_n1, &xyz1[0], 8);
const __m512d _x12 = _mm512_i32gather_pd(_n1, &xyz2[0], 8);
const __m512d _xmean = _mm512_mul_pd(_half, _mm512_add_pd(_x00, _x10));
const __m512d _ymean = _mm512_mul_pd(_half, _mm512_add_pd(_x01, _x11));
const __m512d _zmean = _mm512_mul_pd(_half, _mm512_add_pd(_x02, _x12));
/*
Take cross-product of normal and V1 to get V2
*/
const __m512d _X2 = _mm512_fmsub_pd(_yn, _Z1, _mm512_mul_pd(_zn, _Y1));
const __m512d _Y2 = _mm512_fmsub_pd(_zn, _X1, _mm512_mul_pd(_xn, _Z1));
const __m512d _Z2 = _mm512_fmsub_pd(_xn, _Y1, _mm512_mul_pd(_yn, _X1));
/*
Compute the stride indices
*/
const __m256i _idx0 = _mm256_mullo_epi32(_bsz, _n0);
const __m256i _idx1 = _mm256_mullo_epi32(_bsz, _n1);
const __m256i _idx01 = _mm256_add_epi32(_idx0, _shift1);
const __m256i _idx11 = _mm256_add_epi32(_idx1, _shift1);
const __m256i _idx02 = _mm256_add_epi32(_idx0, _shift2);
const __m256i _idx12 = _mm256_add_epi32(_idx1, _shift2);
const __m256i _idx03 = _mm256_add_epi32(_idx0, _shift3);
const __m256i _idx13 = _mm256_add_epi32(_idx1, _shift3);
/*
Get variables on "left" and "right" side of face
*/
__m512d _q;
__m512d _ubarL, _ubarR;
__m512d _rx, _ry, _rz;
__m512d _g0, _g1, _g2;
__m512d _pL, _uL, _vL, _wL;
__m512d _pR, _uR, _vR, _wR;
/* Left */
_rx = _mm512_sub_pd(_xmean, _x00);
_ry = _mm512_sub_pd(_ymean, _x01);
_rz = _mm512_sub_pd(_zmean, _x02);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx0, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx0, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx0, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx0, &q[0], 8);
_pL = _mm512_fmadd_pd(_g0, _rx, _q);
_pL = _mm512_fmadd_pd(_g1, _ry, _pL);
_pL = _mm512_fmadd_pd(_g2, _rz, _pL);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx01, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx01, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx01, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx01, &q[0], 8);
_uL = _mm512_fmadd_pd(_g0, _rx, _q);
_uL = _mm512_fmadd_pd(_g1, _ry, _uL);
_uL = _mm512_fmadd_pd(_g2, _rz, _uL);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx02, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx02, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx02, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx02, &q[0], 8);
_vL = _mm512_fmadd_pd(_g0, _rx, _q);
_vL = _mm512_fmadd_pd(_g1, _ry, _vL);
_vL = _mm512_fmadd_pd(_g2, _rz, _vL);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx03, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx03, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx03, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx03, &q[0], 8);
_wL = _mm512_fmadd_pd(_g0, _rx, _q);
_wL = _mm512_fmadd_pd(_g1, _ry, _wL);
_wL = _mm512_fmadd_pd(_g2, _rz, _wL);
_ubarL = _mm512_mul_pd(_xn, _uL);
_ubarL = _mm512_fmadd_pd(_yn, _vL, _ubarL);
_ubarL = _mm512_fmadd_pd(_zn, _wL, _ubarL);
/* Right */
_rx = _mm512_sub_pd(_xmean, _x10);
_ry = _mm512_sub_pd(_ymean, _x11);
_rz = _mm512_sub_pd(_zmean, _x12);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx1, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx1, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx1, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx1, &q[0], 8);
_pR = _mm512_fmadd_pd(_g0, _rx, _q);
_pR = _mm512_fmadd_pd(_g1, _ry, _pR);
_pR = _mm512_fmadd_pd(_g2, _rz, _pR);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx11, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx11, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx11, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx11, &q[0], 8);
_uR = _mm512_fmadd_pd(_g0, _rx, _q);
_uR = _mm512_fmadd_pd(_g1, _ry, _uR);
_uR = _mm512_fmadd_pd(_g2, _rz, _uR);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx12, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx12, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx12, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx12, &q[0], 8);
_vR = _mm512_fmadd_pd(_g0, _rx, _q);
_vR = _mm512_fmadd_pd(_g1, _ry, _vR);
_vR = _mm512_fmadd_pd(_g2, _rz, _vR);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx13, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx13, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx13, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx13, &q[0], 8);
_wR = _mm512_fmadd_pd(_g0, _rx, _q);
_wR = _mm512_fmadd_pd(_g1, _ry, _wR);
_wR = _mm512_fmadd_pd(_g2, _rz, _wR);
_ubarR = _mm512_mul_pd(_xn, _uR);
_ubarR = _mm512_fmadd_pd(_yn, _vR, _ubarR);
_ubarR = _mm512_fmadd_pd(_zn, _wR, _ubarR);
const __m512d _dp = _mm512_sub_pd(_pR, _pL);
const __m512d _du = _mm512_sub_pd(_uR, _uL);
const __m512d _dv = _mm512_sub_pd(_vR, _vL);
const __m512d _dw = _mm512_sub_pd(_wR, _wL);
/* Compute averages for velocity variables only */
const __m512d _u = _mm512_mul_pd(_half, _mm512_add_pd(_uL, _uR));
const __m512d _v = _mm512_mul_pd(_half, _mm512_add_pd(_vL, _vR));
const __m512d _w = _mm512_mul_pd(_half, _mm512_add_pd(_wL, _wR));
__m512d _ubar;
_ubar = _mm512_mul_pd(_xn, _u);
_ubar = _mm512_fmadd_pd(_yn, _v, _ubar);
_ubar = _mm512_fmadd_pd(_zn, _w, _ubar);
/* Compute Phi's */
__m512d _phi1;
_phi1 = _mm512_mul_pd(_xn, _beta);
_phi1 = _mm512_fmadd_pd(_u, _ubar, _phi1);
__m512d _phi2;
_phi2 = _mm512_mul_pd(_yn, _beta);
_phi2 = _mm512_fmadd_pd(_v, _ubar, _phi2);
__m512d _phi3;
_phi3 = _mm512_mul_pd(_zn, _beta);
_phi3 = _mm512_fmadd_pd(_w, _ubar, _phi3);
__m512d _phi4;
_phi4 = _mm512_mul_pd(_Z2, _phi2);
_phi4 = _mm512_fmsub_pd(_Y2, _phi3, _phi4);
__m512d _phi5;
_phi5 = _mm512_mul_pd(_X2, _phi3);
_phi5 = _mm512_fmsub_pd(_Z2, _phi1, _phi5);
__m512d _phi6;
_phi6 = _mm512_mul_pd(_Y2, _phi1);
_phi6 = _mm512_fmsub_pd(_X2, _phi2, _phi6);
__m512d _phi7;
_phi7 = _mm512_mul_pd(_Y1, _phi3);
_phi7 = _mm512_fmsub_pd(_Z1, _phi2, _phi7);
__m512d _phi8;
_phi8 = _mm512_mul_pd(_Z1, _phi1);
_phi8 = _mm512_fmsub_pd(_X1, _phi3, _phi8);
__m512d _phi9;
_phi9 = _mm512_mul_pd(_X1, _phi2);
_phi9 = _mm512_fmsub_pd(_Y1, _phi1, _phi9);
/*
Compute eigenvalues, eigenvectors, and strengths
*/
const __m512d _c2 = _mm512_fmadd_pd(_ubar, _ubar, _beta);
#ifdef __USE_SKX
const __m512d _c = _mm512_mul_pd(_mm512_rsqrt14_pd(_c2), _c2);
const __m512d _c2r = _mm512_rcp14_pd(_c2);
#else
const __m512d _c = _mm512_mul_pd(_mm512_rsqrt28_pd(_c2), _c2);
const __m512d _c2r = _mm512_rcp28_pd(_c2);
#endif
const __m512d _bac = _mm512_add_pd(_ubar, _c);
const __m512d _bsc = _mm512_sub_pd(_ubar, _c);
/*
Components of T(inverse)
*/
__m512d _ti11;
_ti11 = _mm512_mul_pd(_u, _phi4);
_ti11 = _mm512_fmadd_pd(_v, _phi5, _ti11);
_ti11 = _mm512_fmadd_pd(_w, _phi6, _ti11);
_ti11 = _mm512_fnmadd_pd(_ti11, _rbeta, _zero);
__m512d _ti21;
_ti21 = _mm512_mul_pd(_u, _phi7);
_ti21 = _mm512_fmadd_pd(_v, _phi8, _ti21);
_ti21 = _mm512_fmadd_pd(_w, _phi9, _ti21);
_ti21 = _mm512_fnmadd_pd(_ti21, _rbeta, _zero);
__m512d _ti31;
_ti31 = _mm512_mul_pd(_half, _mm512_sub_pd(_c, _ubar));
_ti31 = _mm512_mul_pd(_ti31, _rbeta);
__m512d _ti41;
_ti41 = _mm512_mul_pd(_nhalf, _bac);
_ti41 = _mm512_mul_pd(_ti41, _rbeta);
/*
jumps (T(inverse) * dq)
*/
__m512d _dv1;
_dv1 = _mm512_mul_pd(_ti11, _dp);
_dv1 = _mm512_fmadd_pd(_phi4, _du, _dv1);
_dv1 = _mm512_fmadd_pd(_phi5, _dv, _dv1);
_dv1 = _mm512_fmadd_pd(_phi6, _dw, _dv1);
_dv1 = _mm512_mul_pd(_dv1, _c2r);
__m512d _dv2;
_dv2 = _mm512_mul_pd(_ti21, _dp);
_dv2 = _mm512_fmadd_pd(_phi7, _du, _dv2);
_dv2 = _mm512_fmadd_pd(_phi8, _dv, _dv2);
_dv2 = _mm512_fmadd_pd(_phi9, _dw, _dv2);
_dv2 = _mm512_mul_pd(_dv2, _c2r);
__m512d _dv34;
_dv34 = _mm512_mul_pd(_xn, _du);
_dv34 = _mm512_fmadd_pd(_yn, _dv, _dv34);
_dv34 = _mm512_fmadd_pd(_zn, _dw, _dv34);
__m512d _dv3;
_dv3 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti31), _dp, _dv34);
_dv3 = _mm512_mul_pd(_dv3, _mm512_mul_pd(_half, _c2r));
__m512d _dv4;
_dv4 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti41), _dp, _dv34);
_dv4 = _mm512_mul_pd(_dv4, _mm512_mul_pd(_half, _c2r));
/*
Now get elements of T
*/
const __m512d _r13 = _mm512_mul_pd(_c, _beta);
__m512d _r23;
_r23 = _mm512_mul_pd(_u, _bac);
_r23 = _mm512_fmadd_pd(_xn, _beta, _r23);
__m512d _r33;
_r33 = _mm512_mul_pd(_v, _bac);
_r33 = _mm512_fmadd_pd(_yn, _beta, _r33);
__m512d _r43;
_r43 = _mm512_mul_pd(_w, _bac);
_r43 = _mm512_fmadd_pd(_zn, _beta, _r43);
const __m512d _r14 = _mm512_fnmadd_pd(_c, _beta, _zero);
__m512d _r24;
_r24 = _mm512_mul_pd(_u, _bsc);
_r24 = _mm512_fmadd_pd(_xn, _beta, _r24);
__m512d _r34;
_r34 = _mm512_mul_pd(_v, _bsc);
_r34 = _mm512_fmadd_pd(_yn, _beta, _r34);
__m512d _r44;
_r44 = _mm512_mul_pd(_w, _bsc);
_r44 = _mm512_fmadd_pd(_zn, _beta, _r44);
/*
Calculate T* |lambda| * T(inverse)
*/
const __m512d _eig1 = _mm512_abs_pd(_ubar);
const __m512d _eig2 = _mm512_abs_pd(_bac);
const __m512d _eig3 = _mm512_abs_pd(_bsc);
__m512d _t1;
_t1 = _mm512_mul_pd(_mm512_mul_pd(_eig2, _r13), _dv3);
_t1 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r14), _dv4, _t1);
__m512d _t2;
_t2 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _X1), _dv1);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _X2), _dv2, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r23), _dv3, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r24), _dv4, _t2);
__m512d _t3;
_t3 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Y1), _dv1);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Y2), _dv2, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r33), _dv3, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r34), _dv4, _t3);
__m512d _t4;
_t4 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Z1), _dv1);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Z2), _dv2, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r43), _dv3, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r44), _dv4, _t4);
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
/* Left Side */
__m512d _fluxp1;
_fluxp1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarL);
__m512d _fluxp2;
_fluxp2 = _mm512_mul_pd(_uL, _ubarL);
_fluxp2 = _mm512_fmadd_pd(_xn, _pL, _fluxp2);
_fluxp2 = _mm512_mul_pd(_ln, _fluxp2);
__m512d _fluxp3;
_fluxp3 = _mm512_mul_pd(_vL, _ubarL);
_fluxp3 = _mm512_fmadd_pd(_yn, _pL, _fluxp3);
_fluxp3 = _mm512_mul_pd(_ln, _fluxp3);
__m512d _fluxp4;
_fluxp4 = _mm512_mul_pd(_wL, _ubarL);
_fluxp4 = _mm512_fmadd_pd(_zn, _pL, _fluxp4);
_fluxp4 = _mm512_mul_pd(_ln, _fluxp4);
/* Right Side */
__m512d _fluxm1;
_fluxm1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarR);
__m512d _fluxm2;
_fluxm2 = _mm512_mul_pd(_uR, _ubarR);
_fluxm2 = _mm512_fmadd_pd(_xn, _pR, _fluxm2);
_fluxm2 = _mm512_mul_pd(_ln, _fluxm2);
__m512d _fluxm3;
_fluxm3 = _mm512_mul_pd(_vR, _ubarR);
_fluxm3 = _mm512_fmadd_pd(_yn, _pR, _fluxm3);
_fluxm3 = _mm512_mul_pd(_ln, _fluxm3);
__m512d _fluxm4;
_fluxm4 = _mm512_mul_pd(_wR, _ubarR);
_fluxm4 = _mm512_fmadd_pd(_zn, _pR, _fluxm4);
_fluxm4 = _mm512_mul_pd(_ln, _fluxm4);
__m512d _res1;
_res1 = _mm512_fnmadd_pd(_ln, _t1, _mm512_add_pd(_fluxm1, _fluxp1));
__m512d _res2;
_res2 = _mm512_fnmadd_pd(_ln, _t2, _mm512_add_pd(_fluxm2, _fluxp2));
__m512d _res3;
_res3 = _mm512_fnmadd_pd(_ln, _t3, _mm512_add_pd(_fluxm3, _fluxp3));
__m512d _res4;
_res4 = _mm512_fnmadd_pd(_ln, _t4, _mm512_add_pd(_fluxm4, _fluxp4));
/* Update the residual */
__m512i _node, _part;
__mmask _next;
_node = _mm512_castsi256_si512(_n0);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Conflict detection instructions with multiple node update */
/* Node 0 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx0, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx0, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx01, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx01, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx02, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx02, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx03, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx03, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
_node = _mm512_castsi256_si512(_n1);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Node 1 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx1, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx1, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx11, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx11, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx12, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx12, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx13, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx13, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
}
/* Remainder loop */
for(i = lim; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const double xn = x0[i];
const double yn = x1[i];
const double zn = x2[i];
const double ln = x3[i];
const double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);
const double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
const double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1 = (fabs(xn) < 0.95) ? (1 - xn * xn) : (- yn * xn);
double Y1 = (fabs(xn) < 0.95) ? (- xn * yn) : (1 - yn * yn);
double Z1 = (fabs(xn) < 0.95) ? (- xn * zn) : (- yn * zn);
/*
Normalize the first vector
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal and V1 to get V2
*/
const double X2 = yn * Z1 - zn * Y1;
const double Y2 = zn * X1 - xn * Z1;
const double Z2 = xn * Y1 - yn * X1;
/*
Get variables on "left" and "right" side of face
*/
double rx = xmean - xyz0[node0];
double ry = ymean - xyz1[node0];
double rz = zmean - xyz2[node0];
const uint32_t idx0 = bsz * node0;
const uint32_t idx1 = bsz * node1;
// Pressure
double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
pL += gradx1[idx0 + 0] * ry;
pL += gradx2[idx0 + 0] * rz;
// Velocity u
double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
uL += gradx1[idx0 + 1] * ry;
uL += gradx2[idx0 + 1] * rz;
// Velocity v
double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
vL += gradx1[idx0 + 2] * ry;
vL += gradx2[idx0 + 2] * rz;
// Velocity w
double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
wL += gradx1[idx0 + 3] * ry;
wL += gradx2[idx0 + 3] * rz;
double ubarL = xn * uL;
ubarL += yn * vL;
ubarL += zn * wL;
rx = xmean - xyz0[node1];
ry = ymean - xyz1[node1];
rz = zmean - xyz2[node1];
// Pressure
double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
pR += gradx1[idx1 + 0] * ry;
pR += gradx2[idx1 + 0] * rz;
// Velocity u
double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
uR += gradx1[idx1 + 1] * ry;
uR += gradx2[idx1 + 1] * rz;
// Velocity v
double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
vR += gradx1[idx1 + 2] * ry;
vR += gradx2[idx1 + 2] * rz;
// Velocity w
double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
wR += gradx1[idx1 + 3] * ry;
wR += gradx2[idx1 + 3] * rz;
double ubarR = xn * uR;
ubarR += yn * vR;
ubarR += zn * wR;
/* Compute averages */
const double u = 0.5f * (uL + uR);
const double v = 0.5f * (vL + vR);
const double w = 0.5f * (wL + wR);
double ubar = xn * u;
ubar += yn * v;
ubar += zn * w;
double phi1 = xn * BETA;
phi1 += u * ubar;
double phi2 = yn * BETA;
phi2 += v * ubar;
double phi3 = zn * BETA;
phi3 += w * ubar;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double c2 = ubar * ubar + BETA;
double c = sqrt(c2);
/*
Now compute eigenvalues, eigenvectors, and strengths
*/
const double uac = ubar + c;
const double usc = ubar - c;
const double eig1 = fabs(ubar);
const double eig2 = fabs(uac);
const double eig3 = fabs(usc);
const double dp = pR - pL;
const double du = uR - uL;
const double dv = vR - vL;
const double dw = wR - wL;
/*
Components of T(inverse)
*/
double ti11 = u * phi4;
ti11 += v * phi5;
ti11 += w * phi6;
ti11 = -ti11 / BETA;
double ti21 = u * phi7;
ti21 += v * phi8;
ti21 += w * phi9;
ti21 = -ti21 / BETA;
double ti31 = 0.5f * (c - ubar);
ti31 /= BETA;
double ti41 = -0.5f * uac;
ti41 /= BETA;
/*
jumps (T(inverse) * dq)
*/
double dv1 = ti11 * dp;
dv1 += phi4 * du;
dv1 += phi5 * dv;
dv1 += phi6 * dw;
dv1 /= c2;
double dv2 = ti21 * dp;
dv2 += phi7 * du;
dv2 += phi8 * dv;
dv2 += phi9 * dw;
dv2 /= c2;
double dv3 = 2.f * ti31 * dp;
dv3 += xn * du;
dv3 += yn * dv;
dv3 += zn * dw;
dv3 *= 0.5f / c2;
double dv4 = 2.f * ti41 * dp;
dv4 += xn * du;
dv4 += yn * dv;
dv4 += zn * dw;
dv4 *= 0.5f / c2;
/*
Now get elements of T
*/
const double r13 = c * BETA;
const double r23 = u * uac + xn * BETA;
const double r33 = v * uac + yn * BETA;
const double r43 = w * uac + zn * BETA;
const double r14 = -c * BETA;
const double r24 = u * usc + xn * BETA;
const double r34 = v * usc + yn * BETA;
const double r44 = w * usc + zn * BETA;
/*
Calculate T* |lambda| * T(inverse)
*/
double t1 = eig2 * r13 * dv3 + eig3 * r14 * dv4;
double t2 = eig1 * X1 * dv1 + eig1 * X2 * dv2;
t2 += eig2 * r23 * dv3 + eig3 * r24 * dv4;
double t3 = eig1 * Y1 * dv1 + eig1 * Y2 * dv2;
t3 += eig2 * r33 * dv3 + eig3 * r34 * dv4;
double t4 = eig1 * Z1 * dv1 + eig1 * Z2 * dv2;
t4 += eig2 * r43 * dv3 + eig3 * r44 * dv4;
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
const double fluxp1 = ln * BETA * ubarL;
const double fluxp2 = ln * (uL * ubarL + xn * pL);
const double fluxp3 = ln * (vL * ubarL + yn * pL);
const double fluxp4 = ln * (wL * ubarL + zn * pL);
/*
Now the right side
*/
const double fluxm1 = ln * BETA * ubarR;
const double fluxm2 = ln * (uR * ubarR + xn * pR);
const double fluxm3 = ln * (vR * ubarR + yn * pR);
const double fluxm4 = ln * (wR * ubarR + zn * pR);
const double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
const double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
const double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
const double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);
r[idx0 + 0] = (part[node0] == t) ? (r[idx0 + 0] + res1) : r[idx0 + 0];
r[idx0 + 1] = (part[node0] == t) ? (r[idx0 + 1] + res2) : r[idx0 + 1];
r[idx0 + 2] = (part[node0] == t) ? (r[idx0 + 2] + res3) : r[idx0 + 2];
r[idx0 + 3] = (part[node0] == t) ? (r[idx0 + 3] + res4) : r[idx0 + 3];
r[idx1 + 0] = (part[node1] == t) ? (r[idx1 + 0] - res1) : r[idx1 + 0];
r[idx1 + 1] = (part[node1] == t) ? (r[idx1 + 1] - res2) : r[idx1 + 1];
r[idx1 + 2] = (part[node1] == t) ? (r[idx1 + 2] - res3) : r[idx1 + 2];
r[idx1 + 3] = (part[node1] == t) ? (r[idx1 + 3] - res4) : r[idx1 + 3];
}
}
uint32_t i;
for(i = 0; i < snfc; i++)
{
const uint32_t if0 = snfic[i];
const uint32_t if1 = snfic[i+1];
uint32_t j;
#pragma omp parallel for
for(j = if0; j < if1; j++)
{
const uint32_t node0 = sn0[j];
const uint32_t node1 = sn1[j];
const uint32_t node2 = sn2[j];
const double p1 = q[bsz * node0];
const double p2 = q[bsz * node1];
const double p3 = q[bsz * node2];
const double ax = xyz0[node1] - xyz0[node0];
const double ay = xyz1[node1] - xyz1[node0];
const double az = xyz2[node1] - xyz2[node0];
const double bx = xyz0[node2] - xyz0[node0];
const double by = xyz1[node2] - xyz1[node0];
const double bz = xyz2[node2] - xyz2[node0];
/*
Normal points away from grid interior.
Magnitude is 1/3 area of surface triangle.
*/
double xn = ay * bz;
xn -= az * by;
xn *= MAG1;
double yn = ax * bz;
yn -= az * bx;
yn *= MAG0;
double zn = ax * by;
zn -= ay * bx;
zn *= MAG1;
double pa = 0.125f * (p2 + p3);
pa += 0.75f * p1;
double pb = 0.125f * (p3 + p1);
pb += 0.75f * p2;
double pc = 0.125f * (p1 + p2);
pc += 0.75f * p3;
uint32_t idx;
idx = bsz * node0;
r[idx + 1] += xn * pa;
r[idx + 2] += yn * pa;
r[idx + 3] += zn * pa;
idx = bsz * node1;
r[idx + 1] += xn * pb;
r[idx + 2] += yn * pb;
r[idx + 3] += zn * pb;
idx = bsz * node2;
r[idx + 1] += xn * pc;
r[idx + 2] += yn * pc;
r[idx + 3] += zn * pc;
}
}
/* Do the free boundaries */
#pragma omp parallel for
for(i = 0; i < nfnodes; i++)
{
uint32_t n = nfptr[i];
/*
Get normal and "other" 2 vectors. Remember that fxn,fyn and fzn
has the magnitude of the face contained in it.
*/
double xn = f_xyz0[i];
double yn = f_xyz1[i];
double zn = f_xyz2[i];
double area = xn * xn;
area += yn * yn;
area += zn * zn;
area = sqrt(area);
xn /= area;
yn /= area;
zn /= area;
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector (V1)
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal with V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Calculate elements of T and T(inverse) evaluated at free-stream
*/
double ubar0 = xn * velocity_u;
ubar0 += yn * velocity_v;
ubar0 += zn * velocity_w;
double c20 = ubar0 * ubar0 + BETA;
double c0 = sqrt(c20);
double phi1 = xn * BETA;
phi1 += velocity_u * ubar0;
double phi2 = yn * BETA;
phi2 += velocity_v * ubar0;
double phi3 = zn * BETA;
phi3 += velocity_w * ubar0;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double t13 = c0 * BETA;
double t23 = velocity_u * (ubar0 + c0);
t23 += xn * BETA;
double t33 = velocity_v * (ubar0 + c0);
t33 += yn * BETA;
double t43 = velocity_w * (ubar0 + c0);
t43 += zn * BETA;
double t14 = -c0 * BETA;
double t24 = velocity_u * (ubar0 - c0);
t24 += xn * BETA;
double t34 = velocity_v * (ubar0 - c0);
t34 += yn * BETA;
double t44 = velocity_w * (ubar0 - c0);
t44 += zn * BETA;
double ti11 = velocity_u * phi4;
ti11 += velocity_v * phi5;
ti11 += velocity_w * phi6;
ti11 = -ti11/BETA;
double ti21 = velocity_u * phi7;
ti21 += velocity_v * phi8;
ti21 += velocity_w * phi9;
ti21 = -ti21/BETA;
double ti31 = 0.5f * (c0 - ubar0);
ti31 /= BETA;
double ti41 = -0.5f * (c0 + ubar0);
ti41 /= BETA;
/*
Now, get the variables on the "inside"
*/
double pi = q[bsz * n + 0];
double ui = q[bsz * n + 1];
double vi = q[bsz * n + 2];
double wi = q[bsz * n + 3];
double un = xn * ui;
un += yn * vi;
un += zn * wi;
/*
If ubar is negative, take the reference condition from outside
*/
double pr, ur, vr, wr;
if(un > 0.f)
{
pr = pi;
ur = ui;
vr = vi;
wr = wi;
}
else
{
pr = pressure;
ur = velocity_u;
vr = velocity_v;
wr = velocity_w;
}
/*
Set rhs
*/
double rhs1 = ti11 * pr;
rhs1 += phi4 * ur;
rhs1 += phi5 * vr;
rhs1 += phi6 * wr;
rhs1 /= c20;
double rhs2 = ti21 * pr;
rhs2 += phi7 * ur;
rhs2 += phi8 * vr;
rhs2 += phi9 * wr;
rhs2 /= c20;
double rhs3 = 2.f * ti31 * pi;
rhs3 += xn * ui;
rhs3 += yn * vi;
rhs3 += zn * wi;
rhs3 = 0.5f * rhs3 / c20;
double rhs4 = 2.f * ti41 * pressure;
rhs4 += xn * velocity_u;
rhs4 += yn * velocity_v;
rhs4 += zn * velocity_w;
rhs4 = 0.5f * rhs4 / c20;
/*
Now do matrix multiplication to get values on boundary
*/
double pb = t13 * rhs3;
pb += t14 * rhs4;
double ub = X1 * rhs1;
ub += X2 * rhs2;
ub += t23 * rhs3;
ub += t24 * rhs4;
double vb = Y1 * rhs1;
vb += Y2 * rhs2;
vb += t33 * rhs3;
vb += t34 * rhs4;
double wb = Z1 * rhs1;
wb += Z2 * rhs2;
wb += t43 * rhs3;
wb += t44 * rhs4;
double ubar = xn * ub;
ubar += yn * vb;
ubar += zn * wb;
uint32_t idx = bsz * n;
r[idx + 0] += area * BETA * ubar;
r[idx + 1] += area * (ub * ubar + xn * pb);
r[idx + 2] += area * (vb * ubar + yn * pb);
r[idx + 3] += area * (wb * ubar + zn * pb);
}
compute_time(&ktime, res->t);
#ifdef __USE_HW_COUNTER
const uint64_t cycle = __rdtsc() - icycle;
struct counters end;
perf_read(fd, &end);
struct tot tot;
perf_calc(start, end, &tot);
res->perf_counters->ctrs->flux.cycles += cycle;
res->perf_counters->ctrs->flux.tot.imcR += tot.imcR;
res->perf_counters->ctrs->flux.tot.imcW += tot.imcW;
res->perf_counters->ctrs->flux.tot.edcR += tot.edcR;
res->perf_counters->ctrs->flux.tot.edcW += tot.edcW;
#endif
} |
ParticleFilterOMP.h | //------------------------------------------------------------------------
// ____ _ _
// / ___|____ _ _ ____ ____| |__ | |
// | | / ___| | | | _ \/ ___| _ \| |
// | |___| | | |_| | | | | |___| | | ||_|
// \____|_| \_____|_| |_|\____|_| |_|(_) Media benchmarks
//
// 2006, Intel Corporation, licensed under Apache 2.0
//
// file : ParticleFilterOMP.h
// author : Scott Ettinger - scott.m.ettinger@intel.com
//
// description : OpenMP parallelized version of the particle filter
// object derived from ParticleFilter.h
//
// modified :
//--------------------------------------------------------------------------
#ifndef PARTICLEFILTEROMP_H
#define PARTICLEFILTEROMP_H
#if defined(HAVE_CONFIG_H)
# include "config.h"
#endif
#include <omp.h>
#include "ParticleFilter.h"
//OpenMP-parallelized particle filter. Reuses all state and the public
//interface of ParticleFilter<T> and overrides only the two expensive
//stages (weight computation and new-particle generation) with threaded
//loops.
template<class T>
class ParticleFilterOMP : public ParticleFilter<T> {
	//Re-export protected members of the dependent base class so they can be
	//used unqualified inside the template method bodies.
	using ParticleFilter<T>:: mModel;		//model providing LogLikelihood() and StdDevs()
	using ParticleFilter<T>:: mWeights;		//particle weights computed by CalcWeights()
	using ParticleFilter<T>:: mParticles;		//current particle set
	using ParticleFilter<T>:: mNewParticles;	//scratch set filled by GenerateNewParticles()
	using ParticleFilter<T>:: mBestParticle;	//index of the highest-likelihood particle
	using ParticleFilter<T>:: mNParticles;		//target number of particles to regenerate
	using ParticleFilter<T>:: mMinParticles;	//minimum valid particles CalcWeights() requires
	using ParticleFilter<T>:: mBins;		//per-particle duplication counts used for resampling
	using ParticleFilter<T>:: mRnd;			//per-particle random state passed to AddGaussianNoise()
	typedef typename ParticleFilter<T>::fpType fpType;
	typedef typename ParticleFilter<T>::Vectorf Vectorf;
protected:
	std::vector<int> mIndex;	//list of particles to regenerate
	//calculate particle weights - threaded version
	void CalcWeights(std::vector<Vectorf > &particles);	//calculate particle weights based on model likelihood
	//New particle generation - threaded version
	void GenerateNewParticles(int k);
};
//Calculate particle weights (mWeights) and find highest likelihood particle.
//computes an optimal annealing factor and scales the likelihoods.
//
//Pipeline: (1) evaluate each particle's log-likelihood in an OpenMP loop,
//(2) compact away particles the model marked invalid (swap-with-last
//removal, so particle order is not preserved), (3) shift by the minimum
//log-likelihood and exponentiate, scaled by an annealing factor,
//(4) normalize the weights to sum to 1 and record mBestParticle.
//May shrink `particles` (and mWeights) in place. Returns early — leaving
//weights unnormalized — if fewer than mMinParticles particles survive.
template<class T>
void ParticleFilterOMP<T>::CalcWeights(std::vector<Vectorf > &particles)
{
	std::vector<unsigned char> valid(particles.size());	//validity flag per particle, as reported by the model
	mBestParticle = 0;
	fpType total = 0, best = 0, minWeight = 1e30f, annealingFactor = 1;
	mWeights.resize(particles.size());
	//loop index declared before the pragma; `omp for` makes it private
	int np = (int)particles.size(), j;
	#pragma omp parallel for //OpenMP parallelized loop to compute log-likelihoods
	for(j = 0; j < np; j++)
	{	bool vflag;
		int n = omp_get_thread_num();	//thread id selects per-thread scratch in the model
		mWeights[j] = mModel->LogLikelihood(particles[j], vflag, n);	//compute log-likelihood weights for each particle
		valid[j] = vflag ? 1 : 0;
	}
	uint i = 0;
	while(i < particles.size())
	{	if(!valid[i])	//if not valid(model prior), remove the particle from the list
		{	//swap-with-last removal; i is NOT advanced because the
			//swapped-in element still needs to be examined
			particles[i] = particles[particles.size() - 1];
			mWeights[i] = mWeights[particles.size() - 1];
			valid[i] = valid[valid.size() - 1];
			particles.pop_back(); mWeights.pop_back(); valid.pop_back();
		}
		else
			minWeight = std::min(mWeights[i++], minWeight);	//find minimum log-likelihood
	}
	if((int)particles.size() < mMinParticles) return;	//bail out if not enough valid particles
	mWeights -= minWeight;	//shift weights to zero for numerical stability
	if(mModel->StdDevs().size() > 1)
		annealingFactor = BetaAnnealingFactor(mWeights, 0.5f);	//calculate annealing factor if more than 1 step
	for(i = 0; i < mWeights.size(); i++)
	{	double wa = annealingFactor * mWeights[i];
		mWeights[i] = (float)exp(wa);	//exponentiate log-likelihoods scaled by annealing factor
		total += mWeights[i];	//save sum of all weights
		if(i == 0 || mWeights[i] > best)	//find highest likelihood particle
		{	best = mWeights[i];
			mBestParticle = i;
		}
	}
	mWeights *= fpType(1.0) / total;	//normalize weights
}
//Generate the next particle set (threaded version). mBins[src] says how
//many duplicates of surviving particle `src` to spawn; every duplicate is
//then perturbed with Gaussian noise whose scale comes from annealing step k.
template<class T>
void ParticleFilterOMP<T>::GenerateNewParticles(int k)
{
	mNewParticles.resize(mNParticles);
	mIndex.resize(mNParticles);
	//Expand the bin counts into a flat lookup table: mIndex[p] names the
	//source particle that new particle p duplicates.
	int p = 0;
	for(int src = 0; src < (int)mBins.size(); src++)
	{	for(uint copy = 0; copy < mBins[src]; copy++)
			mIndex[p++] = src;
	}
	//Each new particle is copied and perturbed independently, so the loop
	//parallelizes without any synchronization.
	#pragma omp parallel for
	for(int i = 0; i < mNParticles; i++)
	{	mNewParticles[i] = mParticles[mIndex[i]];
		this->AddGaussianNoise(mNewParticles[i], mModel->StdDevs()[k], mRnd[i]);
	}
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * NOTE: *y is used as scratch space and is modified during carry
 * normalization (this matches the classic glibc manual example).
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so its microseconds no longer exceed x's. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Normalize the other direction when the microsecond gap tops a second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative overall difference shows up purely in the seconds field. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-252,256)),ceild(8*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(8*t1+Nx+13,256)),floord(16*t2+Nx+12,256)),floord(8*t3+Nx+4,256)),floord(16*t1-16*t2+Nz+Nx+11,256));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),256*t4+254),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
phocount.c | /*
* Copyright (c) 2014, Brookhaven Science Associates, Brookhaven
* National Laboratory. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the Brookhaven Science Associates, Brookhaven
* National Laboratory nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <stdint.h>
#include "phocount.h"
/*
 * Photon-counting filter: locate isolated bright pixels in a stack of images.
 *
 * in      : image stack; dims[] gives the full shape, the last two dims are
 *           rows (N) and columns (M), all leading dims collapse into the
 *           image count
 * out     : sum of the sum_max brightest pixels of the 3x3 neighborhood for
 *           pixels passing every filter; `nodata` everywhere else
 * stddev  : standard deviation of those sum_max pixels, same masking
 * thresh, sum_filter, std_filter : [lo, hi) acceptance windows
 * sum_max : number of sorted neighborhood pixels to accumulate
 * nan     : when nonzero, rejected/edge pixels are written as NAN, else 0.0
 *
 * The 1-pixel border of each image has no full 3x3 neighborhood and is
 * always set to `nodata`. Always returns 0.
 */
int count(data_t *in, data_t *out, data_t *stddev,
          int ndims, index_t *dims,
          data_t *thresh, data_t *sum_filter, data_t *std_filter,
          int sum_max, int nan){
  index_t nimages = dims[0];
  index_t M = dims[ndims-1];  /* columns (fastest varying) */
  index_t N = dims[ndims-2];  /* rows */
  index_t imsize = N*M;
  data_t nodata;
  if(nan){
    /* Pad no-data cells with NaN rather than zero. */
    nodata = NAN;
  } else {
    nodata = 0.0;
  }
  /* Collapse every leading dimension into a flat image count. */
  int x;
  for(x=1;x<(ndims-2);x++){
    nimages = nimages * dims[x];
  }
  index_t i;
  #pragma omp parallel shared(in, out, stddev)
  {
    #pragma omp for
    for(i=0;i<nimages;i++){
      /* Start one element before the image so the pre-increment style
       * below keeps all three cursors in lock step. */
      data_t *inp = in + (i*imsize) - 1;
      data_t *outp = out + (i*imsize) - 1;
      data_t *stddevp = stddev + (i*imsize) - 1;
      data_t pixel[9];
      /* Clear row 0 plus element (1,0): linear positions 0..M. */
      index_t j, k;
      for(j=0;j<(M+1);j++){
        inp++;
        outp++;
        stddevp++;
        *outp = nodata;
        *stddevp = nodata;
      }
      /* Scan the interior (rows 1..N-2, columns 1..M-2). */
      for(j=1;j<(N-1);j++){
        for(k=1;k<(M-1);k++){
          inp++;
          outp++;
          stddevp++;
          *outp = nodata;
          *stddevp = nodata;
          if((*inp < thresh[0]) || (*inp >= thresh[1])){
            continue;
          }
          /* The pixel is above thresh; gather its 3x3 neighborhood. */
          pixel[0] = *inp;
          pixel[1] = *(inp - M - 1);
          pixel[2] = *(inp - M);
          pixel[3] = *(inp - M + 1);
          pixel[4] = *(inp - 1);
          pixel[5] = *(inp + 1);
          pixel[6] = *(inp + M - 1);
          pixel[7] = *(inp + M);
          pixel[8] = *(inp + M + 1);
          /* Reject unless this is the local maximum. */
          int n;
          int flag = 0;
          for(n=1;n<9;n++){
            if(pixel[n] > pixel[0]){
              flag = 1;
              break;
            }
          }
          if(flag){
            continue;
          }
          /* Sort descending, then accumulate the sum_max brightest. */
          sort(pixel, 9);
          data_t sum = 0;
          data_t scnd_moment = 0;
          for(n=0;n<sum_max;n++){
            sum += pixel[n];
            scnd_moment += pixel[n] * pixel[n];
          }
          if((sum < sum_filter[0]) || (sum >= sum_filter[1])){
            continue;
          }
          /* Population std-dev of the accumulated pixels (sqrt replaces
           * the original pow(x, 0.5); identical result, clearer intent). */
          data_t std = sqrt((scnd_moment - (sum*sum) / sum_max) / sum_max);
          if((std < std_filter[0]) || (std >= std_filter[1])){
            continue;
          }
          *stddevp = std;
          *outp = sum;
        } /* for(k) */
        /* Clear (j, M-1) and (j+1, 0). */
        for(k=0;k<2;k++){
          outp++;
          stddevp++;
          inp++;
          *stddevp = nodata;
          *outp = nodata;
        }
      } /* for(j) */
      /* Clear the rest of the last row: (N-1,1)..(N-1,M-1), i.e. M-1
       * cells ((N-1,0) was already cleared by the trailing loop above).
       * The original bound of M+1 wrote two elements past the end of the
       * image: a heap overflow on the last image and a race against the
       * thread owning the next image otherwise. */
      for(j=0;j<(M-1);j++){
        outp++;
        stddevp++;
        *outp = nodata;
        *stddevp = nodata;
      }
    } /* for(nimages) */
  } /* pragma omp */
  return 0;
}
/* In-place insertion sort of array[0..n-1] into DESCENDING order, so that
 * the largest values end up first (count() sums the leading entries). */
void sort(data_t *array, int n){
  int i;
  for (i = 1; i < n; i++) {
    data_t key = array[i];
    int j = i;
    /* Shift smaller elements right until key's slot is found. */
    while (j > 0 && key > array[j-1]) {
      array[j] = array[j-1];
      j--;
    }
    array[j] = key;
  }
}
|
conv_kernel_mips.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, Martin Han
* Author: hansh-sz@hotmail.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "conv_kernel_mips.h"
#include "wino_conv_kernel_mips.h"
#if __mips_msa
#include <msa.h>
#endif
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
/* Bytes needed to hold a private copy of the filter tensor's raw data. */
static int get_private_mem_size(struct ir_tensor* filter)
{
    return filter->elem_size * filter->elem_num; // caution
}
/* "Interleave" the filter weights into the private buffer.  On this
 * backend no reordering is required, so it is a plain byte copy of the
 * filter data into priv_info->interleave_buffer. */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    const void* src = filter->data;
    size_t nbytes = filter->elem_num * filter->elem_size;
    memcpy(priv_info->interleave_buffer, src, nbytes);
}
/* Unfold an input image (inc x inh x inw, CHW layout) into an im2col
 * matrix data_col of shape (ksize_h*ksize_w*inc) x (outh*outw), so that
 * convolution becomes a single GEMM.  Out-of-image taps (padding) are
 * written as zeros.  sh/sw = strides, ph/pw = pads, dh/dw = dilations.
 * 'outc' is accepted for signature compatibility but not used here. */
void im2col(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int outc, int ksize_h,
            int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int total_channels = ksize_h * ksize_w * inc;
    int c;
    for (c = 0; c < total_channels; ++c)
    {
        /* decompose the row index into (input channel, kernel y, kernel x) */
        const int kx = c % ksize_w;
        const int ky = (c / ksize_w) % ksize_h;
        const int ch = c / ksize_w / ksize_h;
        const int col0 = kx * dw - pw;
        /* first/last output column whose source pixel lies inside the image */
        int valid_lo = -col0 / sw + (-col0 % sw > 0);
        if (valid_lo < 0)
            valid_lo = 0;
        int valid_hi = (inw - col0) / sw + ((inw - col0) % sw > 0);
        if (valid_hi > outw)
            valid_hi = outw;
        int h;
        for (h = 0; h < outh; ++h)
        {
            const int row = ky * dh + h * sh - ph;
            float* dst = data_col + (c * outh + h) * outw;
            if (row < 0 || row >= inh)
            {
                /* whole output row reads from padding */
                memset(dst, 0, outw * sizeof(float));
                continue;
            }
            const float* src = data_img + inw * (row + inh * ch) + col0 + valid_lo * sw;
            float* dst_end = dst + valid_hi;
            memset(dst, 0, valid_lo * sizeof(float));          /* left padding  */
            dst += valid_lo;
            while (dst < dst_end)                              /* valid span    */
            {
                *dst++ = *src;
                src += sw;
            }
            memset(dst, 0, (outw - valid_hi) * sizeof(float)); /* right padding */
        }
    }
}
/* Run im2col for one (batch n, group) slice of the input tensor, writing
 * the unfolded matrix into priv_info->im2col_buffer. */
static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];
    /* byte offset of this batch/group inside the raw tensor data */
    void* input_base = input->data + (n * image_size + group * group_size) * input->elem_size;
    void* im2col_buf = priv_info->im2col_buffer;
    /* NOTE(review): the original also computed an 'input_zero' from
     * input->zero_point for UINT8 tensors but never used it; this fp32-only
     * path does not need it, so the dead code was removed. */
    im2col(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3],
           output->dims[1] / param->group, param->kernel_h, param->kernel_w, param->stride_h, param->stride_w,
           param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
}
/* Repack the K x N im2col matrix pB (row-major) into pB_t in panels of 4
 * columns: for each group of 4 columns the K rows are stored contiguously
 * as [c0 c1 c2 c3][c0 c1 c2 c3]...; leftover columns (N % 4) are each
 * stored as their own K-long strip at offset (i/4 + i%4)*4*K, matching
 * the indexing used by sgemm(). */
void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread)
{
    int blocks4 = N >> 2;
    int tail_start = blocks4 << 2;
    /* full 4-column panels:
       [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ...] */
#pragma omp parallel for num_threads(num_thread)
    for (int b = 0; b < blocks4; b++)
    {
        int col = b * 4;
        const float* src = pB + col;
        float* dst = pB_t + (col / 4) * 4 * K;
        for (int row = 0; row < K; row++)
        {
#if __mips_msa
            __msa_st_w(__msa_ld_w(src, 0), dst, 0);
#else
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
            dst[3] = src[3];
#endif // __mips_msa
            dst += 4;
            src += N;
        }
    }
    /* remaining single columns: [ch00, ch01, ch02, ch03, ...] */
#pragma omp parallel for num_threads(num_thread)
    for (int col = tail_start; col < N; col++)
    {
        const float* src = pB + col;
        float* dst = pB_t + (col / 4 + col % 4) * 4 * K;
        for (int row = 0; row < K; row++)
        {
            *dst++ = *src;
            src += N;
        }
    }
}
// unloop output M, unloop N, packet 4x4, using intrinsic
/* sgemm: C(M x N) = A_t * B_t, where pA_t holds the filter weights packed
 * in 4-row panels (see conv_hcl_interleave_pack4) and pB_t holds the
 * im2col data packed in 4-column panels (see input_pack4).  The output pC
 * is written row-major, M rows of N floats.  Rows are processed 4 at a
 * time, columns 4 at a time, with scalar fallbacks for the remainders;
 * on MSA-capable MIPS cores the inner kernels use vector intrinsics. */
static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;
    nn_outch = M >> 2;                 /* number of full 4-row panels */
    remain_outch_start = nn_outch << 2;
    // output ch0 - ch3: 4 output rows x 4 output columns per inner block
#pragma omp parallel for num_threads(num_thread)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int i = pp * 4;
        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        int j = 0;
        /* 4x4 tiles over the full 4-column panels of B */
        for (; j + 3 < N; j += 4)
        {
            float* va = pA_t + (i / 4) * 4 * K;   /* panel of 4 filter rows  */
            float* vb = pB_t + (j / 4) * 4 * K;   /* panel of 4 input columns */
#if __mips_msa
            v4f32 _sum0 = {0.f};
            v4f32 _sum1 = {0.f};
            v4f32 _sum2 = {0.f};
            v4f32 _sum3 = {0.f};
            for (int k = 0; k < K; k++)
            {
                // k0
                v4f32 _vb = (v4f32)__msa_ld_w(vb, 0);
                v4f32 _va0 = {va[0], va[0], va[0], va[0]};
                v4f32 _va1 = {va[1], va[1], va[1], va[1]};
                v4f32 _va2 = {va[2], va[2], va[2], va[2]};
                v4f32 _va3 = {va[3], va[3], va[3], va[3]};
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb, _va0)); // sum0 = (a00-a03) * k00
                _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_vb, _va1)); // sum1 = (a00-a03) * k10
                _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_vb, _va2)); // sum2 = (a00-a03) * k20
                _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_vb, _va3)); // sum3 = (a00-a03) * k30
                va += 4;
                vb += 4;
            }
            __msa_st_w((v4i32)_sum0, output0, 0);
            __msa_st_w((v4i32)_sum1, output1, 0);
            __msa_st_w((v4i32)_sum2, output2, 0);
            __msa_st_w((v4i32)_sum3, output3, 0);
#else
            /* scalar reference: accumulate 4 outputs for each of 4 rows */
            float sum0[4] = {0};
            float sum1[4] = {0};
            float sum2[4] = {0};
            float sum3[4] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 4; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 4;
            }
            for (int n = 0; n < 4; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif // __mips_msa
            output0 += 4;
            output1 += 4;
            output2 += 4;
            output3 += 4;
        }
        /* leftover columns (N % 4): 4 rows x 1 column */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 4) * 4 * K;
            float* vb = pB_t + (j / 4 + j % 4) * 4 * K;  /* strip layout for tail columns */
#if __mips_msa
            v4f32 _sum0_3 = {0.f};
            v4f32 _sum0 = {0.f};
            v4f32 _sum1 = {0.f};
            v4f32 _sum2 = {0.f};
            v4f32 _sum3 = {0.f};
            int k = 0;
            /* unroll K by 4; each _sumX lane holds one of the 4 output rows */
            for (; k + 3 < K; k = k + 4)
            {
                v4f32 _vb0 = {vb[0], vb[0], vb[0], vb[0]};
                v4f32 _vb1 = {vb[1], vb[1], vb[1], vb[1]};
                v4f32 _vb2 = {vb[2], vb[2], vb[2], vb[2]};
                v4f32 _vb3 = {vb[3], vb[3], vb[3], vb[3]};
                v4f32 _va0 = (v4f32)__msa_ld_w(va, 0);
                v4f32 _va1 = (v4f32)__msa_ld_w(va + 4, 0);
                v4f32 _va2 = (v4f32)__msa_ld_w(va + 8, 0);
                v4f32 _va3 = (v4f32)__msa_ld_w(va + 12, 0);
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_va0, _vb0)); // sum0 += (k00-k30) * a00
                _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_va1, _vb1)); // sum1 += (k01-k31) * a10
                _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_va2, _vb2)); // sum2 += (k02-k32) * a20
                _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_va3, _vb3)); // sum3 += (k03-k33) * a30
                va += 16;
                vb += 4;
            }
            _sum0 = __msa_fadd_w(_sum0, _sum1);
            _sum2 = __msa_fadd_w(_sum2, _sum3);
            _sum0_3 = __msa_fadd_w(_sum2, _sum0);
            // _sum0_3 = __msa_fadd_w(_sum0_3, _sum2);
            /* K remainder */
            for (; k < K; k++)
            {
                v4f32 _vb0 = {vb[0], vb[0], vb[0], vb[0]};
                v4f32 _va = (v4f32)__msa_ld_w(va, 0);
                _sum0_3 = __msa_fadd_w(_sum0_3, __msa_fmul_w(_va, _vb0)); // sum0 += (k00-k30) * a00
                va += 4;
                vb += 1;
            }
            output0[0] = _sum0_3[0];
            output1[0] = _sum0_3[1];
            output2[0] = _sum0_3[2];
            output3[0] = _sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                va += 4;
                vb += 1;
            }
            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif // __mips_msa
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }
    // output ch0: leftover rows (M % 4), one row at a time
#pragma omp parallel for num_threads(num_thread)
    for (int i=remain_outch_start; i<M; i++)
    {
        float* output = pC + i * N;
        int j = 0;
        /* 1 row x 4 columns */
        for (; j + 3 < N; j += 4)
        {
            float* va = pA_t + (i / 4 + i % 4) * 4 * K;  /* strip layout for tail rows */
            float* vb = pB_t + (j / 4) * 4 * K;
#if __mips_msa
            v4f32 _sum0 = {0.f};
            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                v4f32 _va0 = {va[0], va[0], va[0], va[0]};
                v4f32 _va1 = {va[1], va[1], va[1], va[1]};
                v4f32 _va2 = {va[2], va[2], va[2], va[2]};
                v4f32 _va3 = {va[3], va[3], va[3], va[3]};
                v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0);
                v4f32 _vb1 = (v4f32)__msa_ld_w(vb + 4, 0);
                v4f32 _vb2 = (v4f32)__msa_ld_w(vb + 8, 0);
                v4f32 _vb3 = (v4f32)__msa_ld_w(vb + 12, 0);
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, _va0)); // sum0 = (a00-a03) * k00
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb1, _va1)); // sum0 += (a10-a13) * k01
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb2, _va2)); // sum0 += (a20-a23) * k02
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb3, _va3)); // sum0 += (a30-a33) * k03
                va += 4;
                vb += 16;
            }
            for (; k < K; k++)
            {
                // k0
                v4f32 _va0 = {va[0]};
                v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0);
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, _va0)); // sum0 = (a00-a03) * k00
                va += 1;
                vb += 4;
            }
            __msa_st_w((v4i32)_sum0, output, 0);
#else
            float sum[4] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 4; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 4;
            }
            for (int n = 0; n < 4; n++)
            {
                output[n] = sum[n];
            }
#endif // __mips_msa
            output += 4;
        }
        /* 1 row x 1 column remainder */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 4 + i % 4) * 4 * K;
            float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
            int k = 0;
#if __mips_msa
            v4f32 _sum0 = {0.f};
            for (; k + 3 < K; k += 4)
            {
                v4f32 _p0 = (v4f32)__msa_ld_w(vb, 0);
                v4f32 _k0 = (v4f32)__msa_ld_w(va, 0);
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_p0, _k0));
                va += 4;
                vb += 4;
            }
            /* horizontal add of the 4 vector lanes */
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
            float sum0 = 0.f;
#endif // __mips_msa
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                va += 1;
                vb += 1;
            }
            output[0] = sum0;
            output++;
        }
    }
}
/* Compute one (batch n, group) slice of the convolution output as a GEMM
 * over the pre-packed filter and im2col buffers, then apply bias and the
 * activation selected by param->activation (0 = ReLU, >0 = ReLU6,
 * negative = none). */
static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                       struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_hw = output->dims[2] * output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];
    /* per-group slices of the packed filter and the output tensor */
    float* kernel_pack = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* input_pack = ( float* )priv_info->im2col_buffer_pack4;
    float* out_ptr = ( float* )output->data + n * out_image_size + outchan_g * group * out_hw;
    sgemm(outchan_g, out_hw, kernel_size, kernel_pack, input_pack, out_ptr, num_thread);
    /* add per-channel bias */
    if (bias)
    {
        const float* bias_ptr = ( float* )bias->data + outchan_g * group;
        for (int c = 0; c < outchan_g; c++)
        {
            for (int p = 0; p < out_hw; p++)
                out_ptr[c * out_hw + p] += bias_ptr[c];
        }
    }
    /* activation: relu */
    if (param->activation == 0)
    {
        for (int p = 0; p < outchan_g * out_hw; p++)
        {
            if (out_ptr[p] < 0)
                out_ptr[p] = 0;
        }
    }
    /* activation: relu6 */
    if (param->activation > 0)
    {
        for (int p = 0; p < outchan_g * out_hw; p++)
        {
            if (out_ptr[p] < 0)
                out_ptr[p] = 0;
            if (out_ptr[p] > 6)
                out_ptr[p] = 6;
        }
    }
}
/* check the conv wheather need to be using winograd */
/* Decide whether the winograd path can handle this convolution.
 * Returns 1 only for ungrouped 3x3, stride 1, dilation 1 convolutions
 * with at least 16 input and output channels on inputs larger than 10x10. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    if (in_h <= 10 && in_w <= 10)
        return 0;                      /* too small to amortize transforms */
    if (param->group != 1)
        return 0;
    if (param->kernel_h != 3 || param->kernel_w != 3)
        return 0;
    if (param->stride_h != 1 || param->stride_w != 1)
        return 0;
    if (param->dilation_h != 1 || param->dilation_w != 1)
        return 0;
    if (param->input_channel < 16 || param->output_channel < 16)
        return 0;
    return 1;
}
/* Bytes required for the im2col buffer of one group:
 * (output pixels) x (kernel taps per output pixel) x element size. */
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    int input_chan = param->input_channel / param->group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    return input->elem_size * output_xy * kernel_size;
}
/* Bytes required for the pack4 im2col buffer: N columns are stored as
 * N/4 four-wide panels plus N%4 single-column strips, each 4*K floats
 * wide in the packed layout (see input_pack4). */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    int panels = N / 4 + N % 4;
    return 4 * K * panels * filter->elem_size;
}
/* Bytes required for the pack4 filter buffer: M rows stored as M/4
 * four-row panels plus M%4 single-row strips of 4*K elements each. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    int panels = M / 4 + M % 4;
    return 4 * K * panels * filter->elem_size;
}
/* Repack the M x K filter matrix from interleave_buffer into
 * interleave_buffer_pack4: groups of 4 rows are interleaved column-wise
 * ([r0 r1 r2 r3][r0 r1 r2 r3]...), leftover rows (M % 4) are copied as
 * plain K-long strips at offset (p/4 + p%4)*4*K, matching sgemm(). */
void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = ( float* )priv_info->interleave_buffer;
    float* pA_t = ( float* )priv_info->interleave_buffer_pack4;
    int full_end = (M >> 2) << 2;   /* first row not covered by a 4-row panel */
    for (int p = 0; p < full_end; p += 4)
    {
        const float* r0 = pA + (p + 0) * K;
        const float* r1 = pA + (p + 1) * K;
        const float* r2 = pA + (p + 2) * K;
        const float* r3 = pA + (p + 3) * K;
        float* dst = pA_t + p * K;  /* (p/4)*4*K == p*K since p is a multiple of 4 */
        for (int q = 0; q < K; q++)
        {
            *dst++ = *r0++;
            *dst++ = *r1++;
            *dst++ = *r2++;
            *dst++ = *r3++;
        }
    }
    for (int p = full_end; p < M; p++)
    {
        const float* r0 = pA + p * K;
        float* dst = pA_t + (p / 4 + p % 4) * 4 * K;
        for (int q = 0; q < K; q++)
            *dst++ = *r0++;
    }
}
/* One-time setup before running the convolution: choose the winograd path
 * if eligible, otherwise allocate (unless externally provided) the im2col,
 * pack4-im2col and interleave buffers, copy the filter weights in, and
 * optionally build the pack4 filter layout.
 * NOTE(review): none of the sys_malloc results below are NULL-checked. */
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    /* check winograd implement, only for conv3x3s1 */
    priv_info->winograd = winograd_support(param, in_h, in_w);
    if (priv_info->winograd)
    {
        return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
    }
    /* im2col scratch buffer, unless the caller supplied one via conv_hcl_set_shared_mem() */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }
    /* pack4 im2col buffer, unless supplied via conv_hcl_set_shared_pack4_mem() */
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }
    /* flat copy of the filter weights */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }
    interleave(filter_tensor, priv_info);
    /* NOTE(review): this branch allocates when external_interleave_pack4_mem
     * IS set — the opposite polarity of the three "external" checks above.
     * Presumably the flag is pre-set by the init path so this always runs;
     * confirm against conv_hcl_init before treating it as a bug. */
    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;
        conv_hcl_interleave_pack4(M, K, priv_info);
        /* the flat copy is no longer needed once the pack4 layout exists */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    return 0;
}
/* Release buffers allocated by conv_hcl_prerun(); externally supplied
 * buffers are left to their owners. */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }
    /* NOTE(review): this branch tests interleave_buffer but frees
     * interleave_buffer_pack4 — looks like a copy/paste mismatch; also note
     * interleave_buffer itself is never freed in this function (prerun frees
     * it on the pack4 path, so a leak only occurs if that path was skipped).
     * Verify the intended ownership before changing. */
    if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem &&
        priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }
    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }
    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }
    /* pack4 filter buffer (pointer was NULLed above if already freed) */
    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }
    return 0;
}
/* Execute the convolution: dispatch to the winograd kernel when enabled,
 * otherwise run im2col + pack4 + sgemm for every (batch, group) pair.
 * Only TENGINE_DT_FP32 inputs are computed by the GEMM path. */
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                 struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param,
                                 num_thread, cpu_affinity);
    }
    int batch = input_tensor->dims[0];
    /* GEMM dimensions are the same for every batch/group, so hoist them */
    int K = filter_tensor->elem_num / filter_tensor->dims[0];
    int N = output_tensor->dims[2] * output_tensor->dims[3];
    for (int i = 0; i < batch; i++)
    {
        for (int j = 0; j < param->group; j++)
        {
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);
            input_pack4(K, N, priv_info->im2col_buffer, priv_info->im2col_buffer_pack4, num_thread);
            if (input_tensor->data_type == TENGINE_DT_FP32)
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j,
                           num_thread);
        }
    }
    return 0;
}
/* Adopt a caller-provided im2col scratch buffer; prerun will then skip
 * its own allocation and postrun will not free it. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;   /* mark as externally owned */
    return 0;
}
/* Adopt a caller-provided pack4 im2col buffer; prerun will then skip its
 * own allocation and postrun will not free it. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    priv_info->external_im2col_pack4_mem = 1;   /* mark as externally owned */
    return 0;
}
|
hitting.h | /**
Pasha: Parallel Algorithms for Approximating Compact Universal Hitting Sets
hitting.h
Header file for main operations for different options of hitting set calculations.
@author Baris Ekim
@version 1.0 4/15/19
*/
#ifndef HITTING_H
#define HITTING_H
#include "decycling.h"
#include "graph.h"
#include <cstdlib>
#include <iomanip>
#include <algorithm>
#include <omp.h>
int graph::Hitting(int L, string hittingFile) {
    /**
    Performs hitting set calculations without parallelization
    or randomization, counting L-k+1-long paths.
    Greedily removes the edge with the largest hitting number until no
    path of l = L-k+1 edges remains, writing each removed k-mer label
    to hittingFile.
    @param L: Sequence length, hittingFile: Output file destination.
    @return hittingCount: Size of hitting set.
    */
    vertexExp = pow(ALPHABET_SIZE, k-1);
    // NOTE(review): shadowed by the loop-local variable of the same name
    // below; this outer copy is never read.
    int imaxHittingNum = -1;
    ofstream hittingStream;
    int hittingCount = 0;
    l = L-k+1;   // path length (in edges) that witnesses an L-long sequence
    hittingNumArray = new double[edgeNum];
    used = new bool[vertexExp];
    finished = new bool[vertexExp];
    topoSort = new byte[vertexExp];
    // D and F are (l+1) x vertexExp DP tables backed by one contiguous pool
    // each; D[0]/F[0] retain the pool base so 'delete [] *D' below frees it.
    D = new long double*[l + 1];
    long double* Dpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Dpool += vertexExp) D[i] = Dpool;
    hittingStream.open(hittingFile);
    F = new long double*[l + 1];
    long double* Fpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Fpool += vertexExp) F[i] = Fpool;
    // Greedy loop: recompute path counts, pick the edge with the highest
    // hitting number, remove it, and record its label.
    while (calculatePathsSeq(l)) {
        int imaxHittingNum = calculateHittingNumber(l);
        if (imaxHittingNum < 0) break;
        removeEdge(imaxHittingNum);
        string label = getLabel(imaxHittingNum);
        hittingStream << label << "\n";
        hittingCount++;
    }
    hittingStream.close();
    delete [] *D;
    delete [] D;
    delete [] *F;
    delete [] F;
    // NOTE(review): hittingNumArray/used/finished/topoSort allocated above
    // are not released here — presumably owned elsewhere; verify.
    topologicalSort();
    cout << "Length of longest remaining path: " << maxLength() << "\n";
    return hittingCount;
}
int graph::HittingAny(int L, int x, string hittingFile) {
    /**
    Performs hitting set calculations without parallelization
    or randomization, counting paths of all length.
    Repeatedly removes batches of x edges chosen by
    calculateHittingNumberAny() until the longest remaining path is
    shorter than l = L-k+1.
    @param L: Sequence length, x: Number of vertices, hittingFile: Output file destination.
    @return hittingCount: Size of hitting set.
    */
    vertexExp = pow(ALPHABET_SIZE, k-1);
    ofstream hittingStream;
    byte* imaxHittingNum;   // batch of x edge indices chosen per iteration
    int hittingCount = 0;
    l = L-k+1;
    hittingNumAnyArray = new double[edgeNum];
    used = new bool[vertexExp];
    finished = new bool[vertexExp];
    topoSort = new byte[vertexExp];
    hittingStream.open(hittingFile);
    topologicalSort();
    // Single-row D/F tables (path counting over all lengths needs only one row).
    D = new long double*[1];
    long double* Dpool = new long double[(1)* vertexExp];
    for(int i = 0; i < 1; i++, Dpool += vertexExp) D[i] = Dpool;
    F = new long double*[1];
    long double* Fpool = new long double[(1)* vertexExp];
    for(int i = 0; i < 1; i++, Fpool += vertexExp) F[i] = Fpool;
    while (maxLength() >= l) {
        calculatePathsAny();
        // NOTE(review): if calculateHittingNumberAny() heap-allocates its
        // result, the previous iteration's array is leaked here — verify.
        imaxHittingNum = calculateHittingNumberAny(x);
        for (int i = 0; i < x; i++) {
            removeEdge(imaxHittingNum[i]);
            string label = getLabel(imaxHittingNum[i]);
            hittingStream << label << "\n";
            hittingCount++;
        }
    }
    hittingStream.close();
    delete [] *D;
    delete [] D;
    delete [] *F;
    delete [] F;
    topologicalSort();
    cout << "Length of longest remaining path: " << maxLength() << "\n";
    return hittingCount;
}
int graph::HittingParallel(int L, string hittingFile, int threads) {
    /**
    Performs hitting set calculations with parallelization
    and without randomization, counting L-k+1-long paths.
    Same greedy loop as Hitting(), but path counting and hitting-number
    selection run on 'threads' OpenMP threads.
    @param L: Sequence length, hittingFile: Output file destination.
    @return hittingCount: Size of hitting set.
    */
    vertexExp = pow(ALPHABET_SIZE, k-1);
    // NOTE(review): shadowed by the loop-local variable below; never read.
    int imaxHittingNum = -1;
    ofstream hittingStream;
    int hittingCount = 0;
    l = L-k+1;
    hittingNumArray = new double[edgeNum];
    stageArray = new byte[edgeNum];
    used = new bool[vertexExp];
    finished = new bool[vertexExp];
    topoSort = new byte[vertexExp];
    // (l+1) x vertexExp DP tables backed by contiguous pools; D[0]/F[0]
    // keep the pool base for the deletes below.
    D = new long double*[l + 1];
    long double* Dpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Dpool += vertexExp) D[i] = Dpool;
    hittingStream.open(hittingFile);
    F = new long double*[l + 1];
    long double* Fpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Fpool += vertexExp) F[i] = Fpool;
    while (calculatePaths(l, threads)) {
        int imaxHittingNum = calculateHittingNumberParallel(l, false, threads);
        if (imaxHittingNum < 0) break;
        removeEdge(imaxHittingNum);
        string label = getLabel(imaxHittingNum);
        hittingStream << label << "\n";
        hittingCount++;
    }
    hittingStream.close();
    delete [] *D;
    delete [] D;
    delete [] *F;
    delete [] F;
    topologicalSort();
    cout << "Length of longest remaining path: " << maxLength() << "\n";
    return hittingCount;
}
int graph::HittingRandomParallel(int L, string hittingFile, int threads) {
    /**
    Performs hitting set calculations with parallelization
    and with randomization, counting L-k+1-long paths.
    Edges are accepted in stages: high-hitting-number edges deterministically,
    the rest with probability delta/l; a stage is committed only if the paths
    it covers justify the edges it spends (the pathCountStage test below).
    @param L: Sequence length, hittingFile: Output file destination.
    @return hittingCount: Size of hitting set.
    */
    omp_set_dynamic(0);   // force the exact thread counts requested below
    vertexExp = pow(ALPHABET_SIZE, k-1);
    ofstream hittingStream;
    int hittingCount = 0;
    l = L-k+1;
    epsilon = 0.1;
    delta = 1/(double)l;
    if (l <= 200) {
        delta = 0.1;
        epsilon = 0.1;
    }
    double alpha = 1 - 4*delta -2*epsilon;
    cout << "Alpha: " << 1/alpha << endl;
    cout << "Delta: " << delta << endl;
    cout << "Epsilon: " << epsilon << endl;
    // NOTE(review): i and j are declared here but written inside the
    // omp-parallel loops below, so they are shared across threads — a data
    // race. They should be loop-local (or declared private). Verify intent.
    int i;
    int j;
    hittingNumArray = new double[edgeNum];
    stageArray = new byte[edgeNum];
    used = new bool[vertexExp];
    finished = new bool[vertexExp];
    pick = new bool[edgeNum];
    topoSort = new byte[vertexExp];
    // (l+1) x vertexExp DP tables backed by contiguous pools.
    D = new long double*[l + 1];
    long double* Dpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Dpool += vertexExp) D[i] = Dpool;
    hittingStream.open(hittingFile);
    F = new long double*[l + 1];
    long double* Fpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Fpool += vertexExp) F[i] = Fpool;
    calculatePaths(l, threads);
    int imaxHittingNum = calculateHittingNumberParallel(l, false, threads);
    // h indexes geometric buckets of hitting numbers, highest first.
    h = findLog((1.0+epsilon), hittingNumArray[imaxHittingNum]);
    double prob = delta/l;
    while (h > 0) {
        total = 0;
        int hittingCountStage = 0;
        double pathCountStage = 0;
        calculatePaths(l, threads);
        if (calculateHittingNumberParallel(l, true, threads) < 0) break;
        stageVertices = pushBackVector();
        // Deterministically pick edges whose hitting number dominates the stage.
#pragma omp parallel for num_threads(threads)
        for (int it = 0; it < stageVertices.size(); it++) {
            i = stageVertices[it];
#pragma omp critical
            if ((pick[i] == false) && (hittingNumArray[i] > (pow(delta, 3) * total))) {
                stageArray[i] = 0;
                pick[i] = true;
                hittingCountStage++;
                pathCountStage += hittingNumArray[i];
            }
        }
        // Randomly pick remaining stage edges with probability 'prob'.
        // NOTE(review): rand() is not thread-safe inside an omp-parallel
        // region, and the collapse(2) pair re-tests each i once per jt —
        // verify this matches the intended sampling.
#pragma omp parallel for collapse (2) num_threads(threads)
        for (int it = 0; it < stageVertices.size(); it++) {
            for (int jt = 0; jt < stageVertices.size(); jt++) {
                i = stageVertices[it];
#pragma omp critical
                if (pick[i] == false) {
                    if (((double) rand() / (RAND_MAX)) <= prob) {
                        stageArray[i] = 0;
                        pick[i] = true;
                        hittingCountStage += 1;
                        pathCountStage += hittingNumArray[i];
                    }
                    j = stageVertices[jt];
                    if (pick[j] == false) {
                        if (((double) rand() / (RAND_MAX)) <= prob) {
                            stageArray[j] = 0;
                            pick[j] = true;
                            hittingCountStage += 1;
                            pathCountStage += hittingNumArray[j];
                        }
                    }
                }
            }
        }
        hittingCount += hittingCountStage;
        // Commit the stage only if it covers enough paths per picked edge;
        // otherwise roll the count back and retry the same bucket.
        if (pathCountStage >= hittingCountStage * pow((1.0 + epsilon), h) * (1 - 6*delta - 2*epsilon)) {
            for (int it = 0; it < stageVertices.size(); it++) {
                i = stageVertices[it];
                if (pick[i] == true) {
                    removeEdge(i);
                    string label = getLabel(i);
                    hittingStream << label << "\n";
                }
            }
            h--;
        }
        else hittingCount -= hittingCountStage;
    }
    hittingStream.close();
    delete [] *D;
    delete [] D;
    delete [] *F;
    delete [] F;
    topologicalSort();
    cout << "Length of longest remaining path: " << maxLength() << "\n";
    return hittingCount;
}
#endif
|
dbscan.h | #pragma once
// Code adapted from https://github.com/propanoid/DBSCAN
#include <vector>
#include <algorithm>
#include <omp.h>
// Any basic vector/matrix library should also work
#include <Eigen/Core>
namespace clustering
{
// Density-based clustering (DBSCAN) over a dense distance matrix.
// Vector/Matrix are Eigen-like types; labels are -1 for noise or a
// 0-based cluster id. Distances are weighted L1 over min-max-normalized
// features, and 'eps' is given as a fraction of the observed distance
// range (rescaled to an absolute threshold in calc_dist_matrix).
template<typename Vector, typename Matrix>
class DBSCAN
{
public:
    typedef Vector FeaturesWeights;
    typedef Matrix ClusterData;
    typedef Matrix DistanceMatrix;
    typedef std::vector<unsigned int> Neighbors;
    typedef std::vector<int> Labels;
private:
    double m_eps;          // relative eps on entry; absolute after calc_dist_matrix
    size_t m_min_elems;    // minimum neighborhood size for a core point
    double m_dmin;         // smallest pairwise distance seen
    double m_dmax;         // largest pairwise distance seen
    Labels m_labels;       // per-row cluster id, -1 = noise
public:
    // 'eps' is the search space for neighbors in the range [0,1], where 0.0 is exactly self and 1.0 is entire dataset
    DBSCAN(double eps, size_t min_elems)
        : m_eps( eps )
        , m_min_elems( min_elems )
        , m_dmin(0.0)
        , m_dmax(0.0)
    {
        reset();
    }
    // Call this to perform clustering, get results by calling 'get_labels()'
    void fit( const ClusterData & C )
    {
        // all features weighted equally
        const FeaturesWeights W = std_weights( C.cols() );
        wfit( C, W );
    }
    const Labels & get_labels() const
    {
        return m_labels;
    }
    void reset()
    {
        m_labels.clear();
    }
    // Re-arm the parameters (e.g. before fitting again; note that fit()
    // rescales m_eps, so set it again between runs).
    void init(double eps, size_t min_elems)
    {
        m_eps = eps;
        m_min_elems = min_elems;
    }
    // Useful for testing: random matrix with entries in [-1, 1].
    static ClusterData gen_cluster_data( size_t features_num, size_t elements_num )
    {
        ClusterData cl_d( elements_num, features_num );
        for (size_t i = 0; i < elements_num; ++i)
            for (size_t j = 0; j < features_num; ++j)
                cl_d(i, j) = (-1.0 + rand() * (2.0) / RAND_MAX);
        return cl_d;
    }
    // Uniform feature weights (all 1.0).
    FeaturesWeights std_weights( size_t s )
    {
        // num cols
        FeaturesWeights ws( s );
        for (size_t i = 0; i < s; ++i)
            ws(i) = 1.0;
        return ws;
    }
    // Cluster directly from a precomputed distance matrix (m_eps is used
    // as an absolute threshold here — no rescaling is applied).
    void fit_precomputed( const DistanceMatrix & D )
    {
        prepare_labels( D.rows() );
        dbscan( D );
    }
    // Weighted fit: normalize features, build distances, then cluster.
    void wfit( const ClusterData & C, const FeaturesWeights & W )
    {
        prepare_labels( C.rows() );
        const DistanceMatrix D = calc_dist_matrix( C, W );
        dbscan( D );
    }
private:
    // All points start unlabeled (-1 = noise).
    void prepare_labels( size_t s )
    {
        m_labels.resize(s, -1);
    }
    // Indices of all points within m_eps of 'pid' (includes pid itself,
    // since D(pid, pid) == 0).
    Neighbors find_neighbors(const DistanceMatrix & D, unsigned int pid)
    {
        Neighbors ne;
        for (unsigned int j = 0; j < D.rows(); ++j)
        {
            if ( D(pid, j) <= m_eps )
            {
                ne.push_back(j);
            }
        }
        return ne;
    }
    // Build the symmetric weighted-L1 distance matrix over min-max
    // normalized columns. Side effect: converts m_eps from a fraction of
    // the distance range into an absolute threshold.
    const DistanceMatrix calc_dist_matrix( const ClusterData & C, const FeaturesWeights & W )
    {
        ClusterData cl_d = C;
        // normalize each feature column to [0, 1]
#pragma omp parallel for
        for (int i = 0; i < (int)cl_d.cols(); ++i)
        {
            auto col = cl_d.col(i);
            const auto r = std::minmax_element( col.data(), col.data() + col.size() );
            double data_min = *r.first;
            double data_range = *r.second - *r.first;
            if (data_range == 0.0) { data_range = 1.0; }  // constant column: avoid div-by-zero
            const double scale = 1/data_range;
            const double min = -1.0*data_min*scale;
            col *= scale;
            col += Vector::Constant(col.size(), min);
            cl_d.col(i) = col;
        }
        // rows x rows
        DistanceMatrix d_m( cl_d.rows(), cl_d.rows() );
        Vector d_max( cl_d.rows() );
        Vector d_min( cl_d.rows() );
        for (int i = 0; i < (int)cl_d.rows(); ++i)
        {
            // fill the upper triangle in parallel, mirror into the lower
#pragma omp parallel for
            for (int j = i; j < (int)cl_d.rows(); ++j)
            {
                d_m(i, j) = 0.0;
                if (i != j)
                {
                    Vector U = cl_d.row(i);
                    Vector V = cl_d.row(j);
                    Vector diff = ( U-V );
                    for(int k = 0; k < (int)diff.size(); k++)
                    {
                        auto e = diff[k];
                        d_m(i, j) += fabs(e)*W[k];
                    }
                    d_m(j, i) = d_m(i, j);
                }
            }
            const auto cur_row = d_m.row(i);
            const auto mm = std::minmax_element( cur_row.data(), cur_row.data() + cur_row.size() );
            d_max(i) = *mm.second;
            d_min(i) = *mm.first;
        }
        m_dmin = *(std::min_element( d_min.data(), d_min.data() + d_min.size() ));
        m_dmax = *(std::max_element( d_max.data(), d_max.data() + d_max.size() ));
        // convert relative eps to an absolute distance threshold
        m_eps = (m_dmax - m_dmin) * m_eps + m_dmin;
        return d_m;
    }
    // Classic DBSCAN region growing: each unvisited core point seeds a
    // cluster; its neighborhood list 'ne' is extended in place while being
    // iterated (indices, not iterators, so the push_backs are safe).
    void dbscan( const DistanceMatrix & dm )
    {
        std::vector<unsigned int> visited( dm.rows() );
        unsigned int cluster_id = 0;
        for (unsigned int pid = 0; pid < dm.rows(); ++pid)
        {
            if ( !visited[pid] )
            {
                visited[pid] = 1;
                Neighbors ne = find_neighbors(dm, pid );
                if (ne.size() >= m_min_elems)
                {
                    m_labels[pid] = cluster_id;
                    for (unsigned int i = 0; i < ne.size(); ++i)
                    {
                        unsigned int nPid = ne[i];
                        if ( !visited[nPid] )
                        {
                            visited[nPid] = 1;
                            Neighbors ne1 = find_neighbors(dm, nPid);
                            if ( ne1.size() >= m_min_elems )
                            {
                                // nPid is itself a core point: absorb its neighborhood
                                for (const auto & n1 : ne1)
                                {
                                    ne.push_back(n1);
                                }
                            }
                        }
                        if ( m_labels[nPid] == -1 )
                        {
                            m_labels[nPid] = cluster_id;
                        }
                    }
                    ++cluster_id;
                }
            }
        }
    }
};
}
|
VolumetricAveragePooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricAveragePooling.c"
#else
#include <THNN/generic/pooling_shape.h>
#include <algorithm>
/* Validate the arguments of volumetric (3D) average pooling: positive
 * kernel/stride, 4D or 5D non-empty input, input no smaller than the
 * kernel, pads at most half the kernel, and a non-degenerate output
 * size.  When gradOutput is non-NULL its shape is checked against the
 * computed output dimensions. */
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int padT,
          int padW,
          int padH,
          bool ceil_mode)
{
    int64_t nslices;
    int64_t itime;
    int64_t iheight;
    int64_t iwidth;
    int64_t otime;
    int64_t oheight;
    int64_t owidth;
    int ndim = input->dim();
    /* dimension indices for 4D (C,T,H,W) input; shifted by one below for
     * 5D batch-mode (N,C,T,H,W) input */
    int dimN = 0;
    int dimt = 1;
    int dimh = 2;
    int dimw = 3;
    if (input->dim() == 5)
    {
        dimN++;
        dimt++;
        dimh++;
        dimw++;
    }
    THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
               "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
               kT, kH, kW);
    THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
               "stride should be greater than zero, but got dT: %d dH: %d dW: %d",
               dT, dH, dW);
    THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                  "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
    THArgCheck(input->size(dimw) >= kW && input->size(dimh) >= kH
               && input->size(dimt) >= kT, 2,
               "input image (T: %d H: %d W: %d) smaller than "
               "kernel size (kT: %d kH: %d kW: %d)",
               input->size(dimt), input->size(dimh), input->size(dimw),
               kT, kH, kW);
    // The second argument is argNumber... here is the index of padH.
    THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11,
               "pad should not be greater than half of kernel size, but got "
               "padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d",
               padT, padW, padH, kT, kW, kH);
    /* sizes */
    nslices = input->size(dimN);
    itime = input->size(dimt);
    iheight = input->size(dimh);
    iwidth = input->size(dimw);
    /* expected output extents given kernel/stride/pad and ceil_mode */
    otime = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
    oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
    owidth = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);
    if (otime < 1 || owidth < 1 || oheight < 1)
        THError("Given input size: (%dx%dx%dx%d). "
                "Calculated output size: (%dx%dx%dx%d). Output size is too small",
                nslices,itime,iheight,iwidth,nslices,otime,oheight,owidth);
    if (gradOutput != NULL) {
        THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
        THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
        THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
        THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
    }
}
/* Average-pool one frame (nslices channels of itime x iheight x iwidth)
 * into output of otime x oheight x owidth.  Each output cell is the sum
 * over its (possibly pad-clipped) pooling window divided either by the
 * full window size (count_include_pad) or by the number of in-bounds
 * elements.  Channels are processed in parallel via OpenMP. */
static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
          scalar_t *input_p,
          scalar_t *output_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int padT,
          int padW,
          int padH,
          bool count_include_pad)
{
    int64_t k;
#pragma omp parallel for private(k)
    for (k = 0; k < nslices; k++)
    {
        int64_t i, j, ti;
        /* local pointers. */
        scalar_t *ip = input_p + k * itime * iwidth * iheight;
        scalar_t *op = output_p + k * otime * owidth * oheight;
        /* zero the channel's output before accumulating */
        for (i = 0; i < otime * oheight * owidth; ++i)
            *(op + i) = 0;
        /* loop over output */
        for (ti = 0; ti < otime; ti++)
        {
            for (i = 0; i < oheight; i++)
            {
                for (j = 0; j < owidth; j++)
                {
                    /* compute pool range. */
                    int64_t tstart = ti * dT - padT;
                    int64_t hstart = i * dH - padH;
                    int64_t wstart = j * dW - padW;
                    /* window end clipped to input-plus-padding; pool_size is
                     * the padded window volume */
                    int64_t tend = std::min(tstart + kT, itime + padT);
                    int64_t hend = std::min(hstart + kH, iheight + padH);
                    int64_t wend = std::min(wstart + kW, iwidth + padW);
                    int64_t pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
                    /* clip the window to the actual input extents */
                    tstart = std::max(tstart, (int64_t) 0);
                    hstart = std::max(hstart, (int64_t) 0);
                    wstart = std::max(wstart, (int64_t) 0);
                    tend = std::min(tend, itime);
                    hend = std::min(hend, iheight);
                    wend = std::min(wend, iwidth);
                    /* divisor: padded volume vs in-bounds volume */
                    int divide_factor;
                    if (count_include_pad)
                        divide_factor = pool_size;
                    else
                        divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);
                    /* compute local sum: */
                    scalar_t sum = 0.0;
                    int64_t x, y, z;
                    for (z = tstart; z < tend; z++)
                    {
                        for (y = hstart; y < hend; y++)
                        {
                            for (x = wstart; x < wend; x++)
                            {
                                sum += *(ip + z * iwidth * iheight + y * iwidth + x);
                            }
                        }
                    }
                    /* set output to local max */
                    *op++ += sum / divide_factor;
                }
            }
        }
    }
}
/* Forward pass of 3D average pooling.
 * Accepts a 4D (C,T,H,W) or 5D (N,C,T,H,W) input tensor, resizes `output`
 * accordingly, and averages over kT x kH x kW windows with stride d* and
 * zero-padding pad*.  ceil_mode selects ceil vs floor output-size rounding;
 * count_include_pad selects the divisor convention (see the frame kernel). */
void THNN_(VolumetricAveragePooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool ceil_mode,
bool count_include_pad)
{
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
scalar_t *input_data;
scalar_t *output_data;
/* validate kernel/stride/pad against the input shape (gradOutput == NULL
   because there is none in the forward pass) */
THNN_(VolumetricAveragePooling_shapeCheck)(
state, input, NULL, kT, kW, kH,
dT, dW, dH, padT, padW, padH, ceil_mode);
/* dimension indices for the non-batch (4D) layout ... */
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
/* ... shifted by one when a leading batch dimension is present */
if (input->dim() == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
nslices = input->size(dimN);
itime = input->size(dimt);
iheight = input->size(dimh);
iwidth = input->size(dimw);
otime = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
owidth = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);
/* get contiguous input */
input = THTensor_(newContiguous)(input);
if (input->dim() == 4) /* non-batch mode */
{
/* resize output */
THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data, output_data, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
else /* batch mode */
{
int64_t p;
int64_t nBatch = input->size(0);
/* per-sample element strides into the flat input/output buffers */
int64_t istride = nslices * itime * iwidth * iheight;
int64_t ostride = nslices * otime * owidth * oheight;
/* resize output */
THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
/* one OpenMP thread per batch element; the frame kernel's own omp loop
   nests inside this one */
#pragma omp parallel for private(p)
for (p=0; p < nBatch; p++)
{
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data + p * istride, output_data + p * ostride, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
}
/* cleanup: release the contiguous copy made above */
c10::raw::intrusive_ptr::decref(input);
}
/* Backward frame kernel: scatter each gradOutput element, divided by the
 * same divisor used in the forward pass, back over its pooling window in
 * gradInput.  One OpenMP thread per slice k; the gradInput slice is zeroed
 * first because windows overlap and gradients accumulate with +=. */
static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
scalar_t *gradInput_p,
scalar_t *gradOutput_p,
int64_t nslices,
int64_t itime,
int64_t iwidth,
int64_t iheight,
int64_t otime,
int64_t owidth,
int64_t oheight,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool count_include_pad)
{
int64_t k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
int64_t i, j, ti;
/* local pointers */
scalar_t *ip = gradInput_p + k * itime * iwidth * iheight;
scalar_t *op = gradOutput_p + k * otime * owidth * oheight;
/* zero the gradInput slice: overlapping windows accumulate below */
for (i = 0; i < itime*iwidth*iheight; i++)
*(ip + i) = 0;
/* loop over output */
for (ti = 0; ti < otime; ti++)
{
for (i = 0; i < oheight; i++)
{
for (j = 0; j < owidth; j++)
{
/* pooling window for this output element (may overhang padding) */
int64_t tstart = ti * dT - padT;
int64_t hstart = i * dH - padH;
int64_t wstart = j * dW - padW;
int64_t tend = std::min(tstart + kT, itime + padT);
int64_t hend = std::min(hstart + kH, iheight + padH);
int64_t wend = std::min(wstart + kW, iwidth + padW);
/* window size before clipping (includes padding) */
int64_t pool_size = (tend -tstart) * (hend - hstart) * (wend - wstart);
/* clip to the valid input region */
tstart = std::max(tstart, (int64_t) 0);
hstart = std::max(hstart, (int64_t) 0);
wstart = std::max(wstart, (int64_t) 0);
tend = std::min(tend, itime);
hend = std::min(hend, iheight);
wend = std::min(wend, iwidth);
/* must mirror the divisor choice of the forward pass exactly */
int64_t divide_factor;
if (count_include_pad)
divide_factor = pool_size;
else
divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);
/* scatter gradients out to footprint: */
scalar_t val = *op++;
int64_t x,y,z;
for (z = tstart; z < tend; z++)
{
for (y = hstart; y < hend; y++)
{
for (x = wstart; x < wend; x++)
{
*(ip + z * iheight * iwidth + y * iwidth + x) += val / divide_factor;
}
}
}
}
}
}
}
}
/* Backward pass of 3D average pooling.
 * Resizes gradInput to match input, zeroes it, and distributes gradOutput
 * back over the pooling windows.  Handles both 4D (C,T,H,W) and 5D
 * (N,C,T,H,W) layouts; all hyper-parameters must match the forward call. */
void THNN_(VolumetricAveragePooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool ceil_mode,
bool count_include_pad)
{
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
scalar_t *gradInput_data;
scalar_t *gradOutput_data;
/* dimension indices for the non-batch layout; shifted below for 5D input */
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
/* validates input AND checks gradOutput against the expected output shape */
THNN_(VolumetricAveragePooling_shapeCheck)(
state, input, gradOutput, kT, kW, kH,
dT, dW, dH, padT, padW, padH, ceil_mode);
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->dim() == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
nslices = input->size(dimN);
itime = input->size(dimt);
iheight = input->size(dimh);
iwidth = input->size(dimw);
/* output extents are taken from gradOutput (already shape-checked above) */
otime = gradOutput->size(dimt);
oheight = gradOutput->size(dimh);
owidth = gradOutput->size(dimw);
/* get raw pointers */
gradInput_data = gradInput->data<scalar_t>();
gradOutput_data = gradOutput->data<scalar_t>();
/* backprop */
if (input->dim() == 4) /* non-batch mode*/
{
THNN_(VolumetricAveragePooling_updateGradInput_frame)(
gradInput_data, gradOutput_data, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
else /* batch mode */
{
int64_t p;
int64_t nBatch = input->size(0);
/* per-sample element strides into the flat buffers */
int64_t istride = nslices * itime * iwidth * iheight;
int64_t ostride = nslices * otime * owidth * oheight;
#pragma omp parallel for private(p)
for (p = 0; p < nBatch; p++)
{
THNN_(VolumetricAveragePooling_updateGradInput_frame)(
gradInput_data + p * istride, gradOutput_data + p * ostride, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
}
/* cleanup: release the contiguous copy of gradOutput */
c10::raw::intrusive_ptr::decref(gradOutput);
}
#endif
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <Python.h>
#include <assert.h>
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <math.h>
#include <numpy/arrayobject.h>
#include <lapack_wrapper.h>
#include <phonon.h>
#include <phonoc_array.h>
#include <phonoc_const.h>
#include <phonon3_h/fc3.h>
#include <phonon3_h/frequency_shift.h>
#include <phonon3_h/interaction.h>
#include <phonon3_h/imag_self_energy_with_g.h>
#include <phonon3_h/pp_collision.h>
#include <phonon3_h/collision_matrix.h>
#include <other_h/isotope.h>
#include <triplet_h/triplet.h>
#include <tetrahedron_method.h>
/* #define LIBFLAME */
#ifdef LIBFLAME
#include <flame_wrapper.h>
#endif
/* Forward declarations of the Python-callable entry points registered in
 * _phono3py_methods below.  The duplicate declaration of
 * py_set_permutation_symmetry_fc3 that used to appear here was removed. */
static PyObject * py_get_phonons_at_gridpoints(PyObject *self, PyObject *args);
static PyObject * py_get_interaction(PyObject *self, PyObject *args);
static PyObject * py_get_pp_collision(PyObject *self, PyObject *args);
static PyObject *
py_get_pp_collision_with_sigma(PyObject *self, PyObject *args);
static PyObject *
py_get_imag_self_energy_with_g(PyObject *self, PyObject *args);
static PyObject *
py_get_detailed_imag_self_energy_with_g(PyObject *self, PyObject *args);
static PyObject * py_get_frequency_shift_at_bands(PyObject *self,
                                                  PyObject *args);
static PyObject * py_get_collision_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_reducible_collision_matrix(PyObject *self,
                                                    PyObject *args);
static PyObject * py_symmetrize_collision_matrix(PyObject *self,
                                                 PyObject *args);
static PyObject * py_expand_collision_matrix(PyObject *self, PyObject *args);
static PyObject * py_distribute_fc3(PyObject *self, PyObject *args);
static PyObject * py_rotate_delta_fc2s(PyObject *self, PyObject *args);
static PyObject * py_get_isotope_strength(PyObject *self, PyObject *args);
static PyObject * py_get_thm_isotope_strength(PyObject *self, PyObject *args);
static PyObject *
py_set_permutation_symmetry_fc3(PyObject *self, PyObject *args);
static PyObject *
py_set_permutation_symmetry_compact_fc3(PyObject *self, PyObject *args);
static PyObject * py_transpose_compact_fc3(PyObject *self, PyObject *args);
/* NOTE: "gird" is a historical typo, kept because the definition and the
   method table use the same spelling. */
static PyObject * py_get_neighboring_gird_points(PyObject *self, PyObject *args);
static PyObject * py_set_integration_weights(PyObject *self, PyObject *args);
static PyObject *
py_tpl_get_triplets_reciprocal_mesh_at_q(PyObject *self, PyObject *args);
static PyObject * py_tpl_get_BZ_triplets_at_q(PyObject *self, PyObject *args);
static PyObject *
py_set_triplets_integration_weights(PyObject *self, PyObject *args);
static PyObject *
py_set_triplets_integration_weights_with_sigma(PyObject *self, PyObject *args);
static PyObject *
py_diagonalize_collision_matrix(PyObject *self, PyObject *args);
static PyObject * py_pinv_from_eigensolution(PyObject *self, PyObject *args);
static PyObject * py_get_default_colmat_solver(PyObject *self, PyObject *args);
#ifdef LIBFLAME
static PyObject * py_inverse_collision_matrix_libflame(PyObject *self, PyObject *args);
#endif
/* Internal helpers defined later in this file. */
static void pinv_from_eigensolution(double *data,
                                    const double *eigvals,
                                    const size_t size,
                                    const double cutoff,
                                    const int pinv_method);
static void show_colmat_info(const PyArrayObject *collision_matrix_py,
                             const size_t i_sigma,
                             const size_t i_temp,
                             const size_t adrs_shift);
/* Per-module state: holds the module's exception object, created as
   "_phono3py.Error" in the init function below. */
struct module_state {
PyObject *error; /* raised by error_out(); visited/cleared by the GC hooks */
};
#if PY_MAJOR_VERSION >= 3
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif
/* Set the module's own exception type with a generic message and return
 * NULL so the error propagates to the Python caller. */
static PyObject *
error_out(PyObject *m) {
    struct module_state *state = GETSTATE(m);
    PyErr_SetString(state->error, "something bad happened");
    return NULL;
}
static PyMethodDef _phono3py_methods[] = {
{"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
{"phonons_at_gridpoints",
py_get_phonons_at_gridpoints,
METH_VARARGS,
"Set phonons at grid points"},
{"interaction",
(PyCFunction)py_get_interaction,
METH_VARARGS,
"Interaction of triplets"},
{"pp_collision",
(PyCFunction)py_get_pp_collision,
METH_VARARGS,
"Collision and ph-ph calculation"},
{"pp_collision_with_sigma",
(PyCFunction)py_get_pp_collision_with_sigma,
METH_VARARGS,
"Collision and ph-ph calculation for smearing method"},
{"imag_self_energy_with_g",
(PyCFunction)py_get_imag_self_energy_with_g,
METH_VARARGS,
"Imaginary part of self energy at frequency points with g"},
{"detailed_imag_self_energy_with_g",
(PyCFunction)py_get_detailed_imag_self_energy_with_g,
METH_VARARGS,
"Detailed contribution to imaginary part of self energy at frequency points with g"},
{"frequency_shift_at_bands",
(PyCFunction)py_get_frequency_shift_at_bands,
METH_VARARGS,
"Phonon frequency shift from third order force constants"},
{"collision_matrix",
(PyCFunction)py_get_collision_matrix,
METH_VARARGS,
"Collision matrix with g"},
{"reducible_collision_matrix",
(PyCFunction)py_get_reducible_collision_matrix,
METH_VARARGS,
"Collision matrix with g for reducible grid points"},
{"symmetrize_collision_matrix",
(PyCFunction)py_symmetrize_collision_matrix,
METH_VARARGS,
"Symmetrize collision matrix"},
{"expand_collision_matrix",
(PyCFunction)py_expand_collision_matrix,
METH_VARARGS,
"Expand collision matrix"},
{"distribute_fc3",
(PyCFunction)py_distribute_fc3,
METH_VARARGS,
"Distribute least fc3 to full fc3"},
{"rotate_delta_fc2s",
(PyCFunction)py_rotate_delta_fc2s,
METH_VARARGS,
"Rotate delta fc2s"},
{"isotope_strength",
(PyCFunction)py_get_isotope_strength,
METH_VARARGS,
"Isotope scattering strength"},
{"thm_isotope_strength",
(PyCFunction)py_get_thm_isotope_strength,
METH_VARARGS,
"Isotope scattering strength for tetrahedron_method"},
{"permutation_symmetry_fc3",
(PyCFunction)py_set_permutation_symmetry_fc3,
METH_VARARGS,
"Set permutation symmetry for fc3"},
{"permutation_symmetry_compact_fc3",
(PyCFunction)py_set_permutation_symmetry_compact_fc3,
METH_VARARGS,
"Set permutation symmetry for compact-fc3"},
{"transpose_compact_fc3",
(PyCFunction)py_transpose_compact_fc3,
METH_VARARGS,
"Transpose compact fc3"},
{"neighboring_grid_points",
(PyCFunction)py_get_neighboring_gird_points,
METH_VARARGS,
"Neighboring grid points by relative grid addresses"},
{"integration_weights",
(PyCFunction)py_set_integration_weights,
METH_VARARGS,
"Integration weights of tetrahedron method"},
{"triplets_reciprocal_mesh_at_q",
(PyCFunction)py_tpl_get_triplets_reciprocal_mesh_at_q,
METH_VARARGS,
"Triplets on reciprocal mesh points at a specific q-point"},
{"BZ_triplets_at_q",
(PyCFunction)py_tpl_get_BZ_triplets_at_q,
METH_VARARGS,
"Triplets in reciprocal primitive lattice are transformed to those in BZ."},
{"triplets_integration_weights",
(PyCFunction)py_set_triplets_integration_weights,
METH_VARARGS,
"Integration weights of tetrahedron method for triplets"},
{"triplets_integration_weights_with_sigma",
(PyCFunction)py_set_triplets_integration_weights_with_sigma,
METH_VARARGS,
"Integration weights of smearing method for triplets"},
{"diagonalize_collision_matrix",
(PyCFunction)py_diagonalize_collision_matrix,
METH_VARARGS,
"Diagonalize and optionally pseudo-inverse using Lapack dsyev(d)"},
{"pinv_from_eigensolution",
(PyCFunction)py_pinv_from_eigensolution,
METH_VARARGS,
"Pseudo-inverse from eigensolution"},
{"default_colmat_solver",
(PyCFunction)py_get_default_colmat_solver,
METH_VARARGS,
"Return default collison matrix solver by integer value"},
#ifdef LIBFLAME
{"inverse_collision_matrix_libflame",
(PyCFunction)py_inverse_collision_matrix_libflame,
METH_VARARGS,
"Pseudo-inverse using libflame hevd"},
#endif
{NULL, NULL, 0, NULL}
};
#if PY_MAJOR_VERSION >= 3
/* GC traverse hook: visit the exception object held in module state. */
static int _phono3py_traverse(PyObject *m, visitproc visit, void *arg) {
Py_VISIT(GETSTATE(m)->error);
return 0;
}
/* GC clear hook: drop the reference to the module-state exception object. */
static int _phono3py_clear(PyObject *m) {
Py_CLEAR(GETSTATE(m)->error);
return 0;
}
/* Python 3 module definition; m_size carries per-module state so the
   traverse/clear hooks above can participate in GC. */
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_phono3py", /* m_name */
NULL, /* m_doc */
sizeof(struct module_state), /* m_size */
_phono3py_methods, /* m_methods */
NULL, /* m_reload (unused) */
_phono3py_traverse, /* m_traverse */
_phono3py_clear, /* m_clear */
NULL /* m_free */
};
/* Module entry point: PyInit__phono3py on Python 3, init_phono3py on
   Python 2.  Creates the module, then installs the "_phono3py.Error"
   exception into the module state. */
#define INITERROR return NULL
PyObject *
PyInit__phono3py(void)
#else
#define INITERROR return
void
init_phono3py(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module = PyModule_Create(&moduledef);
#else
PyObject *module = Py_InitModule("_phono3py", _phono3py_methods);
#endif
struct module_state *st;
if (module == NULL)
INITERROR;
st = GETSTATE(module);
/* the module owns its exception type; stored in per-module state */
st->error = PyErr_NewException("_phono3py.Error", NULL, NULL);
if (st->error == NULL) {
Py_DECREF(module);
INITERROR;
}
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
/* _phono3py.phonons_at_gridpoints
 * Computes phonon frequencies/eigenvectors at the requested grid points
 * from fc2.  Optional arguments passed as None (NAC Born charges,
 * dielectric tensor, q-direction, dd_q0, G_list) become NULL here.
 * Dispatches to the Gonze dipole-dipole variant when dd_q0 is supplied.
 * NOTE: pointer casts below must stay in sync with the numpy dtypes used
 * on the Python side. */
static PyObject * py_get_phonons_at_gridpoints(PyObject *self, PyObject *args)
{
PyArrayObject* py_frequencies;
PyArrayObject* py_eigenvectors;
PyArrayObject* py_phonon_done;
PyArrayObject* py_grid_points;
PyArrayObject* py_grid_address;
PyArrayObject* py_mesh;
PyArrayObject* py_shortest_vectors_fc2;
PyArrayObject* py_multiplicity_fc2;
PyArrayObject* py_positions_fc2;
PyArrayObject* py_fc2;
PyArrayObject* py_masses_fc2;
PyArrayObject* py_p2s_map_fc2;
PyArrayObject* py_s2p_map_fc2;
PyArrayObject* py_reciprocal_lattice;
PyArrayObject* py_born_effective_charge;
PyArrayObject* py_q_direction;
PyArrayObject* py_dielectric_constant;
PyArrayObject* py_dd_q0;
PyArrayObject* py_G_list;
double nac_factor;
double unit_conversion_factor;
double lambda;
char* uplo;
double (*born)[3][3];
double (*dielectric)[3];
double *q_dir;
double* freqs;
lapack_complex_double* eigvecs;
char* phonon_done;
size_t* grid_points;
int (*grid_address)[3];
int* mesh;
double* fc2;
double(*svecs_fc2)[27][3];
int* multi_fc2;
double (*positions_fc2)[3];
double* masses_fc2;
int* p2s_fc2;
int* s2p_fc2;
double (*rec_lat)[3];
double * dd_q0;
double (*G_list)[3];
npy_intp num_patom, num_satom, num_phonons, num_grid_points, num_G_points;
/* the format string order must match the Python caller exactly; note that
   py_fc2 is parsed before py_shortest_vectors_fc2 even though it is
   declared later above */
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOOdOOOOdOOds",
&py_frequencies,
&py_eigenvectors,
&py_phonon_done,
&py_grid_points,
&py_grid_address,
&py_mesh,
&py_fc2,
&py_shortest_vectors_fc2,
&py_multiplicity_fc2,
&py_positions_fc2,
&py_masses_fc2,
&py_p2s_map_fc2,
&py_s2p_map_fc2,
&unit_conversion_factor,
&py_born_effective_charge,
&py_dielectric_constant,
&py_reciprocal_lattice,
&py_q_direction,
&nac_factor,
&py_dd_q0,
&py_G_list,
&lambda,
&uplo)) {
return NULL;
}
/* raw views into the numpy buffers (no copies, no new references) */
freqs = (double*)PyArray_DATA(py_frequencies);
eigvecs = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
phonon_done = (char*)PyArray_DATA(py_phonon_done);
grid_points = (size_t*)PyArray_DATA(py_grid_points);
grid_address = (int(*)[3])PyArray_DATA(py_grid_address);
mesh = (int*)PyArray_DATA(py_mesh);
fc2 = (double*)PyArray_DATA(py_fc2);
svecs_fc2 = (double(*)[27][3])PyArray_DATA(py_shortest_vectors_fc2);
multi_fc2 = (int*)PyArray_DATA(py_multiplicity_fc2);
masses_fc2 = (double*)PyArray_DATA(py_masses_fc2);
p2s_fc2 = (int*)PyArray_DATA(py_p2s_map_fc2);
s2p_fc2 = (int*)PyArray_DATA(py_s2p_map_fc2);
rec_lat = (double(*)[3])PyArray_DATA(py_reciprocal_lattice);
/* multiplicity array is (num_satom, num_patom) */
num_patom = PyArray_DIMS(py_multiplicity_fc2)[1];
num_satom = PyArray_DIMS(py_multiplicity_fc2)[0];
num_phonons = PyArray_DIMS(py_frequencies)[0];
num_grid_points = PyArray_DIMS(py_grid_points)[0];
/* optional (None-able) arguments */
if ((PyObject*)py_born_effective_charge == Py_None) {
born = NULL;
} else {
born = (double(*)[3][3])PyArray_DATA(py_born_effective_charge);
}
if ((PyObject*)py_dielectric_constant == Py_None) {
dielectric = NULL;
} else {
dielectric = (double(*)[3])PyArray_DATA(py_dielectric_constant);
}
if ((PyObject*)py_q_direction == Py_None) {
q_dir = NULL;
} else {
q_dir = (double*)PyArray_DATA(py_q_direction);
/* a (near-)zero q-direction is treated as "not given" */
if (fabs(q_dir[0]) < 1e-10 &&
fabs(q_dir[1]) < 1e-10 &&
fabs(q_dir[2]) < 1e-10) {
q_dir = NULL;
}
}
if ((PyObject*)py_dd_q0 == Py_None) {
dd_q0 = NULL;
} else {
dd_q0 = (double*)PyArray_DATA(py_dd_q0);
}
if ((PyObject*)py_G_list == Py_None) {
G_list = NULL;
num_G_points = 0;
} else {
G_list = (double(*)[3])PyArray_DATA(py_G_list);
num_G_points = PyArray_DIMS(py_G_list)[0];
}
if ((PyObject*)py_positions_fc2 == Py_None) {
positions_fc2 = NULL;
} else {
positions_fc2 = (double(*)[3])PyArray_DATA(py_positions_fc2);
}
/* dd_q0 present selects the Gonze dipole-dipole (NAC) phonon solver */
if (!dd_q0) {
phn_get_phonons_at_gridpoints(freqs,
eigvecs,
phonon_done,
num_phonons,
grid_points,
num_grid_points,
grid_address,
mesh,
fc2,
svecs_fc2,
multi_fc2,
num_patom,
num_satom,
masses_fc2,
p2s_fc2,
s2p_fc2,
unit_conversion_factor,
born,
dielectric,
rec_lat,
q_dir,
nac_factor,
uplo[0]);
} else {
phn_get_gonze_phonons_at_gridpoints(freqs,
eigvecs,
phonon_done,
num_phonons,
grid_points,
num_grid_points,
grid_address,
mesh,
fc2,
svecs_fc2,
multi_fc2,
positions_fc2,
num_patom,
num_satom,
masses_fc2,
p2s_fc2,
s2p_fc2,
unit_conversion_factor,
born,
dielectric,
rec_lat,
q_dir,
nac_factor,
dd_q0,
G_list,
num_G_points,
lambda,
uplo[0]);
}
Py_RETURN_NONE;
}
/* _phono3py.interaction
 * Computes |phi3|^2 (fc3 normal squared) for phonon triplets, writing into
 * the py_fc3_normal_squared array.  Results are returned through the numpy
 * buffers; the Darray wrappers are freed before returning. */
static PyObject * py_get_interaction(PyObject *self, PyObject *args)
{
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_g_zero;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_triplets;
PyArrayObject *py_grid_address;
PyArrayObject *py_mesh;
PyArrayObject *py_shortest_vectors;
PyArrayObject *py_multiplicities;
PyArrayObject *py_fc3;
PyArrayObject *py_masses;
PyArrayObject *py_p2s_map;
PyArrayObject *py_s2p_map;
PyArrayObject *py_band_indices;
double cutoff_frequency;
int symmetrize_fc3_q;
Darray *fc3_normal_squared;
Darray *freqs;
lapack_complex_double *eigvecs;
size_t (*triplets)[3];
npy_intp num_triplets;
char* g_zero;
int *grid_address;
int *mesh;
double *fc3;
double *svecs;
int *multi;
double *masses;
int *p2s;
int *s2p;
int *band_indices;
int svecs_dims[3];
int i;
int is_compact_fc3;
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOOOid",
&py_fc3_normal_squared,
&py_g_zero,
&py_frequencies,
&py_eigenvectors,
&py_triplets,
&py_grid_address,
&py_mesh,
&py_fc3,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_p2s_map,
&py_s2p_map,
&py_band_indices,
&symmetrize_fc3_q,
&cutoff_frequency)) {
return NULL;
}
/* convert_to_darray allocates a wrapper that must be free()d below */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
freqs = convert_to_darray(py_frequencies);
/* npy_cdouble and lapack_complex_double may not be compatible. */
/* So eigenvectors should not be used in Python side */
eigvecs = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
num_triplets = PyArray_DIMS(py_triplets)[0];
g_zero = (char*)PyArray_DATA(py_g_zero);
grid_address = (int*)PyArray_DATA(py_grid_address);
mesh = (int*)PyArray_DATA(py_mesh);
fc3 = (double*)PyArray_DATA(py_fc3);
/* square leading dims => full fc3; otherwise it is in compact form */
if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
is_compact_fc3 = 0;
} else {
is_compact_fc3 = 1;
}
svecs = (double*)PyArray_DATA(py_shortest_vectors);
for (i = 0; i < 3; i++) {
svecs_dims[i] = PyArray_DIMS(py_shortest_vectors)[i];
}
multi = (int*)PyArray_DATA(py_multiplicities);
masses = (double*)PyArray_DATA(py_masses);
p2s = (int*)PyArray_DATA(py_p2s_map);
s2p = (int*)PyArray_DATA(py_s2p_map);
band_indices = (int*)PyArray_DATA(py_band_indices);
itr_get_interaction(fc3_normal_squared,
g_zero,
freqs,
eigvecs,
triplets,
num_triplets,
grid_address,
mesh,
fc3,
is_compact_fc3,
svecs,
svecs_dims,
multi,
masses,
p2s,
s2p,
band_indices,
symmetrize_fc3_q,
cutoff_frequency);
/* free only the wrappers; the underlying numpy buffers stay alive */
free(fc3_normal_squared);
fc3_normal_squared = NULL;
free(freqs);
freqs = NULL;
Py_RETURN_NONE;
}
/* _phono3py.pp_collision
 * Computes ph-ph collision (gamma) for triplets using tetrahedron-method
 * integration weights (relative_grid_address).  Writes into py_gamma. */
static PyObject * py_get_pp_collision(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_relative_grid_address;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_grid_address;
PyArrayObject *py_bz_map;
PyArrayObject *py_mesh;
PyArrayObject *py_fc3;
PyArrayObject *py_shortest_vectors;
PyArrayObject *py_multiplicities;
PyArrayObject *py_masses;
PyArrayObject *py_p2s_map;
PyArrayObject *py_s2p_map;
PyArrayObject *py_band_indices;
PyArrayObject *py_temperatures;
double cutoff_frequency;
int is_NU;
int symmetrize_fc3_q;
double *gamma;
int (*relative_grid_address)[4][3];
double *frequencies;
lapack_complex_double *eigenvectors;
size_t (*triplets)[3];
npy_intp num_triplets;
int *triplet_weights;
int *grid_address;
size_t *bz_map;
int *mesh;
double *fc3;
double *svecs;
int *multi;
double *masses;
int *p2s;
int *s2p;
Iarray *band_indices;
Darray *temperatures;
int svecs_dims[3];
int i;
int is_compact_fc3;
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOOOOOOiid",
&py_gamma,
&py_relative_grid_address,
&py_frequencies,
&py_eigenvectors,
&py_triplets,
&py_triplet_weights,
&py_grid_address,
&py_bz_map,
&py_mesh,
&py_fc3,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_p2s_map,
&py_s2p_map,
&py_band_indices,
&py_temperatures,
&is_NU,
&symmetrize_fc3_q,
&cutoff_frequency)) {
return NULL;
}
/* raw views into the numpy buffers */
gamma = (double*)PyArray_DATA(py_gamma);
relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
frequencies = (double*)PyArray_DATA(py_frequencies);
eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
num_triplets = PyArray_DIMS(py_triplets)[0];
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
grid_address = (int*)PyArray_DATA(py_grid_address);
bz_map = (size_t*)PyArray_DATA(py_bz_map);
mesh = (int*)PyArray_DATA(py_mesh);
fc3 = (double*)PyArray_DATA(py_fc3);
/* square leading dims => full fc3; otherwise compact form */
if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
is_compact_fc3 = 0;
} else {
is_compact_fc3 = 1;
}
svecs = (double*)PyArray_DATA(py_shortest_vectors);
for (i = 0; i < 3; i++) {
svecs_dims[i] = PyArray_DIMS(py_shortest_vectors)[i];
}
multi = (int*)PyArray_DATA(py_multiplicities);
masses = (double*)PyArray_DATA(py_masses);
p2s = (int*)PyArray_DATA(py_p2s_map);
s2p = (int*)PyArray_DATA(py_s2p_map);
/* allocated wrappers; freed below */
band_indices = convert_to_iarray(py_band_indices);
temperatures = convert_to_darray(py_temperatures);
ppc_get_pp_collision(gamma,
relative_grid_address,
frequencies,
eigenvectors,
triplets,
num_triplets,
triplet_weights,
grid_address,
bz_map,
mesh,
fc3,
is_compact_fc3,
svecs,
svecs_dims,
multi,
masses,
p2s,
s2p,
band_indices,
temperatures,
is_NU,
symmetrize_fc3_q,
cutoff_frequency);
free(band_indices);
band_indices = NULL;
free(temperatures);
temperatures = NULL;
Py_RETURN_NONE;
}
/* _phono3py.pp_collision_with_sigma
 * Same as pp_collision but using Gaussian smearing (sigma, sigma_cutoff)
 * instead of tetrahedron-method weights; no bz_map/grid addresses needed. */
static PyObject * py_get_pp_collision_with_sigma(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_grid_address;
PyArrayObject *py_mesh;
PyArrayObject *py_fc3;
PyArrayObject *py_shortest_vectors;
PyArrayObject *py_multiplicities;
PyArrayObject *py_masses;
PyArrayObject *py_p2s_map;
PyArrayObject *py_s2p_map;
PyArrayObject *py_band_indices;
PyArrayObject *py_temperatures;
int is_NU;
int symmetrize_fc3_q;
double sigma;
double sigma_cutoff;
double cutoff_frequency;
double *gamma;
double *frequencies;
lapack_complex_double *eigenvectors;
size_t (*triplets)[3];
npy_intp num_triplets;
int *triplet_weights;
int *grid_address;
int *mesh;
double *fc3;
double *svecs;
int *multi;
double *masses;
int *p2s;
int *s2p;
Iarray *band_indices;
Darray *temperatures;
int svecs_dims[3];
int i;
int is_compact_fc3;
if (!PyArg_ParseTuple(args, "OddOOOOOOOOOOOOOOiid",
&py_gamma,
&sigma,
&sigma_cutoff,
&py_frequencies,
&py_eigenvectors,
&py_triplets,
&py_triplet_weights,
&py_grid_address,
&py_mesh,
&py_fc3,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_p2s_map,
&py_s2p_map,
&py_band_indices,
&py_temperatures,
&is_NU,
&symmetrize_fc3_q,
&cutoff_frequency)) {
return NULL;
}
/* raw views into the numpy buffers */
gamma = (double*)PyArray_DATA(py_gamma);
frequencies = (double*)PyArray_DATA(py_frequencies);
eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
num_triplets = PyArray_DIMS(py_triplets)[0];
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
grid_address = (int*)PyArray_DATA(py_grid_address);
mesh = (int*)PyArray_DATA(py_mesh);
fc3 = (double*)PyArray_DATA(py_fc3);
/* square leading dims => full fc3; otherwise compact form */
if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
is_compact_fc3 = 0;
} else {
is_compact_fc3 = 1;
}
svecs = (double*)PyArray_DATA(py_shortest_vectors);
for (i = 0; i < 3; i++) {
svecs_dims[i] = PyArray_DIMS(py_shortest_vectors)[i];
}
multi = (int*)PyArray_DATA(py_multiplicities);
masses = (double*)PyArray_DATA(py_masses);
p2s = (int*)PyArray_DATA(py_p2s_map);
s2p = (int*)PyArray_DATA(py_s2p_map);
/* allocated wrappers; freed below */
band_indices = convert_to_iarray(py_band_indices);
temperatures = convert_to_darray(py_temperatures);
ppc_get_pp_collision_with_sigma(gamma,
sigma,
sigma_cutoff,
frequencies,
eigenvectors,
triplets,
num_triplets,
triplet_weights,
grid_address,
mesh,
fc3,
is_compact_fc3,
svecs,
svecs_dims,
multi,
masses,
p2s,
s2p,
band_indices,
temperatures,
is_NU,
symmetrize_fc3_q,
cutoff_frequency);
free(band_indices);
band_indices = NULL;
free(temperatures);
temperatures = NULL;
Py_RETURN_NONE;
}
/* _phono3py.imag_self_energy_with_g
 * Accumulates the imaginary part of the self energy (gamma) at bands from
 * precomputed |phi3|^2 and integration weights g; writes into py_gamma. */
static PyObject * py_get_imag_self_energy_with_g(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_g;
PyArrayObject *py_g_zero;
double cutoff_frequency, temperature;
Darray *fc3_normal_squared;
double *gamma;
double *g;
char* g_zero;
double *frequencies;
size_t (*triplets)[3];
int *triplet_weights;
/* note: parse order (triplets before frequencies) differs from the
   declaration order above */
if (!PyArg_ParseTuple(args, "OOOOOdOOd",
&py_gamma,
&py_fc3_normal_squared,
&py_triplets,
&py_triplet_weights,
&py_frequencies,
&temperature,
&py_g,
&py_g_zero,
&cutoff_frequency)) {
return NULL;
}
/* allocated wrapper; freed below */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
gamma = (double*)PyArray_DATA(py_gamma);
g = (double*)PyArray_DATA(py_g);
g_zero = (char*)PyArray_DATA(py_g_zero);
frequencies = (double*)PyArray_DATA(py_frequencies);
triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
ise_get_imag_self_energy_at_bands_with_g(gamma,
fc3_normal_squared,
frequencies,
triplets,
triplet_weights,
g,
g_zero,
temperature,
cutoff_frequency);
free(fc3_normal_squared);
fc3_normal_squared = NULL;
Py_RETURN_NONE;
}
/* _phono3py.detailed_imag_self_energy_with_g
 * Like imag_self_energy_with_g but also returns the per-triplet detail and
 * the Normal/Umklapp split (gamma_N / gamma_U), which needs grid_address. */
static PyObject *
py_get_detailed_imag_self_energy_with_g(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma_detail;
PyArrayObject *py_gamma_N;
PyArrayObject *py_gamma_U;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_grid_address;
PyArrayObject *py_g;
PyArrayObject *py_g_zero;
double cutoff_frequency, temperature;
Darray *fc3_normal_squared;
double *gamma_detail;
double *gamma_N;
double *gamma_U;
double *g;
char* g_zero;
double *frequencies;
size_t (*triplets)[3];
int *triplet_weights;
int *grid_address;
if (!PyArg_ParseTuple(args, "OOOOOOOOdOOd",
&py_gamma_detail,
&py_gamma_N,
&py_gamma_U,
&py_fc3_normal_squared,
&py_triplets,
&py_triplet_weights,
&py_grid_address,
&py_frequencies,
&temperature,
&py_g,
&py_g_zero,
&cutoff_frequency)) {
return NULL;
}
/* allocated wrapper; freed below */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
gamma_detail = (double*)PyArray_DATA(py_gamma_detail);
gamma_N = (double*)PyArray_DATA(py_gamma_N);
gamma_U = (double*)PyArray_DATA(py_gamma_U);
g = (double*)PyArray_DATA(py_g);
g_zero = (char*)PyArray_DATA(py_g_zero);
frequencies = (double*)PyArray_DATA(py_frequencies);
triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
grid_address = (int*)PyArray_DATA(py_grid_address);
ise_get_detailed_imag_self_energy_at_bands_with_g(gamma_detail,
gamma_N,
gamma_U,
fc3_normal_squared,
frequencies,
triplets,
triplet_weights,
grid_address,
g,
g_zero,
temperature,
cutoff_frequency);
free(fc3_normal_squared);
fc3_normal_squared = NULL;
Py_RETURN_NONE;
}
/* _phono3py.frequency_shift_at_bands
 * Computes the real-part frequency shift at the selected bands from
 * |phi3|^2; writes into py_shift.
 * NOTE(review): triplets are read here as int* while other entry points in
 * this file read them as size_t(*)[3] — confirm the dtype the Python side
 * passes to this particular function. */
static PyObject * py_get_frequency_shift_at_bands(PyObject *self,
PyObject *args)
{
PyArrayObject *py_shift;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_band_indices;
double epsilon, unit_conversion_factor, cutoff_frequency, temperature;
Darray *fc3_normal_squared;
double *shift;
double *frequencies;
int *band_indices;
int *grid_point_triplets;
int *triplet_weights;
if (!PyArg_ParseTuple(args, "OOOOOOdddd",
&py_shift,
&py_fc3_normal_squared,
&py_triplets,
&py_triplet_weights,
&py_frequencies,
&py_band_indices,
&temperature,
&epsilon,
&unit_conversion_factor,
&cutoff_frequency)) {
return NULL;
}
/* allocated wrapper; freed below */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
shift = (double*)PyArray_DATA(py_shift);
frequencies = (double*)PyArray_DATA(py_frequencies);
band_indices = (int*)PyArray_DATA(py_band_indices);
grid_point_triplets = (int*)PyArray_DATA(py_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
get_frequency_shift_at_bands(shift,
fc3_normal_squared,
band_indices,
frequencies,
grid_point_triplets,
triplet_weights,
epsilon,
temperature,
unit_conversion_factor,
cutoff_frequency);
free(fc3_normal_squared);
fc3_normal_squared = NULL;
Py_RETURN_NONE;
}
/* Python wrapper: build the (irreducible-grid-point) collision matrix used
   by the direct-solution LBTE solver.  Writes into `py_collision_matrix`
   in place; returns None. */
static PyObject * py_get_collision_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_fc3_normal_squared;
  PyArrayObject *py_frequencies;
  PyArrayObject *py_triplets;
  PyArrayObject *py_triplets_map;
  PyArrayObject *py_map_q;
  PyArrayObject *py_g;
  PyArrayObject *py_rotated_grid_points;
  PyArrayObject *py_rotations_cartesian;
  double temperature, unit_conversion_factor, cutoff_frequency;
  Darray *fc3_normal_squared;
  double *collision_matrix;
  double *g;                    /* integration weights */
  double *frequencies;
  size_t (*triplets)[3];
  size_t *triplets_map;
  size_t *map_q;
  size_t *rotated_grid_points;  /* (num_ir_gp, num_rot) */
  npy_intp num_gp, num_ir_gp, num_rot;
  double *rotations_cartesian;

  if (!PyArg_ParseTuple(args, "OOOOOOOOOddd",
                        &py_collision_matrix,
                        &py_fc3_normal_squared,
                        &py_frequencies,
                        &py_g,
                        &py_triplets,
                        &py_triplets_map,
                        &py_map_q,
                        &py_rotated_grid_points,
                        &py_rotations_cartesian,
                        &temperature,
                        &unit_conversion_factor,
                        &cutoff_frequency)) {
    return NULL;
  }

  /* convert_to_darray allocates; released before returning. */
  fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  g = (double*)PyArray_DATA(py_g);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
  triplets_map = (size_t*)PyArray_DATA(py_triplets_map);
  num_gp = PyArray_DIMS(py_triplets_map)[0];
  map_q = (size_t*)PyArray_DATA(py_map_q);
  rotated_grid_points = (size_t*)PyArray_DATA(py_rotated_grid_points);
  num_ir_gp = PyArray_DIMS(py_rotated_grid_points)[0];
  num_rot = PyArray_DIMS(py_rotated_grid_points)[1];
  rotations_cartesian = (double*)PyArray_DATA(py_rotations_cartesian);

  /* Shape-consistency checks (debug builds only). */
  assert(num_rot == PyArray_DIMS(py_rotations_cartesian)[0]);
  assert(num_gp == PyArray_DIMS(py_frequencies)[0]);

  col_get_collision_matrix(collision_matrix,
                           fc3_normal_squared,
                           frequencies,
                           triplets,
                           triplets_map,
                           map_q,
                           rotated_grid_points,
                           rotations_cartesian,
                           g,
                           num_ir_gp,
                           num_gp,
                           num_rot,
                           temperature,
                           unit_conversion_factor,
                           cutoff_frequency);

  free(fc3_normal_squared);
  fc3_normal_squared = NULL;

  Py_RETURN_NONE;
}
/* Python wrapper: same as py_get_collision_matrix but on the full
   (reducible) grid — no rotations are applied.  Writes into
   `py_collision_matrix` in place; returns None. */
static PyObject * py_get_reducible_collision_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_fc3_normal_squared;
  PyArrayObject *py_frequencies;
  PyArrayObject *py_triplets;
  PyArrayObject *py_triplets_map;
  PyArrayObject *py_map_q;
  PyArrayObject *py_g;
  double temperature, unit_conversion_factor, cutoff_frequency;
  Darray *fc3_normal_squared;
  double *collision_matrix;
  double *g;                /* integration weights */
  double *frequencies;
  size_t (*triplets)[3];
  size_t *triplets_map;
  npy_intp num_gp;          /* number of grid points, from triplets_map */
  size_t *map_q;

  if (!PyArg_ParseTuple(args, "OOOOOOOddd",
                        &py_collision_matrix,
                        &py_fc3_normal_squared,
                        &py_frequencies,
                        &py_g,
                        &py_triplets,
                        &py_triplets_map,
                        &py_map_q,
                        &temperature,
                        &unit_conversion_factor,
                        &cutoff_frequency)) {
    return NULL;
  }

  /* convert_to_darray allocates; released before returning. */
  fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  g = (double*)PyArray_DATA(py_g);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
  triplets_map = (size_t*)PyArray_DATA(py_triplets_map);
  num_gp = PyArray_DIMS(py_triplets_map)[0];
  map_q = (size_t*)PyArray_DATA(py_map_q);

  col_get_reducible_collision_matrix(collision_matrix,
                                     fc3_normal_squared,
                                     frequencies,
                                     triplets,
                                     triplets_map,
                                     map_q,
                                     g,
                                     num_gp,
                                     temperature,
                                     unit_conversion_factor,
                                     cutoff_frequency);

  free(fc3_normal_squared);
  fc3_normal_squared = NULL;

  Py_RETURN_NONE;
}
/* Python wrapper: symmetrize each (sigma, temperature) sub-matrix of the
   collision matrix in place, averaging element (k,l) with (l,k).
   Returns None. */
static PyObject * py_symmetrize_collision_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  double *collision_matrix;
  size_t i, j, k, l;
  npy_intp num_band, num_grid_points, num_temp, num_sigma;
  size_t adrs_shift, num_column;
  double val;

  if (!PyArg_ParseTuple(args, "O",
                        &py_collision_matrix)) {
    return NULL;
  }

  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  num_sigma = PyArray_DIMS(py_collision_matrix)[0];
  num_temp = PyArray_DIMS(py_collision_matrix)[1];
  num_grid_points = PyArray_DIMS(py_collision_matrix)[2];
  num_band = PyArray_DIMS(py_collision_matrix)[3];
  /* An 8-dimensional array carries the 3 Cartesian components as extra
     axes, tripling the effective matrix dimension. */
  if (PyArray_NDIM(py_collision_matrix) == 8) {
    num_column = num_grid_points * num_band * 3;
  } else {
    num_column = num_grid_points * num_band;
  }

  for (i = 0; i < num_sigma; i++) {
    for (j = 0; j < num_temp; j++) {
      /* Offset of the square (num_column x num_column) block for
         sigma i and temperature j. */
      adrs_shift = (i * num_column * num_column * num_temp +
                    j * num_column * num_column);
      /* show_colmat_info(py_collision_matrix, i, j, adrs_shift); */
      /* Parallel over rows k; each iteration touches only the strict
         upper triangle of row k and its mirrored column, so writes of
         distinct iterations do not overlap. */
#pragma omp parallel for schedule(guided) private(l, val)
      for (k = 0; k < num_column; k++) {
        for (l = k + 1; l < num_column; l++) {
          val = (collision_matrix[adrs_shift + k * num_column + l] +
                 collision_matrix[adrs_shift + l * num_column + k]) / 2;
          collision_matrix[adrs_shift + k * num_column + l] = val;
          collision_matrix[adrs_shift + l * num_column + k] = val;
        }
      }
    }
  }

  Py_RETURN_NONE;
}
/* Python wrapper: expand collision-matrix rows computed on the irreducible
   grid points to the full grid using the rotation mapping
   `rot_grid_points` (shape (num_rot, num_grid_points)).  Each irreducible
   row is divided by its multiplicity and scattered onto all rotationally
   equivalent grid points.  Operates in place; returns None. */
static PyObject * py_expand_collision_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_ir_grid_points;
  PyArrayObject *py_rot_grid_points;
  double *collision_matrix;
  size_t *rot_grid_points;
  size_t *ir_grid_points;
  size_t i, j, k, l, m, n, p;
  size_t adrs_shift, adrs_shift_plus, num_column, ir_gp, num_bgb, gp_r;
  npy_intp num_band, num_grid_points, num_temp, num_sigma, num_rot, num_ir_gp;
  size_t *multi;        /* multiplicity of each irreducible grid point */
  double *colmat_copy;  /* per-thread scratch row block */

  if (!PyArg_ParseTuple(args, "OOO",
                        &py_collision_matrix,
                        &py_ir_grid_points,
                        &py_rot_grid_points)) {
    return NULL;
  }

  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  rot_grid_points = (size_t*)PyArray_DATA(py_rot_grid_points);
  ir_grid_points = (size_t*)PyArray_DATA(py_ir_grid_points);
  num_sigma = PyArray_DIMS(py_collision_matrix)[0];
  num_temp = PyArray_DIMS(py_collision_matrix)[1];
  num_grid_points = PyArray_DIMS(py_collision_matrix)[2];
  num_band = PyArray_DIMS(py_collision_matrix)[3];
  num_rot = PyArray_DIMS(py_rot_grid_points)[0];
  num_ir_gp = PyArray_DIMS(py_ir_grid_points)[0];
  num_column = num_grid_points * num_band;
  /* Size of one "row block": all elements coupling one grid point
     (num_band rows) to every (grid point, band) column. */
  num_bgb = num_band * num_grid_points * num_band;

  assert(num_grid_points == PyArray_DIMS(py_rot_grid_points)[1]);

  multi = (size_t*)malloc(sizeof(size_t) * num_ir_gp);
  colmat_copy = NULL;

  /* Count, for each irreducible point, how many rotations leave it fixed. */
#pragma omp parallel for schedule(guided) private(j, ir_gp)
  for (i = 0; i < num_ir_gp; i++) {
    ir_gp = ir_grid_points[i];
    multi[i] = 0;
    for (j = 0; j < num_rot; j++) {
      if (rot_grid_points[j * num_grid_points + ir_gp] == ir_gp) {
        multi[i]++;
      }
    }
  }

  for (i = 0; i < num_sigma; i++) {
    for (j = 0; j < num_temp; j++) {
      adrs_shift = (i * num_column * num_column * num_temp +
                    j * num_column * num_column);
      /* colmat_copy is thread-private: allocated and freed per iteration. */
#pragma omp parallel for private(ir_gp, adrs_shift_plus, colmat_copy, l, gp_r, m, n, p)
      for (k = 0; k < num_ir_gp; k++) {
        ir_gp = ir_grid_points[k];
        adrs_shift_plus = adrs_shift + ir_gp * num_bgb;
        colmat_copy = (double*)malloc(sizeof(double) * num_bgb);
        /* Take out the irreducible block (normalized by multiplicity)
           and zero it so that the accumulation below starts clean. */
        for (l = 0; l < num_bgb; l++) {
          colmat_copy[l] = collision_matrix[adrs_shift_plus + l] / multi[k];
          collision_matrix[adrs_shift_plus + l] = 0;
        }
        /* Scatter the block onto every rotation image, permuting the
           column grid points accordingly. */
        for (l = 0; l < num_rot; l++) {
          gp_r = rot_grid_points[l * num_grid_points + ir_gp];
          for (m = 0; m < num_band; m++) {
            for (n = 0; n < num_grid_points; n++) {
              for (p = 0; p < num_band; p++) {
                collision_matrix[
                  adrs_shift + gp_r * num_bgb + m * num_grid_points * num_band
                  + rot_grid_points[l * num_grid_points + n] * num_band + p] +=
                  colmat_copy[m * num_grid_points * num_band + n * num_band + p];
              }
            }
          }
        }
        free(colmat_copy);
        colmat_copy = NULL;
      }
    }
  }

  free(multi);
  multi = NULL;

  Py_RETURN_NONE;
}
/* Python wrapper: isotope scattering strength (gamma) at one grid point
   with Gaussian smearing of width `sigma`.  Writes into `py_gamma` in
   place; returns None. */
static PyObject * py_get_isotope_strength(PyObject *self, PyObject *args)
{
  PyArrayObject *py_gamma;
  PyArrayObject *py_frequencies;
  PyArrayObject *py_eigenvectors;
  PyArrayObject *py_band_indices;
  PyArrayObject *py_mass_variances;
  long grid_point;
  long num_grid_points;
  double cutoff_frequency;
  double sigma;
  double *gamma;
  double *frequencies;
  lapack_complex_double *eigenvectors;
  int *band_indices;
  double *mass_variances;
  npy_intp num_band, num_band0;

  if (!PyArg_ParseTuple(args, "OlOOOOldd",
                        &py_gamma,
                        &grid_point,
                        &py_mass_variances,
                        &py_frequencies,
                        &py_eigenvectors,
                        &py_band_indices,
                        &num_grid_points,
                        &sigma,
                        &cutoff_frequency)) {
    return NULL;
  }

  gamma = (double*)PyArray_DATA(py_gamma);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
  band_indices = (int*)PyArray_DATA(py_band_indices);
  mass_variances = (double*)PyArray_DATA(py_mass_variances);
  num_band = PyArray_DIMS(py_frequencies)[1];
  num_band0 = PyArray_DIMS(py_band_indices)[0];

  /* The commented block below is a retained alternative implementation
     that pre-computes Gaussian integration weights and calls the
     tetrahedron-method entry point; kept for reference. */
  /* int i, j, k; */
  /* double f, f0; */
  /* int *weights, *ir_grid_points; */
  /* double *integration_weights; */

  /* ir_grid_points = (int*)malloc(sizeof(int) * num_grid_points); */
  /* weights = (int*)malloc(sizeof(int) * num_grid_points); */
  /* integration_weights = (double*)malloc(sizeof(double) * */
  /*                                       num_grid_points * num_band0 * num_band); */

  /* for (i = 0; i < num_grid_points; i++) { */
  /*   ir_grid_points[i] = i; */
  /*   weights[i] = 1; */
  /*   for (j = 0; j < num_band0; j++) { */
  /*     f0 = frequencies[grid_point * num_band + band_indices[j]]; */
  /*     for (k = 0; k < num_band; k++) { */
  /*       f = frequencies[i * num_band + k]; */
  /*       integration_weights[i * num_band0 * num_band + */
  /*                           j * num_band + k] = gaussian(f - f0, sigma); */
  /*     } */
  /*   } */
  /* } */

  /* get_thm_isotope_scattering_strength(gamma, */
  /*                                     grid_point, */
  /*                                     ir_grid_points, */
  /*                                     weights, */
  /*                                     mass_variances, */
  /*                                     frequencies, */
  /*                                     eigenvectors, */
  /*                                     num_grid_points, */
  /*                                     band_indices, */
  /*                                     num_band, */
  /*                                     num_band0, */
  /*                                     integration_weights, */
  /*                                     cutoff_frequency); */

  /* free(ir_grid_points); */
  /* free(weights); */
  /* free(integration_weights); */

  iso_get_isotope_scattering_strength(gamma,
                                      grid_point,
                                      mass_variances,
                                      frequencies,
                                      eigenvectors,
                                      num_grid_points,
                                      band_indices,
                                      num_band,
                                      num_band0,
                                      sigma,
                                      cutoff_frequency);

  Py_RETURN_NONE;
}
/* Python wrapper: isotope scattering strength using pre-computed
   tetrahedron-method integration weights instead of Gaussian smearing.
   Writes into `py_gamma` in place; returns None. */
static PyObject * py_get_thm_isotope_strength(PyObject *self, PyObject *args)
{
  PyArrayObject *py_gamma;
  PyArrayObject *py_frequencies;
  PyArrayObject *py_eigenvectors;
  PyArrayObject *py_band_indices;
  PyArrayObject *py_mass_variances;
  PyArrayObject *py_ir_grid_points;
  PyArrayObject *py_weights;
  PyArrayObject *py_integration_weights;
  long grid_point;
  double cutoff_frequency;
  double *gamma;
  double *frequencies;
  size_t *ir_grid_points;
  int *weights;
  lapack_complex_double *eigenvectors;
  int *band_indices;
  double *mass_variances;
  npy_intp num_band, num_band0, num_ir_grid_points;
  double *integration_weights;

  if (!PyArg_ParseTuple(args, "OlOOOOOOOd",
                        &py_gamma,
                        &grid_point,
                        &py_ir_grid_points,
                        &py_weights,
                        &py_mass_variances,
                        &py_frequencies,
                        &py_eigenvectors,
                        &py_band_indices,
                        &py_integration_weights,
                        &cutoff_frequency)) {
    return NULL;
  }

  gamma = (double*)PyArray_DATA(py_gamma);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  ir_grid_points = (size_t*)PyArray_DATA(py_ir_grid_points);
  weights = (int*)PyArray_DATA(py_weights);
  eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
  band_indices = (int*)PyArray_DATA(py_band_indices);
  mass_variances = (double*)PyArray_DATA(py_mass_variances);
  num_band = PyArray_DIMS(py_frequencies)[1];
  num_band0 = PyArray_DIMS(py_band_indices)[0];
  integration_weights = (double*)PyArray_DATA(py_integration_weights);
  num_ir_grid_points = PyArray_DIMS(py_ir_grid_points)[0];

  iso_get_thm_isotope_scattering_strength(gamma,
                                          grid_point,
                                          ir_grid_points,
                                          weights,
                                          mass_variances,
                                          frequencies,
                                          eigenvectors,
                                          num_ir_grid_points,
                                          band_indices,
                                          num_band,
                                          num_band0,
                                          integration_weights,
                                          cutoff_frequency);

  Py_RETURN_NONE;
}
/* Python wrapper: copy fc3 elements belonging to atom `source` onto atom
   `target` by applying the inverse Cartesian rotation of a symmetry
   operation.  fc3 is modified in place; returns None. */
static PyObject * py_distribute_fc3(PyObject *self, PyObject *args)
{
  PyArrayObject *py_fc3;
  PyArrayObject *py_atom_mapping;
  PyArrayObject *py_rot_cart_inv;
  int target_atom, source_atom;
  double *fc3_data;
  double *inv_rot;
  int *atom_map;
  npy_intp n_atom;

  if (!PyArg_ParseTuple(args, "OiiOO",
                        &py_fc3,
                        &target_atom,
                        &source_atom,
                        &py_atom_mapping,
                        &py_rot_cart_inv)) {
    return NULL;
  }

  atom_map = (int*)PyArray_DATA(py_atom_mapping);
  n_atom = PyArray_DIMS(py_atom_mapping)[0];
  inv_rot = (double*)PyArray_DATA(py_rot_cart_inv);
  fc3_data = (double*)PyArray_DATA(py_fc3);

  fc3_distribute_fc3(fc3_data,
                     target_atom,
                     source_atom,
                     atom_map,
                     n_atom,
                     inv_rot);
  Py_RETURN_NONE;
}
/* Python wrapper: build fc3 from displaced-supercell delta-fc2 matrices by
   rotating them with the site-symmetry operations and projecting with the
   pseudo-inverse of the displacement matrix (inv_U).  Writes into `py_fc3`
   in place; returns None. */
static PyObject * py_rotate_delta_fc2s(PyObject *self, PyObject *args)
{
  PyArrayObject *py_fc3;
  PyArrayObject *py_delta_fc2s;
  PyArrayObject *py_inv_U;
  PyArrayObject *py_site_sym_cart;
  PyArrayObject *py_rot_map_syms;
  double (*fc3)[3][3][3];
  double (*delta_fc2s)[3][3];
  double *inv_U;
  double (*site_sym_cart)[3][3];
  int *rot_map_syms;
  npy_intp num_atom, num_disp, num_site_sym;

  if (!PyArg_ParseTuple(args, "OOOOO",
                        &py_fc3,
                        &py_delta_fc2s,
                        &py_inv_U,
                        &py_site_sym_cart,
                        &py_rot_map_syms)) {
    return NULL;
  }

  /* Expected array shapes (documented by the original author): */
  /* (num_atom, num_atom, 3, 3, 3) */
  fc3 = (double(*)[3][3][3])PyArray_DATA(py_fc3);
  /* (n_u1, num_atom, num_atom, 3, 3) */
  delta_fc2s = (double(*)[3][3])PyArray_DATA(py_delta_fc2s);
  /* (3, n_u1 * n_sym) */
  inv_U = (double*)PyArray_DATA(py_inv_U);
  /* (n_sym, 3, 3) */
  site_sym_cart = (double(*)[3][3])PyArray_DATA(py_site_sym_cart);
  /* (n_sym, natom) */
  rot_map_syms = (int*)PyArray_DATA(py_rot_map_syms);

  num_atom = PyArray_DIMS(py_fc3)[0];
  num_disp = PyArray_DIMS(py_delta_fc2s)[0];
  num_site_sym = PyArray_DIMS(py_site_sym_cart)[0];

  fc3_rotate_delta_fc2(fc3,
                       delta_fc2s,
                       inv_U,
                       site_sym_cart,
                       rot_map_syms,
                       num_atom,
                       num_site_sym,
                       num_disp);

  Py_RETURN_NONE;
}
/* Python wrapper: impose index-permutation symmetry on a full fc3 array
   (shape (num_atom, num_atom, num_atom, 3, 3, 3)) in place; returns None. */
static PyObject *
py_set_permutation_symmetry_fc3(PyObject *self, PyObject *args)
{
  PyArrayObject *fc3_array;
  double *fc3_data;
  npy_intp n_atom;

  if (!PyArg_ParseTuple(args, "O", &fc3_array)) {
    return NULL;
  }

  fc3_data = (double*)PyArray_DATA(fc3_array);
  n_atom = PyArray_DIMS(fc3_array)[0];
  fc3_set_permutation_symmetry_fc3(fc3_data, n_atom);

  Py_RETURN_NONE;
}
/* Python wrapper: impose index-permutation symmetry on a compact fc3 array
   (first axis runs over primitive atoms only) using the permutation /
   mapping tables of the symmetry operations.  In place; returns None. */
static PyObject *
py_set_permutation_symmetry_compact_fc3(PyObject *self, PyObject *args)
{
  PyArrayObject* py_fc3;
  PyArrayObject* py_permutations;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_nsym_list;
  double *fc3;
  int *s2pp;       /* supercell -> primitive-in-primitive mapping */
  int *p2s;        /* primitive -> supercell mapping */
  int *nsym_list;
  int *perms;      /* atom permutations per symmetry operation */
  npy_intp n_patom, n_satom;

  if (!PyArg_ParseTuple(args, "OOOOO",
                        &py_fc3,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list)) {
    return NULL;
  }

  fc3 = (double*)PyArray_DATA(py_fc3);
  perms = (int*)PyArray_DATA(py_permutations);
  s2pp = (int*)PyArray_DATA(py_s2pp_map);
  p2s = (int*)PyArray_DATA(py_p2s_map);
  nsym_list = (int*)PyArray_DATA(py_nsym_list);
  /* Compact fc3 shape: (n_patom, n_satom, n_satom, 3, 3, 3). */
  n_patom = PyArray_DIMS(py_fc3)[0];
  n_satom = PyArray_DIMS(py_fc3)[1];

  fc3_set_permutation_symmetry_compact_fc3(fc3,
                                           p2s,
                                           s2pp,
                                           nsym_list,
                                           perms,
                                           n_satom,
                                           n_patom);

  Py_RETURN_NONE;
}
/* Python wrapper: transpose a compact fc3 array over a pair of atom
   indices selected by `t_type` (transposition type understood by
   fc3_transpose_compact_fc3).  In place; returns None. */
static PyObject * py_transpose_compact_fc3(PyObject *self, PyObject *args)
{
  PyArrayObject* py_fc3;
  PyArrayObject* py_permutations;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_nsym_list;
  int t_type;      /* which index pair to transpose */
  double *fc3;
  int *s2pp;
  int *p2s;
  int *nsym_list;
  int *perms;
  npy_intp n_patom, n_satom;

  if (!PyArg_ParseTuple(args, "OOOOOi",
                        &py_fc3,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list,
                        &t_type)) {
    return NULL;
  }

  fc3 = (double*)PyArray_DATA(py_fc3);
  perms = (int*)PyArray_DATA(py_permutations);
  s2pp = (int*)PyArray_DATA(py_s2pp_map);
  p2s = (int*)PyArray_DATA(py_p2s_map);
  nsym_list = (int*)PyArray_DATA(py_nsym_list);
  /* Compact fc3 shape: (n_patom, n_satom, n_satom, 3, 3, 3). */
  n_patom = PyArray_DIMS(py_fc3)[0];
  n_satom = PyArray_DIMS(py_fc3)[1];

  fc3_transpose_compact_fc3(fc3,
                            p2s,
                            s2pp,
                            nsym_list,
                            perms,
                            n_satom,
                            n_patom,
                            t_type);

  Py_RETURN_NONE;
}
/* Python wrapper: for each grid point, collect the BZ grid-point indices of
   its neighbours given by `relative_grid_address`, writing them into
   `py_relative_grid_points` in place.  Returns None.
   NOTE: the misspelling "gird" in the function name is kept — it is the
   name registered in the module's method table. */
static PyObject * py_get_neighboring_gird_points(PyObject *self, PyObject *args)
{
  PyArrayObject *py_relative_grid_points;
  PyArrayObject *py_grid_points;
  PyArrayObject *py_relative_grid_address;
  PyArrayObject *py_mesh;
  PyArrayObject *py_bz_grid_address;
  PyArrayObject *py_bz_map;
  size_t *relative_grid_points;  /* output, (num_gp, num_rel_addr) */
  size_t *grid_points;
  npy_intp num_grid_points, num_relative_grid_address;
  int (*relative_grid_address)[3];
  int *mesh;
  int (*bz_grid_address)[3];
  size_t *bz_map;
  size_t i;

  if (!PyArg_ParseTuple(args, "OOOOOO",
                        &py_relative_grid_points,
                        &py_grid_points,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_bz_grid_address,
                        &py_bz_map)) {
    return NULL;
  }

  relative_grid_points = (size_t*)PyArray_DATA(py_relative_grid_points);
  grid_points = (size_t*)PyArray_DATA(py_grid_points);
  num_grid_points = PyArray_DIMS(py_grid_points)[0];
  relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address);
  num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0];
  mesh = (int*)PyArray_DATA(py_mesh);
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (size_t*)PyArray_DATA(py_bz_map);

  /* Each iteration writes a disjoint slice of the output array. */
#pragma omp parallel for
  for (i = 0; i < num_grid_points; i++) {
    thm_get_dense_neighboring_grid_points
      (relative_grid_points + i * num_relative_grid_address,
       grid_points[i],
       relative_grid_address,
       num_relative_grid_address,
       mesh,
       bz_grid_address,
       bz_map);
  }
  Py_RETURN_NONE;
}
/* Python wrapper: tetrahedron-method integration weights for a set of
   frequency points.  For each grid point the 24 tetrahedra around it
   (4 vertices each) are located, the band frequencies at the vertices are
   gathered, and the weight at each frequency point is computed.  Writes
   into `py_iw` (shape (num_gp, num_band0, num_band)) in place;
   returns None. */
static PyObject * py_set_integration_weights(PyObject *self, PyObject *args)
{
  PyArrayObject *py_iw;
  PyArrayObject *py_frequency_points;
  PyArrayObject *py_relative_grid_address;
  PyArrayObject *py_mesh;
  PyArrayObject *py_grid_points;
  PyArrayObject *py_frequencies;
  PyArrayObject *py_bz_grid_address;
  PyArrayObject *py_bz_map;
  double *iw;
  double *frequency_points;
  npy_intp num_band0, num_band, num_gp;
  size_t i, j, k, bi;
  int (*relative_grid_address)[4][3];  /* 24 tetrahedra x 4 vertices */
  int *mesh;
  size_t *grid_points;
  int (*bz_grid_address)[3];
  size_t *bz_map;
  double *frequencies;
  size_t vertices[24][4];       /* thread-private scratch */
  double freq_vertices[24][4];  /* thread-private scratch */

  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_iw,
                        &py_frequency_points,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_grid_points,
                        &py_frequencies,
                        &py_bz_grid_address,
                        &py_bz_map)) {
    return NULL;
  }

  iw = (double*)PyArray_DATA(py_iw);
  frequency_points = (double*)PyArray_DATA(py_frequency_points);
  num_band0 = PyArray_DIMS(py_frequency_points)[0];
  relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
  mesh = (int*)PyArray_DATA(py_mesh);
  grid_points = (size_t*)PyArray_DATA(py_grid_points);
  num_gp = PyArray_DIMS(py_grid_points)[0];
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (size_t*)PyArray_DATA(py_bz_map);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  num_band = PyArray_DIMS(py_frequencies)[1];

#pragma omp parallel for private(j, k, bi, vertices, freq_vertices)
  for (i = 0; i < num_gp; i++) {
    /* Locate the 4 vertex grid points of each of the 24 tetrahedra. */
    for (j = 0; j < 24; j++) {
      thm_get_dense_neighboring_grid_points(vertices[j],
                                            grid_points[i],
                                            relative_grid_address[j],
                                            4,
                                            mesh,
                                            bz_grid_address,
                                            bz_map);
    }
    for (bi = 0; bi < num_band; bi++) {
      for (j = 0; j < 24; j++) {
        for (k = 0; k < 4; k++) {
          freq_vertices[j][k] = frequencies[vertices[j][k] * num_band + bi];
        }
      }
      /* 'I' selects the imaginary-part (delta-function) weight. */
      for (j = 0; j < num_band0; j++) {
        iw[i * num_band0 * num_band + j * num_band + bi] =
          thm_get_integration_weight(frequency_points[j], freq_vertices, 'I');
      }
    }
  }

  Py_RETURN_NONE;
}
/* Python wrapper: find the irreducible q-point triplets (q, q', q'') at a
   fixed grid point on the reciprocal mesh.  Fills `py_map_triplets` and
   `py_map_q` in place and returns the number of irreducible triplets as a
   Python int. */
static PyObject *
py_tpl_get_triplets_reciprocal_mesh_at_q(PyObject *self, PyObject *args)
{
  PyArrayObject *py_map_triplets;
  PyArrayObject *py_grid_address;
  PyArrayObject *py_map_q;
  PyArrayObject *py_mesh;
  PyArrayObject *py_rotations;
  long fixed_grid_number;   /* grid index of the fixed q */
  int is_time_reversal;
  int swappable;            /* whether q' and q'' may be exchanged */
  int (*grid_address)[3];
  size_t *map_triplets;
  size_t *map_q;
  int *mesh;
  int (*rot)[3][3];
  npy_intp num_rot;
  size_t num_ir;

  if (!PyArg_ParseTuple(args, "OOOlOiOi",
                        &py_map_triplets,
                        &py_map_q,
                        &py_grid_address,
                        &fixed_grid_number,
                        &py_mesh,
                        &is_time_reversal,
                        &py_rotations,
                        &swappable)) {
    return NULL;
  }

  grid_address = (int(*)[3])PyArray_DATA(py_grid_address);
  map_triplets = (size_t*)PyArray_DATA(py_map_triplets);
  map_q = (size_t*)PyArray_DATA(py_map_q);
  mesh = (int*)PyArray_DATA(py_mesh);
  rot = (int(*)[3][3])PyArray_DATA(py_rotations);
  num_rot = PyArray_DIMS(py_rotations)[0];

  num_ir = tpl_get_triplets_reciprocal_mesh_at_q(map_triplets,
                                                 map_q,
                                                 grid_address,
                                                 fixed_grid_number,
                                                 mesh,
                                                 is_time_reversal,
                                                 num_rot,
                                                 rot,
                                                 swappable);

  return PyLong_FromSize_t(num_ir);
}
/* Python wrapper: convert the triplet mapping at one grid point into
   explicit BZ grid-point triplets.  Fills `py_triplets` in place and
   returns the number of irreducible triplets as a Python int. */
static PyObject * py_tpl_get_BZ_triplets_at_q(PyObject *self, PyObject *args)
{
  PyArrayObject *py_triplets;
  PyArrayObject *py_bz_grid_address;
  PyArrayObject *py_bz_map;
  PyArrayObject *py_map_triplets;
  PyArrayObject *py_mesh;
  long grid_point;
  size_t (*triplets)[3];    /* output triplets */
  int (*bz_grid_address)[3];
  size_t *bz_map;
  size_t *map_triplets;
  npy_intp num_map_triplets;
  int *mesh;
  size_t num_ir;

  if (!PyArg_ParseTuple(args, "OlOOOO",
                        &py_triplets,
                        &grid_point,
                        &py_bz_grid_address,
                        &py_bz_map,
                        &py_map_triplets,
                        &py_mesh)) {
    return NULL;
  }

  triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (size_t*)PyArray_DATA(py_bz_map);
  map_triplets = (size_t*)PyArray_DATA(py_map_triplets);
  num_map_triplets = PyArray_DIMS(py_map_triplets)[0];
  mesh = (int*)PyArray_DATA(py_mesh);

  num_ir = tpl_get_BZ_triplets_at_q(triplets,
                                    grid_point,
                                    bz_grid_address,
                                    bz_map,
                                    map_triplets,
                                    num_map_triplets,
                                    mesh);

  return PyLong_FromSize_t(num_ir);
}
/* Python wrapper: tetrahedron-method integration weights for q-point
   triplets.  Writes into `py_iw` and `py_iw_zero` (flags for weights that
   are exactly zero) in place; returns None.
   Fix: removed the local `num_iw`, which was declared but never used in
   this function (it is only needed in the *_with_sigma variant below). */
static PyObject *
py_set_triplets_integration_weights(PyObject *self, PyObject *args)
{
  PyArrayObject *py_iw;
  PyArrayObject *py_iw_zero;
  PyArrayObject *py_frequency_points;
  PyArrayObject *py_relative_grid_address;
  PyArrayObject *py_mesh;
  PyArrayObject *py_triplets;
  PyArrayObject *py_frequencies1;
  PyArrayObject *py_frequencies2;
  PyArrayObject *py_bz_grid_address;
  PyArrayObject *py_bz_map;
  int tp_type;              /* triplet/weight type selector */
  double *iw;
  char *iw_zero;
  double *frequency_points;
  int (*relative_grid_address)[4][3];  /* 24 tetrahedra x 4 vertices */
  int *mesh;
  size_t (*triplets)[3];
  int (*bz_grid_address)[3];
  size_t *bz_map;
  double *frequencies1, *frequencies2;
  npy_intp num_band0, num_band1, num_band2, num_triplets;

  if (!PyArg_ParseTuple(args, "OOOOOOOOOOi",
                        &py_iw,
                        &py_iw_zero,
                        &py_frequency_points,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_triplets,
                        &py_frequencies1,
                        &py_frequencies2,
                        &py_bz_grid_address,
                        &py_bz_map,
                        &tp_type)) {
    return NULL;
  }

  iw = (double*)PyArray_DATA(py_iw);
  iw_zero = (char*)PyArray_DATA(py_iw_zero);
  frequency_points = (double*)PyArray_DATA(py_frequency_points);
  num_band0 = PyArray_DIMS(py_frequency_points)[0];
  relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
  mesh = (int*)PyArray_DATA(py_mesh);
  triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
  num_triplets = PyArray_DIMS(py_triplets)[0];
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (size_t*)PyArray_DATA(py_bz_map);
  frequencies1 = (double*)PyArray_DATA(py_frequencies1);
  frequencies2 = (double*)PyArray_DATA(py_frequencies2);
  num_band1 = PyArray_DIMS(py_frequencies1)[1];
  num_band2 = PyArray_DIMS(py_frequencies2)[1];

  /* Trailing flags: openmp_per_triplets = 1, openmp_per_bands = 0. */
  tpl_get_integration_weight(iw,
                             iw_zero,
                             frequency_points,
                             num_band0,
                             relative_grid_address,
                             mesh,
                             triplets,
                             num_triplets,
                             bz_grid_address,
                             bz_map,
                             frequencies1,
                             num_band1,
                             frequencies2,
                             num_band2,
                             tp_type,
                             1,
                             0);

  Py_RETURN_NONE;
}
/* Python wrapper: Gaussian-smearing (sigma) analogue of the tetrahedron
   triplet integration weights.  Writes into `py_iw` and `py_iw_zero` in
   place; returns None. */
static PyObject *
py_set_triplets_integration_weights_with_sigma(PyObject *self, PyObject *args)
{
  PyArrayObject *py_iw;
  PyArrayObject *py_iw_zero;
  PyArrayObject *py_frequency_points;
  PyArrayObject *py_triplets;
  PyArrayObject *py_frequencies;
  double sigma, sigma_cutoff;   /* smearing width and cutoff in sigmas */
  double *iw;
  char *iw_zero;
  double *frequency_points;
  size_t (*triplets)[3];
  double *frequencies;
  npy_intp num_band0, num_band, num_iw, num_triplets;

  if (!PyArg_ParseTuple(args, "OOOOOdd",
                        &py_iw,
                        &py_iw_zero,
                        &py_frequency_points,
                        &py_triplets,
                        &py_frequencies,
                        &sigma,
                        &sigma_cutoff)) {
    return NULL;
  }

  iw = (double*)PyArray_DATA(py_iw);
  iw_zero = (char*)PyArray_DATA(py_iw_zero);
  frequency_points = (double*)PyArray_DATA(py_frequency_points);
  num_band0 = PyArray_DIMS(py_frequency_points)[0];
  triplets = (size_t(*)[3])PyArray_DATA(py_triplets);
  num_triplets = PyArray_DIMS(py_triplets)[0];
  frequencies = (double*)PyArray_DATA(py_frequencies);
  num_band = PyArray_DIMS(py_frequencies)[1];
  num_iw = PyArray_DIMS(py_iw)[0];

  tpl_get_integration_weight_with_sigma(iw,
                                        iw_zero,
                                        sigma,
                                        sigma_cutoff,
                                        frequency_points,
                                        num_band0,
                                        triplets,
                                        num_triplets,
                                        frequencies,
                                        num_band,
                                        num_iw);

  Py_RETURN_NONE;
}
#ifdef LIBFLAME
/* Python wrapper (libflame builds only): pseudo-invert the
   (i_sigma, i_temp) sub-block of the collision matrix via libflame,
   dropping eigenvalues below `cutoff`.  In place; returns None.
   NOTE(review): num_column assumes the 8-dimensional (with Cartesian
   components) layout unconditionally, unlike the dsyev path below which
   checks PyArray_NDIM — confirm callers only use this with 8-dim arrays. */
static PyObject * py_inverse_collision_matrix_libflame(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_eigenvalues;
  int i_sigma, i_temp;
  double cutoff;
  double *collision_matrix;
  double *eigvals;
  npy_intp num_temp, num_ir_grid_points, num_band;
  size_t num_column, adrs_shift;

  if (!PyArg_ParseTuple(args, "OOiid",
                        &py_collision_matrix,
                        &py_eigenvalues,
                        &i_sigma,
                        &i_temp,
                        &cutoff)) {
    return NULL;
  }

  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  eigvals = (double*)PyArray_DATA(py_eigenvalues);
  num_temp = PyArray_DIMS(py_collision_matrix)[1];
  num_ir_grid_points = PyArray_DIMS(py_collision_matrix)[2];
  num_band = PyArray_DIMS(py_collision_matrix)[3];
  num_column = num_ir_grid_points * num_band * 3;
  /* Offset of the square sub-matrix for (i_sigma, i_temp). */
  adrs_shift = (i_sigma * num_column * num_column * num_temp +
                i_temp * num_column * num_column);

  phonopy_pinv_libflame(collision_matrix + adrs_shift,
                        eigvals, num_column, cutoff);

  Py_RETURN_NONE;
}
#endif
/* Python wrapper: diagonalize the (i_sigma, i_temp) sub-block of the
   collision matrix with LAPACK dsyev (solver selected by `solver`);
   optionally follow up with a pseudo-inverse from the eigensolution when
   `is_pinv` is non-zero.  Eigenvalues go into `py_eigenvalues`, the block
   is overwritten with eigenvectors (then the pinv).  Returns the LAPACK
   info code as a Python int (0 on success). */
static PyObject *
py_diagonalize_collision_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_eigenvalues;
  double cutoff;
  int i_sigma, i_temp, is_pinv, solver;
  double *collision_matrix;
  double *eigvals;
  npy_intp num_temp, num_grid_point, num_band;
  size_t num_column, adrs_shift;
  int info;

  if (!PyArg_ParseTuple(args, "OOiidii",
                        &py_collision_matrix,
                        &py_eigenvalues,
                        &i_sigma,
                        &i_temp,
                        &cutoff,
                        &solver,
                        &is_pinv)) {
    return NULL;
  }

  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  eigvals = (double*)PyArray_DATA(py_eigenvalues);

  /* A 2-dim array is a single pre-extracted square matrix; otherwise the
     column count is derived from the grid-point/band axes (x3 when the
     8-dim layout carries Cartesian components). */
  if (PyArray_NDIM(py_collision_matrix) == 2) {
    num_temp = 1;
    num_column = PyArray_DIM(py_collision_matrix, 1);
  } else {
    num_temp = PyArray_DIM(py_collision_matrix, 1);
    num_grid_point = PyArray_DIM(py_collision_matrix, 2);
    num_band = PyArray_DIM(py_collision_matrix, 3);
    if (PyArray_NDIM(py_collision_matrix) == 8) {
      num_column = num_grid_point * num_band * 3;
    } else {
      num_column = num_grid_point * num_band;
    }
  }
  adrs_shift = (i_sigma * num_column * num_column * num_temp +
                i_temp * num_column * num_column);

  /* show_colmat_info(py_collision_matrix, i_sigma, i_temp, adrs_shift); */

  info = phonopy_dsyev(collision_matrix + adrs_shift,
                       eigvals, num_column, solver);
  if (is_pinv) {
    /* pinv_method 0: compare |eigenvalue| against cutoff. */
    pinv_from_eigensolution(collision_matrix + adrs_shift,
                            eigvals, num_column, cutoff, 0);
  }

  return PyLong_FromLong((long) info);
}
/* Python wrapper: overwrite the (i_sigma, i_temp) sub-block of the
   collision matrix with its pseudo-inverse built from a previously
   computed eigensolution (eigenvectors in the block, eigenvalues in
   `py_eigenvalues`).  `pinv_method` 0 keeps eigenvalues with
   |e| > cutoff, otherwise e > cutoff.  Returns None. */
static PyObject * py_pinv_from_eigensolution(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_eigenvalues;
  double cutoff;
  int i_sigma, i_temp, pinv_method;
  double *collision_matrix;
  double *eigvals;
  npy_intp num_temp, num_grid_point, num_band;
  size_t num_column, adrs_shift;

  if (!PyArg_ParseTuple(args, "OOiidi",
                        &py_collision_matrix,
                        &py_eigenvalues,
                        &i_sigma,
                        &i_temp,
                        &cutoff,
                        &pinv_method)) {
    return NULL;
  }

  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  eigvals = (double*)PyArray_DATA(py_eigenvalues);
  num_temp = PyArray_DIMS(py_collision_matrix)[1];
  num_grid_point = PyArray_DIMS(py_collision_matrix)[2];
  num_band = PyArray_DIMS(py_collision_matrix)[3];
  /* 8-dim layout carries the 3 Cartesian components as extra axes. */
  if (PyArray_NDIM(py_collision_matrix) == 8) {
    num_column = num_grid_point * num_band * 3;
  } else {
    num_column = num_grid_point * num_band;
  }
  adrs_shift = (i_sigma * num_column * num_column * num_temp +
                i_temp * num_column * num_column);

  /* show_colmat_info(py_collision_matrix, i_sigma, i_temp, adrs_shift); */

  pinv_from_eigensolution(collision_matrix + adrs_shift,
                          eigvals, num_column, cutoff, pinv_method);

  Py_RETURN_NONE;
}
/* Python wrapper: report the default collision-matrix eigensolver id for
   this build — 1 when compiled against MKL LAPACKE, 4 otherwise. */
static PyObject * py_get_default_colmat_solver(PyObject *self, PyObject *args)
{
  long solver;

  if (!PyArg_ParseTuple(args, "")) {
    return NULL;
  }

#ifdef MKL_LAPACKE
  solver = 1;
#else
  solver = 4;
#endif
  return PyLong_FromLong(solver);
}
/* Overwrite one row of the symmetric pseudo-inverse.
   data[row][col] = sum over kept eigenpairs k of
   V[row][k] * V[col][k] / e[k], for col in [row, size); the mirrored
   element data[col][row] is written too (V = tmp_data, the saved
   eigenvector matrix). */
static void pinv_set_sym_row(double *data,
                             const double *tmp_data,
                             const double *eigvals,
                             const size_t *keep,
                             const size_t num_keep,
                             const size_t size,
                             const size_t row)
{
  size_t col, k, row_ofs, col_ofs;
  double acc;

  row_ofs = row * size;
  for (col = row; col < size; col++) {
    col_ofs = col * size;
    acc = 0;
    for (k = 0; k < num_keep; k++) {
      acc += tmp_data[row_ofs + keep[k]] * tmp_data[col_ofs + keep[k]]
        / eigvals[keep[k]];
    }
    data[row_ofs + col] = acc;
    data[col_ofs + row] = acc;
  }
}

/* Replace `data` (size x size, holding eigenvectors column-wise per row
   index convention of phonopy_dsyev output) with the pseudo-inverse built
   from eigenvalues `eigvals`.  Eigenpairs are kept when
   |e| > cutoff (pinv_method == 0) or e > cutoff (otherwise).
   Rows are processed in front/back pairs so that OpenMP chunks get
   balanced work (row r touches size - r columns). */
static void pinv_from_eigensolution(double *data,
                                    const double *eigvals,
                                    const size_t size,
                                    const double cutoff,
                                    const int pinv_method)
{
  size_t i, num_keep;
  size_t *keep;       /* indices of retained eigenpairs */
  double *tmp_data;   /* snapshot of the eigenvector matrix */
  double e;

  tmp_data = (double*)malloc(sizeof(double) * size * size);
#pragma omp parallel for
  for (i = 0; i < size * size; i++) {
    tmp_data[i] = data[i];
  }

  keep = (size_t*)malloc(sizeof(size_t) * size);
  num_keep = 0;
  for (i = 0; i < size; i++) {
    e = (pinv_method == 0) ? fabs(eigvals[i]) : eigvals[i];
    if (e > cutoff) {
      keep[num_keep] = i;
      num_keep++;
    }
  }

#pragma omp parallel for
  for (i = 0; i < size / 2; i++) {
    pinv_set_sym_row(data, tmp_data, eigvals, keep, num_keep, size, i);
    pinv_set_sym_row(data, tmp_data, eigvals, keep, num_keep, size,
                     size - i - 1);
  }
  /* Odd size leaves the middle row unhandled by the pairing above. */
  if ((size % 2) == 1) {
    pinv_set_sym_row(data, tmp_data, eigvals, keep, num_keep, size,
                     (size - 1) / 2);
  }

  free(keep);
  keep = NULL;
  free(tmp_data);
  tmp_data = NULL;
}
/* Debug helper: print the collision-matrix array shape and the element
   offset (adrs_shift) of the (i_sigma, i_temp) sub-matrix.
   Fix: size_t arguments are now printed with the portable C99 "%zu"
   conversion; the previous "%lu" is wrong on LLP64 platforms (64-bit
   Windows), where unsigned long is 32-bit. */
static void show_colmat_info(const PyArrayObject *py_collision_matrix,
                             const size_t i_sigma,
                             const size_t i_temp,
                             const size_t adrs_shift)
{
  int i;

  printf(" Array_shape:(");
  for (i = 0; i < PyArray_NDIM(py_collision_matrix); i++) {
    printf("%d", (int)PyArray_DIM(py_collision_matrix, i));
    if (i < PyArray_NDIM(py_collision_matrix) - 1) {
      printf(",");
    } else {
      printf("), ");
    }
  }
  printf("Data shift:%zu [%zu, %zu]\n", adrs_shift, i_sigma, i_temp);
}
|
GB_unop__log1p_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log1p_fc64_fc64
// op(A') function: GB_unop_tran__log1p_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog1p (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog1p (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_clog1p (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply clog1p entrywise over anz entries, using
// nthreads OpenMP threads. Ab (if non-NULL) is the bitmap of A: entries
// with Ab [p] == 0 are skipped. Cx and Ax may be aliased (pure elementwise).
GrB_Info GB_unop_apply__log1p_fc64_fc64
(
    GxB_FC64_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = GB_clog1p (Ax [p]) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = GB_clog1p (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply clog1p to each entry.
// The heavy lifting is done by the shared template GB_unop_transpose.c,
// which expands in terms of the GB_* macros defined above.
GrB_Info GB_unop_tran__log1p_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // textual inclusion: the template reads the macros, not these arguments
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
prand.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Source/prand: parallel random number generator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// A simple thread-safe parallel pseudo-random number generator.
#include "prand.h"
//------------------------------------------------------------------------------
// prand macros
//------------------------------------------------------------------------------
// Generate the next seed, and extract a random 15-bit value from a seed.
#define PRAND_RECURENCE(seed) ((seed) * 1103515245 + 12345)
#define PRAND_15_MAX 32767
#define PRAND_15(seed) (((seed)/65536) % (PRAND_15_MAX + 1))
//------------------------------------------------------------------------------
// global types and operators
//------------------------------------------------------------------------------
// These can be shared by all threads in a user application, and thus are
// safely declared as global objects.
GrB_Type prand_type = NULL ;
GrB_UnaryOp prand_next_op = NULL ;
GrB_UnaryOp prand_iget_op = NULL ;
GrB_UnaryOp prand_xget_op = NULL ;
GrB_BinaryOp prand_dup_op = NULL ;
//------------------------------------------------------------------------------
// prand_next_op: unary operator to construct the next seed
//------------------------------------------------------------------------------
// z = f(x), where x is the old seed and z is the new seed.
// z = f(x): advance the generator. Each of the five lagged seeds is pushed
// through the linear-congruential recurrence independently.
void prand_next_f (prand_t *z, const prand_t *x)
{
    int i = 0 ;
    while (i < 5)
    {
        z->seed [i] = PRAND_RECURENCE (x->seed [i]) ;
        i++ ;
    }
}
//------------------------------------------------------------------------------
// prand_iget: unary operator to get a random integer from the seed
//------------------------------------------------------------------------------
// z = f(x), where x is a random seed, and z is an unsigned 64-bit
// pseudo-random number constructed from the seed.
// z = f(x): combine five 15-bit draws (one per seed) into a single
// unsigned 64-bit pseudo-random value, Horner style.
void prand_iget_f (uint64_t *z, const prand_t *x)
{
    uint64_t result = 0 ;
    for (int k = 0 ; k < 5 ; k++)
    {
        result = result * PRAND_15_MAX + PRAND_15 (x->seed [k]) ;
    }
    (*z) = result ;
}
//------------------------------------------------------------------------------
// prand_xget: unary operator to get a random double from the seed
//------------------------------------------------------------------------------
// z = f(x), where x is a random seed, and z is a double precision
// pseudo-random number constructed from the seed, in the range 0 to 1.
// z = f(x): draw a 64-bit pseudo-random integer from the seed, then scale
// it into the closed range [0, 1].
void prand_xget_f (double *z, prand_t *x)
{
    uint64_t raw ;
    prand_iget_f (&raw, x) ;
    (*z) = ((double) raw) / ((double) UINT64_MAX) ;
}
//------------------------------------------------------------------------------
// prand_dup: binary operator to build a vector
//------------------------------------------------------------------------------
// This is required by GrB_Vector_build, but is never called since no
// duplicates are created. This is the SECOND operator for the prand_type.
#if defined ( __INTEL_COMPILER )
// disable icc warnings
// 869: unused parameters
#pragma warning (disable: 869 )
#elif defined ( __GNUC__ )
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
// z = f(x,y): keep the second value. Required by GrB_Vector_build but never
// actually invoked, since prand_seed builds with unique indices.
void prand_dup_f (prand_t *z, /* unused: */ const prand_t *x, const prand_t *y)
{
    // struct assignment copies all five seeds at once
    *z = *y ;
}
//------------------------------------------------------------------------------
// prand_init: create the random seed type and its operators
//------------------------------------------------------------------------------
// Free every global type/operator created by prand_init (freeing NULL is OK).
#define PRAND_FREE_ALL \
{ \
    GrB_Type_free (&prand_type) ; \
    GrB_UnaryOp_free (&prand_next_op) ; \
    GrB_UnaryOp_free (&prand_iget_op) ; \
    GrB_UnaryOp_free (&prand_xget_op) ; \
    GrB_BinaryOp_free (&prand_dup_op) ; \
}
#undef OK
// Run a GraphBLAS method; on failure, free everything, print the error
// message, and return the error code from the enclosing function.
#define OK(method) \
{ \
    GrB_Info info = method ; \
    if (info != GrB_SUCCESS) \
    { \
        PRAND_FREE_ALL ; \
        printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ; \
        return (info) ; \
    } \
}
// Create the global prand type and its four operators. Must be called once
// before any other prand_* function. Returns GrB_SUCCESS or the first
// GraphBLAS error encountered (after freeing any partial state via OK).
GrB_Info prand_init ( )
{
    // start from a clean slate so PRAND_FREE_ALL is safe on partial failure
    prand_type = NULL ;
    prand_next_op = NULL ;
    prand_iget_op = NULL ;
    prand_xget_op = NULL ;
    prand_dup_op = NULL ;
    // user-defined type: the 5-seed state struct
    OK (GrB_Type_new (&prand_type, sizeof (prand_t))) ;
    // z = next(x): advance a seed
    OK (GrB_UnaryOp_new (&prand_next_op, (GxB_unary_function) prand_next_f,
        prand_type, prand_type)) ;
    // z = iget(x): uint64 random value from a seed
    OK (GrB_UnaryOp_new (&prand_iget_op, (GxB_unary_function) prand_iget_f,
        GrB_UINT64, prand_type)) ;
    // z = xget(x): double random value in [0,1] from a seed
    OK (GrB_UnaryOp_new (&prand_xget_op, (GxB_unary_function) prand_xget_f,
        GrB_FP64, prand_type)) ;
    // dup operator needed by GrB_Vector_build (never actually called)
    OK (GrB_BinaryOp_new (&prand_dup_op, (GxB_binary_function) prand_dup_f,
        prand_type, prand_type, prand_type)) ;
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_finalize: free the random seed type and its operators
//------------------------------------------------------------------------------
// Free the global type and operators created by prand_init.
GrB_Info prand_finalize ( )
{
    PRAND_FREE_ALL ;
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_next: get the next random numbers
//------------------------------------------------------------------------------
// Advance every seed in the vector in place: Seed = next(Seed).
GrB_Info prand_next
(
    GrB_Vector Seed
)
{
    return (GrB_Vector_apply (Seed, NULL, NULL, prand_next_op, Seed, NULL)) ;
}
//------------------------------------------------------------------------------
// prand_seed: create a vector of random seeds
//------------------------------------------------------------------------------
// Returns a vector of random seed values.
// Free the temporary I/X tuple arrays used while building the Seed vector.
#define PRAND_FREE_WORK \
{ \
    free (I) ; \
    free (X) ; \
}
#undef PRAND_FREE_ALL
// On error inside prand_seed: free the workspace and the partially-built
// Seed vector itself (the global operators are left intact).
#define PRAND_FREE_ALL \
{ \
    PRAND_FREE_WORK ; \
    GrB_Vector_free (Seed) ; \
}
// Create a vector of n random seed states, derived from the scalar `seed`,
// and advance it once so the first draws are already mixed. On any failure
// the partially-built vector and workspace are freed (see PRAND_FREE_ALL).
GrB_Info prand_seed
(
    GrB_Vector *Seed, // vector of random number seeds
    int64_t seed, // scalar input seed
    GrB_Index n, // size of Seed to create
    int nthreads // # of threads to use (OpenMP default if <= 0)
)
{
    GrB_Index *I = NULL ;
    prand_t *X = NULL ;
    // allocate the Seed vector
    OK (GrB_Vector_new (Seed, prand_type, n)) ;
    // allocate the I and X arrays (n+1 so malloc(0) is never requested)
    I = malloc ((n+1) * sizeof (GrB_Index)) ;
    X = malloc ((n+1) * sizeof (prand_t)) ;
    if (I == NULL || X == NULL)
    {
        PRAND_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    // determine # of threads to use
    int nthreads_max = 1 ;
    #ifdef _OPENMP
    nthreads_max = omp_get_max_threads ( ) ;
    #endif
    if (nthreads <= 0 || nthreads > nthreads_max)
    {
        nthreads = nthreads_max ;
    }
    // construct the tuples for the initial seeds: entry i gets five distinct
    // starting seeds derived from (seed, i, k)
    int64_t i, len = (int64_t) n ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (i = 0 ; i < len ; i++)
    {
        I [i] = i ;
        for (int k = 0 ; k < 5 ; k++)
        {
            X [i].seed [k] = (100000000*(seed) + 10*i + k + 1) ;
        }
    }
    // build the Seed vector (indices are unique, so prand_dup_op never fires)
    OK (GrB_Vector_build_UDT (*Seed, I, X, n, prand_dup_op)) ;
    // free workspace
    PRAND_FREE_WORK ;
    // advance to the first set of random numbers
    OK (prand_next (*Seed)) ;
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_print: print the Seed vector
//------------------------------------------------------------------------------
// This is meant for testing, not production use.
#undef PRAND_FREE_ALL
#define PRAND_FREE_ALL ;
// Print the Seed vector (testing only, not production).
// pr = 0: print nothing; pr = 1: first ~10 entries; pr = 2: all entries.
GrB_Info prand_print
(
    GrB_Vector Seed,
    int pr // 0: print nothing, 1: print some, 2: print all
)
{
    if (pr > 0)
    {
        GrB_Index n ;
        OK (GrB_Vector_nvals (&n, Seed)) ;
        printf ("\nSeed: length %g\n", (double) n) ;
        prand_t x ;
        // sentinel values, overwritten when extractElement succeeds
        for (int k = 0 ; k < 5 ; k++) x.seed [k] = -1 ;
        for (int64_t i = 0 ; i < (int64_t) n ; i++)
        {
            // implicit (unset) entries are silently skipped
            if (GrB_Vector_extractElement_UDT (&x, Seed, i) == GrB_SUCCESS)
            {
                printf ("%g: ", (double) i) ;
                for (int k = 0 ; k < 5 ; k++)
                {
                    printf (" %.18g", (double) (x.seed [k])) ;
                }
                printf ("\n") ;
            }
            if (pr == 1 && i > 10) break ;
        }
    }
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_iget: return a vector of random uint64 integers
//------------------------------------------------------------------------------
// X = iget(Seed): fill X with pseudo-random uint64 values, then advance the
// seeds so the next call produces fresh numbers.
GrB_Info prand_iget
(
    GrB_Vector X,
    GrB_Vector Seed
)
{
    OK (GrB_Vector_apply (X, NULL, NULL, prand_iget_op, Seed, NULL)) ;
    return (prand_next (Seed)) ;
}
//------------------------------------------------------------------------------
// prand_xget: return a vector of random doubles, in range 0 to 1 inclusive
//------------------------------------------------------------------------------
// X = xget(Seed): fill X with pseudo-random doubles in [0,1], then advance
// the seeds so the next call produces fresh numbers.
GrB_Info prand_xget
(
    GrB_Vector X,
    GrB_Vector Seed
)
{
    OK (GrB_Vector_apply (X, NULL, NULL, prand_xget_op, Seed, NULL)) ;
    return (prand_next (Seed)) ;
}
|
rf_matrix.h | #ifndef RF_MATRIX_H
#define RF_MATRIX_H
// headers
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <vector>
#include <cmath>
#include <cstddef>
#include <assert.h>
#include <omp.h>
#include <iostream>
#include <fstream>
#include <sstream>
#if __cplusplus >= 201103L || (defined(_MSC_VER) && (_MSC_VER >= 1500)) // Visual Studio 2008
#define CPP11
#endif
#ifdef _MSC_VER
#if _MSC_VER >= 1600
#include <cstdint>
#else
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
#endif
#else
#if !defined(_MSC_VER) && defined(CPP11)
#include <cstdint>
#else
typedef short int int16_t;
typedef int int32_t;
typedef long int64_t;
typedef unsigned char uint8_t;
typedef unsigned short int uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long uint64_t;
#endif
#endif
/* random number generator: simulate the interface of python's random module */
#include <limits>
#if defined(CPP11)
#include <random>
// Thin wrapper over a C++11 random engine exposing a python-random-like API.
template<typename Engine=std::mt19937>
struct random_number_generator : public Engine {
    typedef typename Engine::result_type result_type;

    // Seed defaults to 0, mirroring a fixed python random.seed(0).
    random_number_generator(unsigned seed=0): Engine(seed) { }

    // Raw engine draw reduced modulo `end` (has slight modulo bias by design).
    result_type randrange(result_type end=Engine::max()) {
        return Engine::operator()() % end;
    }

    // Uniform real in [start, end).
    template<class T=double, class T2=double> T uniform(T start=0.0, T2 end=1.0) {
        std::uniform_real_distribution<T> dist(start, (T)end);
        return dist(*this);
    }

    // Gaussian with the given mean and standard deviation.
    template<class T=double> T normal(T mean=0.0, T stddev=1.0) {
        std::normal_distribution<T> dist(mean, stddev);
        return dist(*this);
    }

    // Uniform integer on the closed range [start, end].
    template<class T=int, class T2=T> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) {
        std::uniform_int_distribution<T> dist(start, end);
        return dist(*this);
    }

    // Shuffle [first, last) using this engine as the randomness source.
    template<class RandIter> void shuffle(RandIter first, RandIter last) {
        std::shuffle(first, last, *this);
    }
};
#else
#include <tr1/random>
// Pre-C++11 fallback built on std::tr1; mirrors the C++11 wrapper above.
template<typename engine_t=std::tr1::mt19937>
struct random_number_generator : public engine_t {
    typedef typename engine_t::result_type result_type;
    random_number_generator(unsigned seed=0): engine_t(seed) { }
    // raw engine draw
    result_type operator()() { return engine_t::operator()(); }
    // draw in [0, n-1]; this overload is what std::random_shuffle calls
    result_type operator()(result_type n) { return randint(result_type(0), result_type(n-1)); }
    // raw draw reduced modulo `end` (slight modulo bias, as in the C++11 version)
    result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; }
    // uniform real in [start, end)
    template<class T, class T2> T uniform(T start=0.0, T2 end=1.0) {
        typedef std::tr1::uniform_real<T> dist_t;
        return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,(T)end))();
    }
    // Gaussian with the given mean and standard deviation
    template<class T, class T2> T normal(T mean=0.0, T2 stddev=1.0) {
        typedef std::tr1::normal_distribution<T> dist_t;
        return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(mean, (T)stddev))();
    }
    // uniform integer on the closed range [start, end]
    template<class T, class T2> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) {
        typedef std::tr1::uniform_int<T> dist_t;
        return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,end))();
    }
    // shuffle via std::random_shuffle, which uses operator()(n) above
    template<class RandIter> void shuffle(RandIter first, RandIter last) {
        std::random_shuffle(first, last, *this);
    }
};
#endif
typedef random_number_generator<> rng_t;
// Build a random permutation `perm` of 0..size-1 and its inverse `inv_perm`
// (inv_perm[perm[i]] == i), reproducible for a given seed.
template<typename T>
void gen_permutation_pair(size_t size, std::vector<T> &perm, std::vector<T> &inv_perm, int seed=0) {
    // start from the identity permutation
    perm.resize(size);
    for(size_t pos = 0; pos < size; pos++)
        perm[pos] = pos;
    // shuffle with the file-level RNG wrapper so results match the old
    // std::srand/std::random_shuffle behavior deterministically per seed
    rng_t rng(seed);
    rng.shuffle(perm.begin(), perm.end());
    // invert
    inv_perm.resize(size);
    for(size_t pos = 0; pos < size; pos++)
        inv_perm[perm[pos]] = pos;
}
//#include "zlib_util.h"
#define MALLOC(type, size) (type*)malloc(sizeof(type)*(size))
#define CALLOC(type, size) (type*)calloc((size), sizeof(type))
#define REALLOC(ptr, type, size) (type*)realloc((ptr), sizeof(type)*(size))
typedef unsigned major_t;
const major_t ROWMAJOR = 1U;
const major_t COLMAJOR = 2U;
const major_t default_major = COLMAJOR;
// Zip Iterator
// Common usage: std::sort(zip_iter(A.begin(),B.begin()), zip_iter(A.end(),B.end()));
template<class T1, class T2> struct zip_body;
template<class T1, class T2> struct zip_ref;
template<class IterT1, class IterT2> struct zip_it;
template<class IterT1, class IterT2> zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y);
#define dvec_t dense_vector
template<typename val_type> class dvec_t;
#define svec_t sparse_vector
template<typename val_type> class svec_t;
#define sdvec_t sparse_dense_vector
template<typename val_type> class sdvec_t; // a dense vector with sparse indices
#define gvec_t general_vector
// Abstract base for vectors; carries only the logical length plus virtual
// type tags so callers can downcast to the sparse or dense subclass.
template<typename val_type> class gvec_t {
    public:
        size_t len;  // logical length of the vector
        gvec_t(size_t len=0): len(len){}
        size_t size() const { return len; }
        // type tags: each concrete subclass overrides exactly one of these
        virtual bool is_sparse() const {return false;}
        virtual bool is_dense() const {return false;}
        // checked downcasts (assert the tag before the static_cast)
        svec_t<val_type>& get_sparse() {assert(is_sparse()); return static_cast<svec_t<val_type>&>(*this);}
        const svec_t<val_type>& get_sparse() const {assert(is_sparse()); return static_cast<const svec_t<val_type>&>(*this);}
        dvec_t<val_type>& get_dense() {assert(is_dense()); return static_cast<dvec_t<val_type>&>(*this);}
        const dvec_t<val_type>& get_dense() const {assert(is_dense()); return static_cast<const dvec_t<val_type>&>(*this);}
};
#define dmat_t dense_matrix
template<typename val_type> class dmat_t;
#define smat_t sparse_matrix
template<typename val_type> class smat_t;
#define eye_t identity_matrix
template<typename val_type> class eye_t;
#define gmat_t general_matrix
// Abstract base for matrices. The base class itself behaves as the zero
// matrix: its Xv/XTu only clear the output (when addson == 0). Subclasses
// (smat_t, dmat_t, eye_t) override the virtual tag and product methods.
template<typename val_type> class gmat_t {
    public:
        size_t rows, cols;
        gmat_t(size_t rows=0, size_t cols=0): rows(rows), cols(cols) { }
        size_t num_rows() const { return rows; }
        size_t num_cols() const { return cols; }
        // type tags: each concrete subclass overrides exactly one of these
        virtual bool is_sparse() const { return false; }
        virtual bool is_dense() const { return false; }
        virtual bool is_identity() const { return false; }
        // a gmat_t that is none of the above acts as the zero matrix
        bool is_zero() const { return !is_sparse() && !is_dense() && !is_identity(); }
        // checked downcasts (assert the tag before the static_cast)
        smat_t<val_type>& get_sparse() { assert(is_sparse()); return static_cast<smat_t<val_type>&>(*this); }
        const smat_t<val_type>& get_sparse() const { assert(is_sparse()); return static_cast<const smat_t<val_type>&>(*this); }
        dmat_t<val_type>& get_dense() { assert(is_dense()); return static_cast<dmat_t<val_type>&>(*this); }
        const dmat_t<val_type>& get_dense() const { assert(is_dense()); return static_cast<const dmat_t<val_type>&>(*this); }
        // Xv = X*v (or Xv += X*v when addson != 0); zero matrix => just clear
        virtual dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
            if(addson == 0)
                memset(Xv.buf, 0, sizeof(val_type) * Xv.len);
            return Xv;
        }
        virtual dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
            if(addson == 0)
                memset(Xv.buf, 0, sizeof(val_type) * Xv.len);
            return Xv;
        }
        // dispatch on the dynamic type of v
        dvec_t<val_type>& Xv(const gvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
            if(v.is_sparse())
                return this->Xv(v.get_sparse(), Xv, addson);
            else if(v.is_dense())
                return this->Xv(v.get_dense(), Xv, addson);
            else // Should not be here
                return Xv;
        }
        // XTu = X'*u (or XTu += X'*u when addson != 0); zero matrix => just clear
        virtual dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
            if(addson == 0)
                memset(XTu.buf, 0, sizeof(val_type) * XTu.len);
            return XTu;
        }
        virtual dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
            if(addson == 0)
                memset(XTu.buf, 0, sizeof(val_type) * XTu.len);
            return XTu;
        }
        // dispatch on the dynamic type of u
        dvec_t<val_type>& XTu(const gvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
            if(u.is_sparse())
                return this->XTu(u.get_sparse(), XTu, addson);
            else if(u.is_dense())
                return this->XTu(u.get_dense(), XTu, addson);
            else // Should not be here
                return XTu;
        }
};
template<typename val_type> class entry_t;
template<typename val_type> class entry_iterator_t; // iterator base class
template<typename val_type> class file_iterator_t; // iterator for files with (i,j,v) tuples
template<typename val_type> class svmlight_file_iterator_t; // iterator for svmlight files
template<typename val_type> class coo_iterator_t; //iterator for three vectors (I, J, V) tuples
template<typename val_type> class smat_iterator_t; // iterator for nonzero entries in smat_t
template<typename val_type> class smat_subset_iterator_t; // iterator for nonzero entries in a subset
template<typename val_type> class dmat_iterator_t; // iterator for nonzero entries in dmat_t
/*------------------- Essential Linear Algebra Operations -------------------*/
// H = X*W, (X: m*n, W: n*k, H: m*k)
template<typename val_type> dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H);
template<typename val_type> dmat_t<val_type> operator*(const dmat_t<val_type> &X, const dmat_t<val_type> &W);
template<typename val_type> dmat_t<val_type>& smat_x_dmat(const smat_t<val_type>& X, const dmat_t<val_type> &W, dmat_t<val_type> &H);
template<typename val_type> dmat_t<val_type>& gmat_x_dmat(const gmat_t<val_type>& X, const dmat_t<val_type> &W, dmat_t<val_type> &H);
template<typename val_type> dmat_t<val_type> operator*(const smat_t<val_type> &X, const dmat_t<val_type> &W);
// H = a*X*W + H0, (X: m*n, W: n*k, H: m*k)
template<typename val_type, typename T2> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
template<typename val_type, typename T2> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
template<typename val_type, typename T2> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
// H = a*X*W + b*H0, (X: m*n, W: n*k, H: m*k)
template<typename val_type, typename T2, typename T3> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
template<typename val_type, typename T2, typename T3> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
template<typename val_type, typename T2, typename T3> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
// trace(W'*X*H)
template<typename val_type> val_type trace_dmat_T_smat_dmat(const dmat_t<val_type>& W, const smat_t<val_type>& X, const dmat_t<val_type>& H);
// trace(W'*diag(D)*H)
template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type>& W, const dvec_t<val_type>& D, const dmat_t<val_type>& H);
/*-------------- Essential Linear Algebra Solvers -------------------*/
// Solve AX = B using Cholesky Factorization (A: Positive Definite)
template<typename val_type> dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B, bool A_as_workspace);
// Solve Ax = b using Cholesky Factorization (A: Positive Definite)
template<typename val_type> dvec_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dvec_t<val_type>& b, bool A_as_workspace);
// SVD: A = USV'
template<typename val_type> void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true, bool A_as_workspace=false);
/*-------------- Vectors & Matrices -------------------*/
// Dense Vector
// Dense vector. Can either own its buffer (deep) or borrow one (view);
// mem_alloc_by_me distinguishes the two. Views are cheap object copies;
// deep vectors copy their contents.
template<typename val_type>
class dvec_t : public gvec_t<val_type> {
    friend class dmat_t<val_type>;
    private:
        // true iff buf was malloc'd by this object; false for views
        bool mem_alloc_by_me;
        // reset to an empty view; does NOT free buf (callers free first)
        void zero_init() {
            len = 0;
            buf = NULL;
            mem_alloc_by_me = false;
        }
    public:
        // size_t len; inherited from gvec_t
        using gvec_t<val_type>::len;
        val_type *buf;  // contiguous storage (owned or borrowed, see mem_alloc_by_me)
        // Default Constructor
        dvec_t() { zero_init(); }
        // Copy Constructor
        dvec_t(const dvec_t& v) {
            zero_init();
            *this = v;
        }
        // Copy Assignment: view = view copy (shares buf), deep = deep copy.
        dvec_t& operator=(const dvec_t& other) {
            if(this == &other) { return *this; }
            if(other.is_view()) { // view to view copy
                if(mem_alloc_by_me) clear_space();
                // raw object copy; safe because both sides are dvec_t and the
                // result deliberately shares other's buffer
                memcpy(static_cast<void*>(this), &other, sizeof(dvec_t));
            } else { // deep to deep copy
                resize(other.size());
                memcpy(buf, other.buf, sizeof(val_type)*len);
            }
            return *this;
        }
        // View Constructor: allocate space (w/ all 0) if buf == NULL
        explicit dvec_t(size_t len, val_type *buf=NULL): gvec_t<val_type>(len), mem_alloc_by_me(false), buf(buf) {
            if(buf == NULL && len != 0) {
                this->buf = MALLOC(val_type, len);
                memset(this->buf, 0, sizeof(val_type)*len);
                mem_alloc_by_me = true;
            }
        }
        // Fill Constructor: deep vector of `len` copies of x
        explicit dvec_t(size_t len, const val_type &x) {
            zero_init();
            resize(len, x);
        }
        // Constructor - dense_matrix => dense_vector:
        // Having the same status (view or deep) as m (the dense matrix).
        // (expand the matrix using row major)
        dvec_t(const dmat_t<val_type>& m) {
            zero_init();
            if(m.is_view()) {
                len = m.rows * m.cols;
                buf = m.buf;
            }
            else {
                resize(m.rows * m.cols);
                memcpy(buf, m.buf, sizeof(val_type) * len);
            }
        }
        // Constructor - sparse_vector => dense_vector:
        // Always deep; missing entries become explicit zeros.
        dvec_t(const svec_t<val_type>& v) {
            zero_init();
            resize(v.len);
            memset(buf, 0, sizeof(val_type) * len);
            for(size_t i = 0; i < v.nnz; i++)
                buf[v.idx[i]] = v.val[i];
        }
#if defined(CPP11)
        // Move Constructor
        dvec_t(dvec_t&& m) {
            zero_init();
            *this = std::move(m);
        }
        // Move Assignment: steal other's buffer and leave it empty
        dvec_t& operator=(dvec_t&& other) {
            if(this == &other) { return *this; }
            clear_space();
            memcpy(static_cast<void*>(this), &other, sizeof(dvec_t));
            other.zero_init();
            return *this;
        }
#endif
        ~dvec_t() { clear_space(); }
        bool is_view() const { return mem_alloc_by_me == false; }
        bool is_dense() const { return true; }
        // free owned storage (if any) and become an empty view
        void clear_space() {
            if(mem_alloc_by_me) { free(buf); }
            zero_init();
        }
        // non-owning alias of this vector's buffer
        dvec_t get_view() const {
            return dvec_t(len, buf); // using view constructor
        }
        // if this is a view, turn it into an owning deep copy of the same data
        dvec_t& grow_body() {
            if(is_view()) {
                dvec_t tmp_view = *this; // Copy Assignment: View to view
                this->resize(len);
                memcpy(buf, tmp_view.buf, sizeof(val_type)*len);
            }
            return *this;
        }
        // Similar to operator=, but operator= uses view to view, deep to deep.
        // "assign" will directly change the underlying data, no matter view or deep.
        dvec_t& assign(const dvec_t& other) {
            assert(len == other.len);
            return assign((val_type)1.0, other);
        }
        // this := a * other, written through the existing buffer
        dvec_t& assign(val_type a, const dvec_t& other) {
            assert(len == other.len);
            if(a == val_type(0))
                memset(buf, 0, sizeof(val_type)*len);
            else if(a == val_type(1)) {
                if(this == &other)
                    return *this;
#pragma omp parallel for schedule(static)
                for(size_t idx = 0; idx < len; idx++)
                    at(idx) = other.at(idx);
            } else {
#pragma omp parallel for schedule(static)
                for(size_t idx = 0; idx < len; idx++)
                    at(idx) = a*other.at(idx);
            }
            return *this;
        }
        // resize will always grow body => is_view() becomes false;
        // contents are filled with x
        void resize(size_t len_, const val_type &x) {
            resize(len_);
            if(x == 0)
                memset(buf, 0, sizeof(val_type) * len);
            else {
                for(size_t i = 0; i < len; i++)
                    buf[i] = x;
            }
        }
        // resize will always grow body => is_view() becomes false
        // (values in buf are not initialized)
        void resize(size_t len_) {
            if(mem_alloc_by_me)
                buf = REALLOC(buf, val_type, len_);
            else
                buf = MALLOC(val_type, len_);
            mem_alloc_by_me = true;
            len = len_;
        }
        // unchecked element access
        val_type& at(size_t idx) { return buf[idx]; }
        const val_type& at(size_t idx) const { return buf[idx]; }
        val_type& operator[](size_t idx) { return buf[idx]; }
        const val_type& operator[](size_t idx) const { return buf[idx]; }
        val_type* data() { return buf; }
        const val_type* data() const { return buf; }
        val_type& back() { return buf[len - 1]; }
        const val_type& back() const { return buf[len - 1]; }
        // debugging aid: dump metadata and all elements to stdout
        void print(const char *str="") const {
            printf("%s dvec_t: len %lu, is_view %d, buf %p\n", str, len, is_view(), buf);
            for(size_t i = 0; i < len; i ++)
                printf("%.3f ", buf[i]);
            puts("");
        }
};
// Sparse Vector
// Sparse vector stored as parallel (idx, val) arrays of length nnz.
// Like dvec_t, it can own its arrays (deep) or borrow them (view);
// mem_alloc_by_me distinguishes the two.
template<typename val_type>
class svec_t : public gvec_t<val_type> {
    friend class smat_t<val_type>;
    private:
        // true iff idx/val were malloc'd by this object; false for views
        bool mem_alloc_by_me;
        // reset to an empty view; does NOT free idx/val (callers free first)
        void zero_init() {
            len = nnz = 0;
            idx = NULL; val = NULL;
            mem_alloc_by_me = false;
        }
    public:
        // size_t len; inherited from gvec_t
        using gvec_t<val_type>::len;
        size_t nnz;     // number of stored (nonzero) entries
        unsigned *idx;  // nonzero positions, parallel to val
        val_type *val;  // nonzero values
        // Default Constructor
        svec_t() { zero_init(); }
        // Copy Constructor
        svec_t(const svec_t& v) {
            zero_init();
            *this = v;
        }
        // Copy Assignment: view = view copy (shares arrays), deep = deep copy.
        svec_t& operator=(const svec_t& other) {
            if(this == &other) return *this;
            if(other.is_view()) { // view to view copy
                if(mem_alloc_by_me) clear_space();
                // cast through void* for consistency with dvec_t::operator=
                // (also silences -Wclass-memaccess on a non-trivial type)
                memcpy(static_cast<void*>(this), &other, sizeof(svec_t));
            } else { // deep to deep copy
                resize(other.len, other.nnz);
                memcpy(idx, other.idx, sizeof(unsigned) * nnz);
                memcpy(val, other.val, sizeof(val_type) * nnz);
            }
            return *this;
        }
        // View Constructor:
        // If idx != NULL and val != NULL, we create a view copy. (view)
        // Otherwise, we will allocate nnz space for both idx and val. (deep)
        explicit svec_t(size_t len, size_t nnz, unsigned *idx, val_type *val) : gvec_t<val_type>(len), mem_alloc_by_me(false), nnz(nnz) {
            if(nnz == 0){
                this->idx = NULL;
                this->val = NULL;
            }
            else {
                if(idx != NULL && val != NULL) {
                    this->idx = idx;
                    this->val = val;
                } else {
                    zero_init();
                    resize(len, nnz);
                }
            }
        }
        /* (Don't delete yet, so can understand codes not yet adapted elsewhere)
        // Fill Constructor:
        // Always deep.
        // If idx == NULL, we fill this->idx with 0.
        // If idx != NULL, we still allocate this->idx and copy from idx.
        explicit svec_t(size_t len, size_t nnz, const unsigned *idx=NULL, const val_type &x=0) {
            zero_init();
            resize(len, nnz, x, idx);
        }
        */
        // Constructor - sparse_matrix => sparse_vector:
        // Always deep. (expand using row major)
        svec_t(const smat_t<val_type>& m) {
            zero_init();
            resize(m.rows * m.cols, m.nnz);
            // size_t indices: the original used int, which truncates for
            // matrices with more than INT_MAX rows/nonzeros
            for(size_t i = 0; i < m.rows; i++) {
                for(size_t j = m.row_ptr[i]; j < m.row_ptr[i+1]; j++) {
                    idx[j] = m.cols * i + m.col_idx[j];
                    val[j] = m.val_t[j];
                }
            }
        }
        // Constructor - dense_vector => sparse_vector:
        // Always deep; entries with |v[i]| < threshold are dropped.
        svec_t(const dvec_t<val_type>& v, double threshold=1e-12) {
            zero_init();
            len = v.size();
            for(size_t i = 0; i < v.size(); i++)
                if(fabs((double)v.at(i)) >= threshold)
                    nnz ++;
            resize(len, nnz);
            size_t k = 0;
            for(size_t i = 0; i < v.size(); i++)
                if(fabs((double)v.at(i)) >= threshold) {
                    idx[k] = i;
                    val[k] = v.at(i);
                    k++;
                }
        }
#if defined(CPP11)
        // Move Constructor
        svec_t(svec_t&& m) {
            zero_init();
            *this = std::move(m);
        }
        // Move Assignment: steal other's arrays and leave it empty
        svec_t& operator=(svec_t&& other) {
            if(this == &other) return *this;
            clear_space();
            memcpy(static_cast<void*>(this), &other, sizeof(svec_t));
            other.zero_init();
            return *this;
        }
#endif
        ~svec_t() { clear_space(); }
        size_t get_nnz() const { return nnz; }
        bool is_view() const { return mem_alloc_by_me == false; }
        bool is_sparse() const { return true; }
        // free owned storage (if any) and become an empty view
        void clear_space() {
            if(mem_alloc_by_me){
                free(idx);
                free(val);
            }
            zero_init();
        }
        // non-owning alias of this vector's arrays
        svec_t get_view() const {
            return svec_t(len, nnz, idx, val); // using view constructor
        }
        // if this is a view, turn it into an owning deep copy of the same data
        svec_t& grow_body() {
            if(is_view()) {
                svec_t tmp_view = *this; // Copy Assignment: View to view
                this->resize(len, nnz);
                memcpy(idx, tmp_view.idx, sizeof(unsigned)*nnz);
                memcpy(val, tmp_view.val, sizeof(val_type)*nnz);
            }
            return *this;
        }
        // Similar to operator=, but operator= uses view to view, deep to deep.
        // "assign" will directly change the underlying data, no matter view or deep.
        // (so we assert that the sparse vector is not a view on sparse matrix)
        svec_t& assign(const svec_t& other) {
            assert(len == other.len && nnz == other.nnz);
            return assign((val_type)1.0, other);
        }
        // this := a * other, written through the existing arrays
        svec_t& assign(val_type a, const svec_t& other) {
            assert(len == other.len && nnz == other.nnz);
            if(a == val_type(0))
                memset(val, 0, sizeof(val_type)*nnz);
            else if(a == val_type(1) && this == &other) {
                return *this;
            } else {
#pragma omp parallel for schedule(static)
                for(ptrdiff_t k = 0; k < (ptrdiff_t)nnz; k++){
                    idx[k] = other.idx[k];
                    val[k] = a*other.val[k];
                }
            }
            // BUG FIX: the original fell off the end here on the a==0 and
            // general paths — undefined behavior for a reference-returning
            // function (dvec_t::assign returns *this; this now matches).
            return *this;
        }
        /* (Don't delete yet, so can understand codes not yet adapted elsewhere)
        // "resize" will always grow body => is_view() becomes false
        // (we will copy the whole idx to this->idx)
        void resize(size_t len_, size_t nnz_, const val_type &x, const unsigned *idx=NULL) {
            resize(len_, nnz_);
            if(idx == NULL)
                memset(this->idx, 0, sizeof(unsigned)*nnz);
            else
                memcpy(this->idx, idx, sizeof(unsigned)*nnz);
            for(size_t k = 0; k < nnz; k++)
                this->val[k] = x;
        }
        */
        // "resize" will always grow body => is_view() becomes false
        // (values in idx, val are not initialized)
        void resize(size_t len_, size_t nnz_) {
            if(mem_alloc_by_me){
                idx = REALLOC(idx, unsigned, nnz_);
                val = REALLOC(val, val_type, nnz_);
            }
            else{
                idx = MALLOC(unsigned, nnz_);
                val = MALLOC(val_type, nnz_);
            }
            mem_alloc_by_me = true;
            len = len_; nnz = nnz_;
        }
        // debugging aid: dump the vector densely (zeros included) to stdout
        void print(const char *str="") const {
            printf("%s svec_t: len %lu, nnz %lu, is_view %d\n", str, len, nnz, is_view());
            size_t j = 0;
            for(size_t i = 0; i < len; i++){
                if(j < nnz && idx[j] == i){
                    printf("%.3f ", val[j]);
                    j++;
                }
                else
                    printf("0.000 ");
            }
            puts("");
        }
};
// Sparse Dense Vector
// Sparse-dense vector: dense storage (from dvec_t) plus a list of the
// indices touched so far, so clearing and iteration cost O(nnz) when the
// vector is mostly zero.
template<typename val_type>
class sdvec_t : public dvec_t<val_type> {
    friend class smat_t<val_type>;
    public:
        using gvec_t<val_type>::len;
        using dvec_t<val_type>::buf;
        std::vector<unsigned> nz_idx;          // touched indices; first nnz entries valid
        std::vector<unsigned char> is_nonzero; // per-index flag: already in nz_idx?
        size_t nnz;                            // number of valid entries in nz_idx
        sdvec_t(size_t len=0) :
            dvec_t<val_type>(len), nz_idx(len), is_nonzero(len), nnz(0){ }
        size_t get_nnz() const { return nnz; }
        // buf[idx] += val, recording idx in nz_idx the first time it is touched.
        // Returns a reference to the updated element.
        template<typename I, typename V>
        val_type& add_nonzero_at(I idx, V val) {
            buf[idx] += static_cast<val_type>(val);
            if(!is_nonzero[idx]) {
                is_nonzero[idx] = 1;
                nz_idx[nnz++] = static_cast<unsigned>(idx);
            }
            return buf[idx];
        }
        // Drop indices whose value cancelled back to exactly zero, then sort
        // and de-duplicate the remaining index list.
        sdvec_t& update_nz_idx() {
            for(size_t t = 0 ; t < nnz; t++) {
                if(buf[nz_idx[t]] == static_cast<val_type>(0)) {
                    // BUG FIX: clear the flag of the zero-valued index BEFORE
                    // swapping. The original cleared is_nonzero[nz_idx[t]]
                    // after the swap, which wiped the flag of the (possibly
                    // nonzero) element swapped into slot t instead.
                    is_nonzero[nz_idx[t]] = 0;
                    std::swap(nz_idx[t], nz_idx[nnz - 1]);
                    t -= 1;   // re-examine slot t (now holds the former last entry)
                    nnz -= 1;
                }
            }
            std::sort(nz_idx.data(), nz_idx.data() + nnz);
            nnz = std::unique(nz_idx.data(), nz_idx.data() + nnz) - nz_idx.data();
            // BUG FIX: the original had no return statement — undefined
            // behavior for a reference-returning function.
            return *this;
        }
        // Reset to the all-zero vector, clearing flags and the index list.
        void clear() {
            if(nnz < (len >> 2)) {
                // few nonzeros: clear only the touched positions, O(nnz)
                for(size_t t = 0; t < nnz; t++) {
                    buf[nz_idx[t]] = 0;
                    is_nonzero[nz_idx[t]] = 0;
                }
            } else {
                // dense-ish: bulk memset is cheaper than per-index writes
                memset(buf, 0, sizeof(val_type) * len);
                memset(is_nonzero.data(), 0, sizeof(unsigned char) * len);
            }
            nnz = 0;
        }
};
// Dense Matrix
// Dense matrix stored contiguously in either row- or column-major order.
// A dmat_t is either "deep" (owns buf) or a "view" (borrows buf).
template<typename val_type>
class dmat_t : public gmat_t<val_type> {
	friend class dvec_t<val_type>;
	public:
		// size_t rows, cols; inherited from gmat_t
		using gmat_t<val_type>::rows;
		using gmat_t<val_type>::cols;
		val_type *buf;
		// Uniform random matrix with entries in [lower, upper).
		static dmat_t rand(rng_t &rng, size_t m, size_t n, double lower=0.0, double upper=1.0, major_t major_type_=default_major) {
			dmat_t ret(m, n, major_type_);
			if(lower >= upper) lower = upper;
			for(size_t idx = 0; idx < m*n; idx++)
				ret.buf[idx] = (val_type)rng.uniform(lower, upper);
			return ret;
		}
		// Gaussian random matrix with entries ~ N(mean, std^2).
		static dmat_t randn(rng_t &rng, size_t m, size_t n, double mean=0.0, double std=1.0, major_t major_type_=default_major) {
			dmat_t ret(m, n, major_type_);
			for(size_t idx = 0; idx < m*n; idx++)
				ret.buf[idx] = (val_type)rng.normal(mean, std);
			return ret;
		}
	private:
		bool mem_alloc_by_me;   // true iff buf is owned (deep); false for views
		major_t major_type;
		typedef dvec_t<val_type> vec_t;
		void zero_init() {
			rows = 0;
			cols = 0;
			buf = NULL;
			major_type = default_major;
			mem_alloc_by_me = false;
		}
	public:
		// Default Constructor
		dmat_t() { zero_init(); }
		// Copy Constructor:
		// Having the same status (view or deep) as other.
		// Using the same major_type as other.
		dmat_t(const dmat_t& other) {
			zero_init();
			*this = other;
		}
		// Copy Assignment:
		// Having the same status (view or deep) as other.
		// Using the same major_type as other.
		dmat_t& operator=(const dmat_t& other) {
			if(this == &other) return *this;
			if(other.is_view()) { // for view
				if(mem_alloc_by_me) clear_space();
				rows = other.rows;
				cols = other.cols;
				buf = other.buf;
				major_type = other.major_type;
				mem_alloc_by_me = false;
			} else { // deep copy
				if(is_view() || rows!=other.rows || cols!=other.cols || major_type!=other.major_type) {
					major_type = other.major_type;
					resize(other.rows, other.cols);
				}
				memcpy(buf, other.buf, sizeof(val_type)*rows*cols);
			}
			return *this;
		}
		// View Constructor:
		// If buf != NULL, it creates a view on buf.
		// If buf == NULL, it creates a deep matrix w/ all 0.
		explicit dmat_t(size_t rows_, size_t cols_, major_t major_type_=default_major, val_type *buf=NULL): gmat_t<val_type>(rows_,cols_), buf(buf), mem_alloc_by_me(false), major_type(major_type_) {
			if(buf == NULL && rows * cols != 0){
				this->buf = MALLOC(val_type, rows * cols);
				memset(this->buf, 0, sizeof(val_type) * rows * cols);
				mem_alloc_by_me = true;
			}
		}
		// Fill Constructor: fill in dense_vector based on the major_type.
		// Always Deep.
		explicit dmat_t(size_t nr_copy, const dvec_t<val_type>& v, major_t major_type_=default_major) {
			zero_init();
			major_type = major_type_;
			resize(nr_copy, v);
		}
		// Constructor: dense_vector => dense_matrix:
		// Having the same status (view or deep) as v (the dense vector).
		dmat_t(const dvec_t<val_type>& v, major_t major_type_=default_major) {
			zero_init();
			major_type = major_type_;
			if(!v.is_view())
				resize(1, v);
			else {
				rows = is_rowmajor()? 1: v.size();
				cols = is_colmajor()? 1: v.size();
				buf = v.buf;
			}
		}
		// Constructor: sparse_matrix => dense_matrix:
		// Always deep.
		template<typename T>
		dmat_t(const smat_t<T>& sm, major_t major_type_=default_major) {
			zero_init();
			major_type = major_type_;
			resize(sm.rows, sm.cols);
			memset(buf, 0, sizeof(val_type)*rows*cols);
			// walk the CSR side of sm and scatter into the dense buffer
			for(size_t i = 0; i < sm.rows; i++)
				for(size_t idx = sm.row_ptr[i]; idx != sm.row_ptr[i+1]; idx++)
					at(i, sm.col_idx[idx]) = sm.val_t[idx];
		}
		// Constructor: identity_matrix => dense_matrix:
		// Always deep.
		template<typename T>
		dmat_t(const eye_t<T>& eye, major_t major_type_=default_major) {
			zero_init();
			major_type = major_type_;
			resize(eye.rows, eye.cols);
			memset(buf, 0, sizeof(val_type)*rows*cols);
			for(size_t i = 0; i < rows; i++)
				at(i,i) = 1;
		}
#if defined(CPP11)
		// Move Constructor
		dmat_t(dmat_t&& m){
			zero_init();
			*this = std::move(m);
		}
		// Move Assignment: steals other's buffer and leaves other empty.
		dmat_t& operator=(dmat_t&& other) {
			if(this == &other) return *this;
			clear_space();
			rows = other.rows;
			cols = other.cols;
			buf = other.buf;
			mem_alloc_by_me = other.mem_alloc_by_me;
			major_type = other.major_type;
			other.zero_init();
			return *this;
		}
#endif
		~dmat_t() { clear_space(); }
		bool is_view() const { return mem_alloc_by_me==false; }
		bool is_dense() const { return true; }
		bool is_rowmajor() const { return major_type==ROWMAJOR; }
		bool is_colmajor() const { return major_type==COLMAJOR; }
		major_t get_major() const { return major_type; }
		void clear_space() {
			if(mem_alloc_by_me) {
				free(buf);
			}
			zero_init();
		}
		// The view of the current dense matrix is returned.
		// (Using View Constructor)
		dmat_t get_view() const {
			return dmat_t(rows,cols,major_type,buf);
		}
		// Return a view on the idx-th row of the dense matrix.
		// (Can only be called when the matrix is ROWMAJOR)
		dvec_t<val_type> get_row(const size_t &idx) const {
			assert(is_rowmajor());
			if(is_rowmajor())
				return dvec_t<val_type>(cols, &buf[idx * cols]);
			else
				return dvec_t<val_type>();
		}
		// Return a view on the idx-th col of the dense matrix.
		// (Can only be called when the matrix is COLMAJOR)
		dvec_t<val_type> get_col(const size_t &idx) const {
			assert(is_colmajor());
			if(is_colmajor())
				return dvec_t<val_type>(rows, &buf[idx * rows]);
			else
				return dvec_t<val_type>();
		}
		// For grow_body():
		// Deep, View => Deep.
		// (this is the sole purpose of this function)
		dmat_t& grow_body() {
			if(is_view()) {
				dmat_t tmp_view = *this;
				this->resize(rows,cols);
				memcpy(buf, tmp_view.buf, sizeof(val_type) * rows * cols);
			}
			return *this;
		}
		// For transpose():
		// It will return a view of the transpose of *this.
		// (the major for ret will be the opposite of *this)
		dmat_t transpose() const {
			dmat_t ret = get_view();
			ret.to_transpose();
			return ret;
		}
		// ====================================================
		// ================ In-place functions ================
		// ====================================================
		// For assign():
		// Deep => Deep.
		// View => View.
		// Note: It differs from copy assignment!
		// After copy assignment, *this has the same status (View or Deep) as other.
		// But assign() directly overwrites the values in buf.
		// (it can modify the values it is viewing)
		dmat_t& assign(const dmat_t& other) {
			return assign((val_type)1.0, other);
		}
		// Similar to the above assign(), but now *this = a * other.
		template<typename T>
		dmat_t& assign(T a, const dmat_t& other) {
			if(a == T(0))
				memset(buf, 0, sizeof(val_type)*rows*cols);
			else if(a == T(1)) {
				if(this == &other)
					return *this;
				// iterate in the destination's storage order for locality
				if(is_rowmajor()) {
#pragma omp parallel for schedule(static)
					for(size_t r = 0; r < rows; r++)
						for(size_t c = 0; c < cols; c++)
							at(r,c) = other.at(r,c);
				} else {
#pragma omp parallel for schedule(static)
					for(size_t c = 0; c < cols; c++)
						for(size_t r = 0; r < rows; r++)
							at(r,c) = other.at(r,c);
				}
			} else {
				if(is_rowmajor()) {
#pragma omp parallel for schedule(static)
					for(size_t r = 0; r < rows; r++)
						for(size_t c = 0; c < cols; c++)
							at(r,c) = a * other.at(r,c);
				} else {
#pragma omp parallel for schedule(static)
					for(size_t c = 0; c < cols; c++)
						for(size_t r = 0; r < rows; r++)
							at(r,c) = a * other.at(r,c);
				}
			}
			return *this;
		}
		// After to_transpose():
		// Deep => Deep.
		// View => View.
		// major_type will change.
		dmat_t& to_transpose() {
			std::swap(rows,cols);
			major_type = is_rowmajor()? COLMAJOR: ROWMAJOR;
			return *this;
		}
		// After inv_major():
		// View, Deep => Deep.
		dmat_t& inv_major() {
			if(rows == cols && !is_view()) { // inplace for deep square matrix
				for(size_t r = 0; r < rows; r++)
					for(size_t c = 0; c < r; c++)
						std::swap(at(r,c),at(c,r));
				major_type = is_rowmajor()? COLMAJOR: ROWMAJOR;
			} else {
				dmat_t tmp(*this);
				major_type = is_rowmajor()? COLMAJOR: ROWMAJOR;
				resize(rows,cols);
				for(size_t r = 0; r < rows; r++)
					for(size_t c = 0; c < cols; c++)
						at(r,c) = tmp.at(r,c);
			}
			return *this;
		}
		// After to_rowmajor():
		// Deep => Deep.
		// View => View (if originally rowmajor), Deep (if originally colmajor).
		dmat_t& to_rowmajor() {
			if(is_colmajor()) inv_major();
			return *this;
		}
		// After to_colmajor():
		// Deep => Deep.
		// View => View (if originally colmajor), Deep (if originally rowmajor).
		dmat_t& to_colmajor() {
			if(is_rowmajor()) inv_major();
			return *this;
		}
		// After apply_permutation():
		// Deep => Deep.
		// View => View.
		// apply_permutation() directly overwrites the values in buf.
		// (thus it can modify the values dmat is viewing)
		dmat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) {
			return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0] : NULL);
		}
		dmat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) {
			dmat_t tmp(*this);
			tmp.grow_body();
			for(size_t r = 0; r < rows; r++)
				for(size_t c = 0; c < cols; c++)
					at(r,c) = tmp.at(row_perm? row_perm[r]: r, col_perm? col_perm[c]: c);
			return *this;
		}
		// Scale rows and/or columns in place; a NULL scale array means "no scaling".
		template<typename V1, typename V2>
		dmat_t& apply_scale(const V1 *row_scale, const V2 *col_scale) {
			if(row_scale != NULL && col_scale != NULL) {
				for(size_t r = 0; r < rows; r++) {
					for(size_t c = 0; c < cols; c++) {
						at(r, c) *= row_scale[r] * col_scale[c];
					}
				}
			} else if(row_scale != NULL && col_scale == NULL) {
				for(size_t r = 0; r < rows; r++) {
					for(size_t c = 0; c < cols; c++) {
						at(r, c) *= row_scale[r];
					}
				}
			} else if(row_scale == NULL && col_scale != NULL) {
				for(size_t r = 0; r < rows; r++) {
					for(size_t c = 0; c < cols; c++) {
						at(r, c) *= col_scale[c];
					}
				}
			}
			return *this;
		}
		template<typename V>
		dmat_t& apply_scale(const dense_vector<V>& row_scale, const dense_vector<V>& col_scale) {
			return apply_scale(row_scale.data(), col_scale.data());
		}
		template<typename V>
		dmat_t& apply_row_scale(const dense_vector<V>& row_scale) {
			return apply_scale<V, V>(row_scale.data(), NULL);
		}
		template<typename V>
		dmat_t& apply_col_scale(const dense_vector<V>& col_scale) {
			return apply_scale<V, V>(NULL, col_scale.data());
		}
		// After resize():
		// View, Deep => Deep.
		// Replicates v nr_copy times along the major direction.
		void resize(size_t nr_copy, const vec_t &v) {
			if(is_rowmajor()) {
				size_t rows_ = nr_copy, cols_ = v.size();
				resize(rows_, cols_);
				size_t unit = sizeof(val_type)*v.size();
				for(size_t r = 0; r < rows; r++)
					memcpy(buf + r * cols, v.data(), unit);
			} else {
				size_t rows_ = v.size(), cols_ = nr_copy;
				resize(rows_, cols_);
				size_t unit = sizeof(val_type)*v.size();
				for(size_t c = 0; c < cols; c++)
					memcpy(buf + c * rows, v.data(), unit);
			}
		}
		// After resize():
		// View, Deep => Deep.
		// (contents are only preserved when already deep and the element count matches)
		dmat_t& resize(size_t rows_, size_t cols_) {
			if(mem_alloc_by_me) {
				if(rows_ == rows && cols_ == cols)
					return *this;
				if(rows_*cols_ != rows*cols)
					buf = REALLOC(buf, val_type, rows_*cols_);
			} else {
				buf = MALLOC(val_type, rows_*cols_);
			}
			mem_alloc_by_me = true;
			rows = rows_; cols = cols_;
			return *this;
		}
		// After lazy_resize():
		// Deep => Deep.
		// View => (If possible) ? View : Deep.
		dmat_t& lazy_resize(size_t rows_, size_t cols_, major_t major_type_=0) {
			if(is_view() && rows_*cols_==rows*cols &&
					(major_type_ == 0 || major_type==major_type_))
				reshape(rows_,cols_);
			else {
				if(major_type_ != 0)
					major_type = major_type_;
				resize(rows_, cols_);
			}
			return *this;
		}
		// After reshape:
		// Deep => Deep.
		// View => View.
		dmat_t& reshape(size_t rows_, size_t cols_) {
			assert(rows_*cols_ == rows*cols);
			// fix: condition used to read `cols != cols` (always false), so the
			// new shape was only applied when rows changed.
			if(rows_ != rows || cols_ != cols) {
				rows = rows_; cols = cols_;
			}
			return *this;
		}
		// ====================================================
		// ============ Dmat-Vector Multiplication ============
		// ====================================================
		// Xv = this * v (accumulates into Xv when addson != 0).
		dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
			assert(v.size() == this->cols);
			if(Xv.size() != this->rows)
				Xv.resize(this->rows, 0.0);
			for(size_t i = 0; i < rows; i++) {
				if(addson == 0) Xv[i] = 0;
				for(size_t j = 0; j < cols; j++)
					Xv[i] += at(i, j) * v[j];
			}
			return Xv;
		}
		// Xv = this * v for a sparse v: only touch v's stored entries.
		dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
			assert(v.size() == this->cols);
			if(Xv.size() != this->rows)
				Xv.resize(this->rows, 0.0);
			for(size_t i = 0; i < rows; i++) {
				if(addson == 0) Xv[i] = 0;
				for(size_t p = 0; p < v.get_nnz(); p++)
					Xv[i] += at(i, v.idx[p]) * v.val[p];
			}
			return Xv;
		}
		// XTu = this^T * u (accumulates into XTu when addson != 0).
		dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
			assert(u.size() == this->rows);
			if(XTu.size() != this->cols)
				XTu.resize(this->cols, 0.0); // fix: used to resize to rows, overflowing when cols > rows
			for(size_t i = 0; i < cols; i++) {
				if(addson == 0) XTu[i] = 0;
				for(size_t j = 0; j < rows; j++)
					XTu[i] += at(j, i) * u[j];
			}
			return XTu;
		}
		// XTu = this^T * u for a sparse u.
		dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
			assert(u.size() == this->rows);
			if(XTu.size() != this->cols)
				XTu.resize(this->cols, 0.0); // fix: used to resize to rows, overflowing when cols > rows
			for(size_t i = 0; i < cols; i++) {
				if(addson == 0) XTu[i] = 0;
				for(size_t p = 0; p < u.get_nnz(); p++)
					XTu[i] += at(u.idx[p], i) * u.val[p];
			}
			return XTu;
		}
		// ====================================================
		// ==================== IO Methods ====================
		// ====================================================
		void load_from_binary(const char *filename, major_t major_type_=default_major) {
			FILE *fp = fopen(filename, "rb");
			if(fp == NULL) {
				fprintf(stderr, "Error: can't read the file (%s)!!\n", filename);
				return;
			}
			load_from_binary(fp, major_type_, filename);
			fclose(fp);
		}
		// On-disk layout: size_t rows, size_t cols, then rows*cols doubles in row-major.
		void load_from_binary(FILE *fp, major_t major_type_=default_major, const char *filename=NULL) {
			if(filename == NULL) filename = "(unnamed stream)"; // fix: %s with NULL is UB
			clear_space();
			zero_init();
			size_t rows_, cols_;
			if(fread(&rows_, sizeof(size_t), 1, fp) != 1)
				fprintf(stderr, "Error: wrong input stream in %s.\n", filename);
			if(fread(&cols_, sizeof(size_t), 1, fp) != 1)
				fprintf(stderr, "Error: wrong input stream in %s.\n", filename);
			std::vector<double> tmp(rows_*cols_);
			if(fread(&tmp[0], sizeof(double), rows_*cols_, fp) != rows_*cols_)
				fprintf(stderr, "Error: wrong input stream in %s.\n", filename);
			dmat_t<double> tmp_view(rows_, cols_, ROWMAJOR, &tmp[0]);
			major_type = major_type_;
			resize(rows_, cols_);
			for(size_t r = 0; r < rows; r++)
				for(size_t c = 0; c < cols; c++)
					at(r,c) = tmp_view.at(r,c);
		}
		void save_binary_to_file(const char *filename) {
			FILE *fp = fopen(filename, "wb");
			if(fp == NULL) {
				fprintf(stderr,"Error: can't open file %s\n", filename);
				exit(1);
			}
			save_binary_to_file(fp);
			fclose(fp);
		}
		// Writes the same layout load_from_binary() reads (row-major doubles).
		void save_binary_to_file(FILE *fp) {
			fwrite(&rows, sizeof(size_t), 1, fp);
			fwrite(&cols, sizeof(size_t), 1, fp);
			std::vector<double> tmp(rows*cols);
			size_t idx = 0;
			for(size_t r = 0; r < rows; r++)
				for(size_t c = 0; c < cols; c++)
					tmp[idx++] = (double)at(r,c);
			fwrite(&tmp[0], sizeof(double), tmp.size(), fp);
		}
		// Element access honoring the storage order.
		val_type& at(size_t r, size_t c) { return is_rowmajor()? buf[r*cols+c] : buf[c*rows+r]; }
		const val_type& at(size_t r, size_t c) const { return is_rowmajor()? buf[r*cols+c] : buf[c*rows+r]; }
		val_type* data() { return buf; }
		const val_type* data() const { return buf; }
		void print_mat(const char *str="", FILE *fp=stdout) const {
			fprintf(fp, "===>%s<===\n", str);
			// fix: %zu is the correct conversion for size_t (was %ld)
			fprintf(fp, "rows %zu cols %zu mem_alloc_by_me %d row_major %d\nbuf %p\n",
					rows, cols, mem_alloc_by_me, is_rowmajor(), buf);
			for(size_t r = 0; r < rows; r++) {
				for(size_t c = 0; c < cols; c++)
					fprintf(fp, "%.3f ", at(r,c));
				fprintf(fp, "\n");
			}
		}
};
// Identity Matrix
// Identity matrix (always square: rows == cols).
template<typename val_type>
class eye_t : public gmat_t<val_type> {
	public:
		// size_t rows, cols; inherited from gmat_t
		using gmat_t<val_type>::rows;
		using gmat_t<val_type>::cols;
		eye_t (size_t rows_ = 0) : gmat_t<val_type>(rows_, rows_){}
		bool is_identity() const { return true; }
		// Xv = I*v, i.e. a copy (or accumulation when addson != 0).
		dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
			assert(v.size() == this->cols);
			if(Xv.size() != this->rows)
				Xv.resize(this->rows, 0.0);
			return addson? do_axpy(1, v, Xv): Xv.assign(v);
		}
		dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
			assert(v.size() == this->cols);
			if(Xv.size() != this->rows)
				Xv.resize(this->rows, 0.0);
			dvec_t<val_type> dv(v);
			return addson? do_axpy(1, dv, Xv): Xv.assign(dv);
		}
		// XTu = I^T*u = u.
		dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
			assert(u.size() == this->rows);
			if(XTu.size() != this->cols)
				XTu.resize(this->cols, 0.0); // fix: resize target now matches the size check (rows==cols here, so behavior is unchanged)
			return addson? do_axpy(1, u, XTu): XTu.assign(u);
		}
		dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
			assert(u.size() == this->rows);
			if(XTu.size() != this->cols)
				XTu.resize(this->cols, 0.0); // fix: same consistency fix as above
			dvec_t<val_type> du(u);
			return addson? do_axpy(1, du, XTu): XTu.assign(du);
		}
};
// Sparse Matrix (format CSC & CSR)
template<typename val_type>
class smat_t : public gmat_t<val_type> {
private:
bool mem_alloc_by_me;
		// Reset every member to the empty, non-owning state.
		void zero_init() {
			mem_alloc_by_me = false;
			val=val_t=NULL;
			col_ptr=row_ptr=NULL;
			row_idx=col_idx=NULL;
			rows=cols=nnz=max_col_nnz=max_row_nnz=0;
		}
		// Allocate owned CSR+CSC buffers for a rows_ x cols_ matrix with nnz_
		// entries.  Only the ptr arrays are zeroed; idx/val arrays are
		// uninitialized and must be filled by the caller.
		void allocate_space(size_t rows_, size_t cols_, size_t nnz_) {
			if(mem_alloc_by_me)
				clear_space();
			rows = rows_; cols = cols_; nnz = nnz_;
			val = MALLOC(val_type, nnz);
			val_t = MALLOC(val_type, nnz);
			row_idx = MALLOC(unsigned, nnz);
			col_idx = MALLOC(unsigned, nnz);
			row_ptr = MALLOC(size_t, rows+1);
			col_ptr = MALLOC(size_t, cols+1);
			memset(row_ptr, 0, sizeof(size_t)*(rows+1));
			memset(col_ptr, 0, sizeof(size_t)*(cols+1));
			mem_alloc_by_me = true;
		}
		// Legacy CSC->CSR conversion (superseded by csc_to_csr below).
		// Rebuilds row_ptr/col_idx/val_t from col_ptr/row_idx/val via a
		// counting pass; row_ptr is temporarily advanced during the scatter
		// and restored by the final shift-down loop.
		void csc_to_csr_old() {
			memset(row_ptr, 0, sizeof(size_t)*(rows+1));
			for(size_t idx = 0; idx < nnz; idx++)
				row_ptr[row_idx[idx]+1]++;
			for(size_t r = 1; r <= rows; r++)
				row_ptr[r] += row_ptr[r-1];
			for(size_t c = 0; c < cols; c++) {
				for(size_t idx = col_ptr[c]; idx != col_ptr[c+1]; idx++) {
					size_t r = (size_t) row_idx[idx];
					col_idx[row_ptr[r]] = c;
					val_t[row_ptr[r]++] = val[idx];
				}
			}
			// undo the advancement: row_ptr[r] currently points one row ahead
			for(size_t r = rows; r > 0; r--)
				row_ptr[r] = row_ptr[r-1];
			row_ptr[0] = 0;
		}
		// Rebuild the CSR arrays from the CSC arrays.  transpose() yields a
		// view sharing this matrix's buffers with the CSR/CSC roles swapped,
		// so regenerating tmp's CSC in place regenerates our CSR.
		void csc_to_csr() {
			smat_t tmp = this->transpose();
			tmp.csr_to_csc();
		}
		// Rebuild col_ptr/row_idx/val (CSC) from row_ptr/col_idx/val_t (CSR):
		// count entries per column, prefix-sum, scatter, then shift col_ptr
		// back down one slot to undo the advancement done while scattering.
		void csr_to_csc() {
			memset(col_ptr, 0, sizeof(size_t) * (cols + 1));
			for(size_t idx = 0; idx < nnz; idx++)
				col_ptr[col_idx[idx] + 1]++;
			for(size_t c = 1; c <= cols; c++)
				col_ptr[c] += col_ptr[c - 1];
			for(size_t r = 0; r < rows; r++) {
				for(size_t idx = row_ptr[r]; idx != row_ptr[r + 1]; idx++) {
					size_t c = (size_t) col_idx[idx];
					row_idx[col_ptr[c]] = r;
					val[col_ptr[c]++] = val_t[idx];
				}
			}
			for(size_t c = cols; c > 0; c--)
				col_ptr[c] = col_ptr[c - 1];
			col_ptr[0] = 0;
		}
void update_max_nnz() {
max_row_nnz = max_col_nnz = 0;
for(size_t c = 0; c < cols; c++) max_col_nnz = std::max(max_col_nnz, nnz_of_col(c));
for(size_t r = 0; r < rows; r++) max_row_nnz = std::max(max_row_nnz, nnz_of_row(r));
}
		// Comparator for sorting rate entries into row/column compression storage
		// Orders entry indices into (row, col) lexicographic order for CSR
		// (or (col, row) for CSC when isCSR=false, by swapping the two arrays).
		class SparseLess {
			public:
				const unsigned *row_idx;
				const unsigned *col_idx;
				SparseLess(const unsigned *row_idx_, const unsigned *col_idx_, bool isCSR=true) {
					row_idx = (isCSR)? row_idx_: col_idx_;
					col_idx = (isCSR)? col_idx_: row_idx_;
				}
				// strict weak ordering over entry positions x, y
				bool operator()(size_t x, size_t y) const {
					return (row_idx[x] < row_idx[y]) || ((row_idx[x] == row_idx[y]) && (col_idx[x] < col_idx[y]));
				}
		};
		// Equality of two entry positions: same (row, col) coordinates.
		class SparseEq {
			public:
				const unsigned *row_idx;
				const unsigned *col_idx;
				SparseEq(const unsigned *row_idx_, const unsigned *col_idx_) {
					row_idx = row_idx_;
					col_idx = col_idx_;
				}
				bool operator()(size_t x, size_t y) const {
					return (row_idx[x] == row_idx[y]) && (col_idx[x] == col_idx[y]);
				}
		};
public: // static methods
		// Random sparse matrix: ~m*n*sparsity entries with uniform values in
		// [lower, upper), scattered by bumping a random row counter.
		// NOTE(review): entries within a row are not sorted by column and
		// duplicates are possible — assumed acceptable for test data; confirm.
		static smat_t rand(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double lower=0.0, double upper=1.0) {
			if(lower > upper) lower = upper;
			smat_t ret;
			size_t nnz_ = (size_t)(m*n*sparsity);
			ret.allocate_space(m, n, nnz_);
			for(size_t idx = 0; idx < nnz_; idx++) {
				ret.val_t[idx] = rng.uniform(lower, upper);
				ret.col_idx[idx] = rng.randint(0, n-1);
				ret.row_ptr[rng.randint(1, m)] += 1;
			}
			// turn per-row counts into CSR offsets, then derive the CSC side
			for(size_t i = 1; i <= m; i++)
				ret.row_ptr[i] += ret.row_ptr[i-1];
			ret.csr_to_csc();
			ret.update_max_nnz();
			return ret;
		}
		// Random sparse matrix with normally distributed values ~ N(mean, std^2);
		// same construction scheme (and caveats) as rand() above.
		static smat_t randn(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double mean=0.0, double std=1.0) {
			smat_t ret;
			size_t nnz_ = (size_t)(m*n*sparsity);
			ret.allocate_space(m, n, nnz_);
			for(size_t idx = 0; idx < nnz_; idx++) {
				ret.val_t[idx] = (val_type)rng.normal(mean, std);
				ret.col_idx[idx] = rng.randint(0, n-1);
				ret.row_ptr[rng.randint(1,m)] += 1;
			}
			for(size_t i = 1; i <= m; i++)
				ret.row_ptr[i] += ret.row_ptr[i-1];
			ret.csr_to_csc();
			ret.update_max_nnz();
			return ret;
		}
		// rows, cols are inherited from gmat_t
		using gmat_t<val_type>::rows;
		using gmat_t<val_type>::cols;
		size_t nnz, max_row_nnz, max_col_nnz; // total entries; cached max nnz over any row / any column
		val_type *val, *val_t;                // entry values in CSC order / in CSR order (same entries twice)
		size_t *col_ptr, *row_ptr;            // CSC column offsets (cols+1) / CSR row offsets (rows+1)
		unsigned *row_idx, *col_idx;          // CSC row indices / CSR column indices (length nnz each)
		// filetypes for loading smat_t
		enum format_t { TXT=0, PETSc=1, SVMLIGHT=2, BINARY=3, COMPRESSION=4 };
		// Default Constructor: empty 0x0 matrix owning nothing.
		smat_t() { zero_init(); }
		// Copy Constructor: delegates to copy assignment, so the result has
		// the same view/deep status as m.
		smat_t(const smat_t& m) {
			zero_init();
			*this = m;
		}
		// Copy Assignment
		// view => view, deep => deep.
		smat_t& operator=(const smat_t& other) {
			if(this == &other) { return *this; }
			if(mem_alloc_by_me) { clear_space(); }
			if(other.is_view()) { // for view
				// a bitwise header copy is sufficient for a non-owning view
				memcpy(static_cast<void*>(this), &other, sizeof(smat_t));
			} else { // deep copy
				// alias other's buffers first, then grow_body() duplicates them
				*this = other.get_view();
				grow_body();
			}
			return *this;
		}
		// View Constructor:
		// Wraps caller-owned CSR/CSC buffers without taking ownership.
		explicit smat_t(size_t rows, size_t cols, size_t nnz,
				val_type *val, val_type *val_t,
				size_t *col_ptr, size_t *row_ptr,
				unsigned *row_idx, unsigned *col_idx) :
			gmat_t<val_type>(rows, cols), nnz(nnz),
			val(val), val_t(val_t),
			col_ptr(col_ptr), row_ptr(row_ptr),
			row_idx(row_idx), col_idx(col_idx)
		{ mem_alloc_by_me = false; update_max_nnz(); }
		// Constructor: dense matrix => sparse matrix (always deep);
		// iterates over the dense entries and loads the nonzeros.
		smat_t(const dmat_t<val_type>& m) {
			zero_init();
			dmat_iterator_t<val_type> entry_it(m);
			load_from_iterator(m.rows, m.cols, entry_it.get_nnz(), &entry_it);
		}
		// Constructor: identity matrix => sparse matrix (always deep);
		// builds the diagonal directly in both CSR and CSC layouts.
		smat_t(const eye_t<val_type>& eye) {
			zero_init();
			allocate_space(eye.rows, eye.rows, eye.rows);
			for(size_t i = 0; i < eye.rows; i++) {
				row_ptr[i+1] = i+1;
				col_idx[i] = i;
				val_t[i] = (val_type)1;
			}
			for(size_t j = 0; j < eye.cols; j++) {
				col_ptr[j+1] = j+1;
				row_idx[j] = j;
				val[j] = (val_type)1;
			}
		}
		// Sized constructor: owned rows_ x cols_ matrix with room for nnz_
		// entries; idx/val contents are uninitialized (see allocate_space).
		smat_t(size_t rows_, size_t cols_, size_t nnz_=0){
			zero_init();
			allocate_space(rows_, cols_, nnz_);
		}
#if defined(CPP11)
		// Move Constructor
		smat_t(smat_t&& m){
			zero_init();
			*this = std::move(m);
		}
		// Move Assignment: steals other's buffers and leaves other empty.
		smat_t& operator=(smat_t&& other) {
			if(this == &other) { return *this; }
			clear_space();
			memcpy(static_cast<void*>(this), &other, sizeof(smat_t));
			other.zero_init();
			return *this;
		}
#endif
		// Destructor: releases buffers only when owned (see clear_space).
		~smat_t(){ clear_space(); }
		size_t get_nnz() const { return nnz; }
		bool is_view() const { return mem_alloc_by_me==false; }
		bool is_sparse() const { return true; }
		// Free all owned buffers (views own nothing) and reset to empty.
		void clear_space() {
			if(mem_alloc_by_me) {
				if(val) { free(val); } if(val_t) { free(val_t); }
				if(row_ptr) { free(row_ptr); } if(row_idx) { free(row_idx); }
				if(col_ptr) { free(col_ptr); } if(col_idx) { free(col_idx); }
			}
			zero_init();
		}
		// Return a non-owning view sharing this matrix's buffers.
		smat_t get_view() const {
			if(is_view()) {
				return *this;
			} else {
				smat_t tmp;
				memcpy(static_cast<void*>(&tmp), this, sizeof(smat_t));
				tmp.mem_alloc_by_me = false;
				return tmp;
			}
		}
/* (Don't delete yet, so can understand codes not yet adapted elsewhere)
svec_t<val_type> get_single_view(const size_t &idx, const major_t &major=default_major) const {
if(major == ROWMAJOR)
return svec_t<val_type>(cols, nnz_of_row(idx), &col_idx[row_ptr[idx]], &val_t[row_ptr[idx]], 0);
else
return svec_t<val_type>(rows, nnz_of_col(idx), &row_idx[col_ptr[idx]], &val[col_ptr[idx]], 0);
}
*/
// For get_row and get_col, a sparse vector view is returned.
// Caveat: If you directly modify the returned sparse vector view,
// it will change the sparse matrix's underlying data.
// And because we store both column and row major format,
		// the modification on the returned svec_t will only affect one of the formats,
// Resulting in an inconsistency within the sparse matrix.
// Summary: Do not directly modify the returned sparse vector view.
// (if the view becomes a deep vector afterwards, then things will be fine.)
		// View of row idx as a sparse vector over the CSR arrays.
		svec_t<val_type> get_row(const size_t &idx) const {
			return svec_t<val_type>(cols, nnz_of_row(idx), &col_idx[row_ptr[idx]], &val_t[row_ptr[idx]]);
		}
		// View of column idx as a sparse vector over the CSC arrays.
		svec_t<val_type> get_col(const size_t &idx) const {
			return svec_t<val_type>(rows, nnz_of_col(idx), &row_idx[col_ptr[idx]], &val[col_ptr[idx]]);
		}
		// Convert a view into a deep copy (no-op when already deep):
		// duplicates all six CSR/CSC buffers.
		smat_t& grow_body() {
			if(is_view()) {
				smat_t tmp = *this; // a copy of the view
				col_ptr = MALLOC(size_t, cols + 1); memcpy(col_ptr, tmp.col_ptr, sizeof(size_t) * (cols + 1));
				row_idx = MALLOC(unsigned, nnz); memcpy(row_idx, tmp.row_idx, sizeof(unsigned) * nnz);
				val = MALLOC(val_type, nnz); memcpy(val, tmp.val, sizeof(val_type) * nnz);
				row_ptr = MALLOC(size_t, rows + 1); memcpy(row_ptr, tmp.row_ptr, sizeof(size_t) * (rows + 1));
				col_idx = MALLOC(unsigned, nnz); memcpy(col_idx, tmp.col_idx, sizeof(unsigned) * nnz);
				val_t = MALLOC(val_type, nnz); memcpy(val_t, tmp.val_t, sizeof(val_type) * nnz);
				mem_alloc_by_me = true;
			}
			return *this;
		}
		// O(1) transpose: returns a *view* with the CSR/CSC roles swapped.
		smat_t transpose() const{
			smat_t<val_type> mt = get_view().to_transpose();
			return mt;
		}
// ====================================================
// ================ In-place functions ================
// ====================================================
smat_t& to_transpose() {
std::swap(rows,cols);
std::swap(val,val_t);
std::swap(row_ptr,col_ptr);
std::swap(row_idx,col_idx);
std::swap(max_col_nnz, max_row_nnz);
return *this;
}
		// Permutation wrapper: a vector whose length does not match the
		// corresponding dimension is treated as "no permutation" (NULL).
		smat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) {
			return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0]: NULL);
		}
		// Relabel row and/or column indices in place, then rebuild both
		// compressed formats (the relabeling breaks the sorted order inside
		// each column/row, so a full conversion round-trip restores it).
		smat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) {
			if(row_perm != NULL) {
				for(size_t idx = 0; idx < nnz; idx++) {
					row_idx[idx] = row_perm[row_idx[idx]];
				}
				csc_to_csr();
				csr_to_csc();
			}
			if(col_perm != NULL) {
				for(size_t idx = 0; idx < nnz; idx++) {
					col_idx[idx] = col_perm[col_idx[idx]];
				}
				csr_to_csc();
				csc_to_csr();
			}
			return *this;
		}
		// Scale rows and/or columns in place; NULL means "no scaling" on that
		// side.  Both the CSC (val) and CSR (val_t) copies are updated so the
		// two formats stay consistent.
		template<typename V1, typename V2>
		smat_t& apply_scale(const V1 *row_scale, const V2 *col_scale) {
			if(row_scale != NULL && col_scale != NULL) {
				for(size_t r = 0; r < rows; r++) {
					val_type alpha = row_scale[r];
					for(size_t idx = row_ptr[r]; idx != row_ptr[r + 1]; idx++) {
						val_t[idx] *= alpha * col_scale[col_idx[idx]];
					}
				}
				for(size_t c = 0; c < cols; c++) {
					val_type alpha = col_scale[c];
					for(size_t idx = col_ptr[c]; idx != col_ptr[c + 1]; idx++) {
						val[idx] *= alpha * row_scale[row_idx[idx]];
					}
				}
			} else if(row_scale != NULL && col_scale == NULL) {
				for(size_t r = 0; r < rows; r++) {
					if(nnz_of_row(r)) {
						for(size_t idx = row_ptr[r]; idx < row_ptr[r + 1]; idx++) {
							val_t[idx] *= row_scale[r];
						}
					}
				}
				for(size_t idx = 0; idx < nnz; idx++) {
					val[idx] *= row_scale[row_idx[idx]];
				}
			} else if(row_scale == NULL && col_scale != NULL) {
				for(size_t c = 0; c < cols; c++) {
					if(nnz_of_col(c)) {
						for(size_t idx = col_ptr[c]; idx < col_ptr[c + 1]; idx++) {
							val[idx] *= col_scale[c];
						}
					}
				}
				for(size_t idx = 0; idx < nnz; idx++) {
					val_t[idx] *= col_scale[col_idx[idx]];
				}
			}
			return *this;
		}
		// Convenience wrappers over the raw-pointer apply_scale above.
		template<typename V1, typename V2>
		smat_t& apply_scale(const dvec_t<V1> &row_scale, const dvec_t<V2> &col_scale) {
			return apply_scale(row_scale.data(), col_scale.data());
		}
		template<typename V>
		smat_t& apply_row_scale(const dvec_t<V> &row_scale) {
			return apply_scale<V, V>(row_scale.data(), NULL);
		}
		template<typename V>
		smat_t& apply_col_scale(const dvec_t<V> &col_scale) {
			return apply_scale<V, V>(NULL, col_scale.data());
		}
		// Extract the deep submatrix formed by the listed rows (in order).
		smat_t row_subset(const std::vector<unsigned> &subset) const {
			return row_subset(&subset[0], (int)subset.size());
		}
		smat_t row_subset(const unsigned *subset, int subset_size) const {
			smat_subset_iterator_t<val_type> it(*this, subset, subset_size, ROWMAJOR);
			smat_t<val_type> sub_smat;
			sub_smat.load_from_iterator(subset_size, cols, it.get_nnz(), &it);
			return sub_smat;
		}
		// Extract the deep submatrix formed by the listed columns (in order).
		smat_t col_subset(const std::vector<unsigned> &subset) const {
			return col_subset(&subset[0], (int)subset.size());
		}
		smat_t col_subset(const unsigned *subset, int subset_size) const {
			smat_subset_iterator_t<val_type> it(*this, subset, subset_size, COLMAJOR);
			smat_t<val_type> sub_smat;
			sub_smat.load_from_iterator(rows, subset_size, it.get_nnz(), &it);
			return sub_smat;
		}
		size_t nnz_of_row(unsigned i) const { return (row_ptr[i+1] - row_ptr[i]); } // entries stored in row i
		size_t nnz_of_col(unsigned i) const { return (col_ptr[i+1] - col_ptr[i]); } // entries stored in column i
// ====================================================
// ============ Smat-Vector Multiplication ============
// ====================================================
		// Xv = X*v over raw arrays (CSR traversal); accumulates when addson != 0.
		// Caller guarantees v has cols entries and Xv has rows entries.
		val_type* Xv(const val_type* v, val_type* Xv, bool addson=0) const {
			for(size_t i = 0; i < rows; i++) {
				if(addson == 0) Xv[i] = 0;
				for(size_t idx = row_ptr[i]; idx < row_ptr[i+1]; idx++)
					Xv[i] += val_t[idx] * v[col_idx[idx]];
			}
			return Xv;
		}
		// Xv = X*v for a dense vector; resizes the output to rows if needed.
		dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
			assert(v.size() == this->cols);
			if(Xv.size() != this->rows)
				Xv.resize(this->rows, 0.0);
			this->Xv(v.data(), Xv.data(), addson);
			return Xv;
		}
		// Xv = X*v for a sparse v: accumulate alpha * (column of X) for each
		// stored entry (idx, alpha) of v, touching only the needed columns.
		dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const {
			assert(v.size() == this->cols);
			if(Xv.size() != this->rows)
				Xv.resize(this->rows, 0.0);
			if(addson == 0) {
				for(size_t i = 0; i < Xv.size(); i++) {
					Xv[i] = 0;
				}
			}
			for(size_t k = 0; k < v.nnz; k++) {
				size_t col_idx = static_cast<size_t>(v.idx[k]);
				const val_type& alpha = v.val[k];
				do_axpy(alpha, get_col(col_idx), Xv);
			}
			/* slower implementation
			dvec_t<val_type> dv(v);
			this->Xv(dv.data(), Xv.data(), addson);
			*/
			return Xv;
		}
		// XTu = X^T*u over raw arrays (CSC traversal); accumulates when addson != 0.
		// Caller guarantees u has rows entries and XTu has cols entries.
		val_type* XTu(const val_type* u, val_type* XTu, bool addson=0) const {
			for(size_t i = 0; i < cols; i++) {
				if(addson == 0) XTu[i] = 0;
				for(size_t idx = col_ptr[i]; idx < col_ptr[i+1]; idx++)
					XTu[i] += val[idx] * u[row_idx[idx]];
			}
			return XTu;
		}
dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
assert(u.size() == this->rows);
if(XTu.size() != this->cols)
XTu.resize(this->rows, 0.0);
this->XTu(u.data(), XTu.data(), addson);
return XTu;
}
dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const {
assert(u.size() == this->rows);
if(XTu.size() != this->cols)
XTu.resize(this->rows, 0.0);
if(addson == 0) {
for(size_t i = 0; i < XTu.size(); i++) {
XTu[i] = 0;
}
}
for(size_t k = 0; k < u.nnz; k++) {
size_t row_idx = static_cast<size_t>(u.idx[k]);
const val_type& alpha = u.val[k];
do_axpy(alpha, get_row(row_idx), XTu);
}
/* slower implementatoin
dvec_t<val_type> du(u);
this->XTu(du.data(), XTu.data(), addson);
*/
return XTu;
}
// ====================================================
// ==================== IO Methods ====================
// ====================================================
// The entry_iterator can be in arbitrary order (sort+unique is applied).
		// Build both CSR and CSC layouts from an arbitrary-order entry stream.
		// Duplicate (i, j) coordinates are merged by summing their values, so
		// the final nnz may be smaller than _nnz.
		void load_from_iterator(size_t _rows, size_t _cols, size_t _nnz, entry_iterator_t<val_type>* entry_it) {
			clear_space(); // clear any pre-allocated space in case of memory leak
			rows =_rows, cols=_cols, nnz=_nnz;
			allocate_space(rows,cols,nnz);
			// a trick to utilize the space that has already been allocated:
			// stage the raw triplets in the CSC/CSR arrays that are rebuilt later
			std::vector<size_t> perm(nnz);
			unsigned *tmp_row_idx = col_idx;
			unsigned *tmp_col_idx = row_idx;
			val_type *tmp_val = val;
			for(size_t idx = 0; idx < nnz; idx++){
				entry_t<val_type> rate = entry_it->next();
				tmp_row_idx[idx] = rate.i;
				tmp_col_idx[idx] = rate.j;
				tmp_val[idx] = rate.v;
				perm[idx] = idx;
			}
			// TODO can change to O(n) method
			// sort entries into row-majored ordering
			std::sort(perm.begin(), perm.end(), SparseLess(tmp_row_idx, tmp_col_idx));
			// add up the values in the same position (i, j)
			size_t cur_nnz = 0;
			for(size_t idx = 0; idx < nnz; idx++) {
				if(cur_nnz > 0
						&& tmp_row_idx[perm[idx]] == tmp_row_idx[perm[cur_nnz-1]]
						&& tmp_col_idx[perm[idx]] == tmp_col_idx[perm[cur_nnz-1]])
					tmp_val[perm[cur_nnz-1]] += tmp_val[perm[idx]];
				else {
					tmp_row_idx[perm[cur_nnz]] = tmp_row_idx[perm[idx]];
					tmp_col_idx[perm[cur_nnz]] = tmp_col_idx[perm[idx]];
					tmp_val[perm[cur_nnz]] = tmp_val[perm[idx]];
					cur_nnz ++;
				}
			}
			nnz = cur_nnz;
			// count entries per row and per column (offset by one for prefix sums)
			for(size_t idx = 0; idx < nnz; idx++){
				row_ptr[tmp_row_idx[perm[idx]] + 1] ++;
				col_ptr[tmp_col_idx[perm[idx]] + 1] ++;
			}
			// Generate CSR format
			for(size_t idx = 0; idx < nnz; idx++) {
				val_t[idx] = tmp_val[perm[idx]];
				col_idx[idx] = tmp_col_idx[perm[idx]];
			}
			// Calculate nnz for each row and col
			max_row_nnz = max_col_nnz = 0;
			for(size_t r = 1; r <= rows; r++) {
				max_row_nnz = std::max(max_row_nnz, row_ptr[r]);
				row_ptr[r] += row_ptr[r-1];
			}
			for(size_t c = 1; c <= cols; c++) {
				max_col_nnz = std::max(max_col_nnz, col_ptr[c]);
				col_ptr[c] += col_ptr[c-1];
			}
			// Transpose CSR into CSC matrix
			for(size_t r = 0; r < rows; r++){
				for(size_t idx = row_ptr[r]; idx < row_ptr[r+1]; idx++){
					size_t c = (size_t) col_idx[idx];
					row_idx[col_ptr[c]] = r;
					val[col_ptr[c]++] = val_t[idx];
				}
			}
			// undo the col_ptr advancement performed by the scatter above
			for(size_t c = cols; c > 0; c--) col_ptr[c] = col_ptr[c-1];
			col_ptr[0] = 0;
		}
// Load the matrix from a file, dispatching on the declared format.
// TXT needs explicit dimensions/nnz; PETSc and SVMLIGHT are self-describing.
void load(size_t _rows, size_t _cols, size_t _nnz, const char *filename, format_t fmt) {
    switch(fmt) {
        case smat_t<val_type>::TXT: {
            file_iterator_t<val_type> entry_it(_nnz, filename);
            load_from_iterator(_rows, _cols, _nnz, &entry_it);
            break;
        }
        case smat_t<val_type>::PETSc:
            load_from_PETSc(filename);
            break;
        case smat_t<val_type>::SVMLIGHT:
            load_from_svmlight(filename);
            break;
        default:
            fprintf(stderr, "Error: filetype %d not supported\n", fmt);
            return;
    }
}
void load_from_svmlight(const char *filename, size_t nr_skips=1, bool zero_based=false, double append_bias=-1.0) {
svmlight_file_iterator_t<val_type> entry_it(filename, nr_skips, zero_based, append_bias);
load_from_iterator(entry_it.get_rows(), entry_it.get_cols(), entry_it.get_nnz(), &entry_it);
}
// Open a binary PETSc file and delegate to the FILE* overload.
void load_from_PETSc(const char *filename) {
    FILE *input = fopen(filename, "rb");
    if(input != NULL) {
        load_from_PETSc(input, filename);
        fclose(input);
    } else {
        fprintf(stderr, "Error: can't read the file (%s)!!\n", filename);
    }
}
// Load the matrix from a binary PETSc stream. Two header variants exist:
// UNSIGNED_FILE stores nnz as a 32-bit int, LONG_FILE as a 64-bit int.
// The stream carries CSR data (per-row counts, column indices, double values);
// the CSC half is rebuilt afterwards via csr_to_csc().
void load_from_PETSc(FILE *fp, const char *filename=NULL) {
    clear_space(); // clear any pre-allocated space in case of memory leak
    const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015;
    int32_t int_buf[3];
    size_t headersize = 0; // bytes consumed so far (kept for debugging)
    headersize += sizeof(int)*fread(int_buf, sizeof(int), 3, fp);
    int filetype = int_buf[0];
    rows = (size_t) int_buf[1];
    cols = (size_t) int_buf[2];
    if(filetype == UNSIGNED_FILE) {
        headersize += sizeof(int)*fread(int_buf, sizeof(int32_t), 1, fp);
        nnz = (size_t) int_buf[0];
    } else if (filetype == LONG_FILE){
        headersize += sizeof(size_t)*fread(&nnz, sizeof(int64_t), 1, fp);
    } else {
        // Fixed: previously execution fell through and allocate_space() was
        // called with an indeterminate nnz. Also guard against filename==NULL
        // (the default), which is undefined behavior with %s.
        fprintf(stderr, "Error: wrong PETSc format in %s.\n", filename ? filename : "(stream)");
        return;
    }
    allocate_space(rows,cols,nnz);
    // load CSR from the binary PETSc format
    {
        // read per-row counts and prefix-sum them into row_ptr
        std::vector<int32_t> nnz_row(rows);
        headersize += sizeof(int32_t)*fread(&nnz_row[0], sizeof(int32_t), rows, fp);
        row_ptr[0] = 0;
        for(size_t r = 1; r <= rows; r++)
            row_ptr[r] = row_ptr[r-1] + nnz_row[r-1];
        // read col_idx
        headersize += sizeof(int)*fread(&col_idx[0], sizeof(unsigned), nnz, fp);
        // read values in fixed-size chunks, converting double -> val_type
        const size_t chunksize = 1024;
        double buf[chunksize];
        size_t idx = 0;
        while(idx + chunksize < nnz) {
            headersize += sizeof(double)*fread(&buf[0], sizeof(double), chunksize, fp);
            for(size_t i = 0; i < chunksize; i++)
                val_t[idx+i] = (val_type) buf[i];
            idx += chunksize;
        }
        size_t remaining = nnz - idx;
        headersize += sizeof(double)*fread(&buf[0], sizeof(double), remaining, fp);
        for(size_t i = 0; i < remaining; i++)
            val_t[idx+i] = (val_type) buf[i];
    }
    csr_to_csc();
    update_max_nnz();
}
// Open a file for binary write and save the matrix in PETSc format.
// Aborts the process if the file cannot be opened (historic behavior).
void save_PETSc_to_file(const char *filename) const {
    FILE *fp = fopen(filename, "wb");
    if(fp == NULL) {
        fprintf(stderr,"Error: can't open file %s\n", filename);
        exit(1);
    }
    save_PETSc_to_file(fp);
    fclose(fp); // was leaked before; closing also flushes buffered output
}
// Write the matrix to a binary PETSc stream using the LONG_FILE header
// (64-bit nnz). Layout: magic, rows, cols (int32), nnz (size_t), per-row
// counts (int32), column indices (unsigned), values (double).
// NOTE(review): rows/cols are narrowed to int32_t -- matrices with more than
// 2^31 rows/cols cannot be represented in this format.
void save_PETSc_to_file(FILE *fp) const {
    const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; // UNSIGNED_FILE kept for reference; only LONG_FILE is written
    int32_t int_buf[3] = {(int32_t)LONG_FILE, (int32_t)rows, (int32_t)cols};
    std::vector<int32_t> nnz_row(rows);
    for(size_t r = 0; r < rows; r++)
        nnz_row[r] = (int)nnz_of_row(r);
    fwrite(&int_buf[0], sizeof(int32_t), 3, fp);
    fwrite(&nnz, sizeof(size_t), 1, fp);
    fwrite(&nnz_row[0], sizeof(int32_t), rows, fp);
    fwrite(&col_idx[0], sizeof(unsigned), nnz, fp);
    // the following part == fwrite(val_t, sizeof(double), nnz, fp);
    // (chunked so val_type -> double conversion uses a small fixed buffer)
    const size_t chunksize = 1024;
    double buf[chunksize];
    size_t idx = 0;
    while(idx + chunksize < nnz) {
        for(size_t i = 0; i < chunksize; i++)
            buf[i] = (double) val_t[idx+i];
        fwrite(&buf[0], sizeof(double), chunksize, fp);
        idx += chunksize;
    }
    size_t remaining = nnz - idx;
    for(size_t i = 0; i < remaining; i++)
        buf[i] = (double) val_t[idx+i];
    fwrite(&buf[0], sizeof(double), remaining, fp);
}
// Mean over the stored (nonzero) entries only.
// Returns 0 for an empty matrix instead of dividing by zero (NaN).
val_type get_global_mean() const {
    if(nnz == 0) return 0;
    val_type sum=0;
    for(size_t idx = 0; idx < nnz; idx++) sum += val[idx];
    return sum / (val_type)nnz;
}
// Subtract a constant bias from every stored entry, in both the CSC (val)
// and CSR (val_t) value arrays. A zero bias is a no-op.
void remove_bias(val_type bias=0) {
    if(bias == 0) return;
    for(size_t k = 0; k < nnz; k++) {
        val[k] -= bias;
        val_t[k] -= bias;
    }
}
// Debug dump: header info, then the dense rendering of the matrix from the
// CSR arrays and of its transpose from the CSC arrays. Intended for tiny
// matrices only (O(rows*cols) output).
// NOTE(review): "%lu" assumes size_t == unsigned long -- fine on LP64,
// wrong on Windows/LLP64; consider %zu if portability matters.
void print_mat(const char *str="", FILE *fp=stdout) const {
    fprintf(fp, "===>%s<===\n", str);
    fprintf(fp, "rows %lu, cols %lu, nnz %lu\n", rows, cols, nnz);
    fprintf(fp, "col_ptr, row_idx, val = %p, %p, %p\n", col_ptr, row_idx, val);
    fprintf(fp, "row_ptr, col_idx, val_t = %p, %p, %p\n", row_ptr, col_idx, val_t);
    fprintf(fp, "mem_alloc_by_me = %d\n", mem_alloc_by_me);
    fprintf(fp, "Matrix:\n");
    // walk each CSR row with a cursor, printing 0.000 for absent columns
    for(size_t i = 0; i < rows; i++) {
        size_t it = row_ptr[i];
        for(size_t j = 0; j < cols; j++) {
            if(it < row_ptr[i+1] && col_idx[it] == j) {
                fprintf(fp, "%.3f ", val_t[it]);
                it ++;
            }
            else
                fprintf(fp, "0.000 ");
        }
        fprintf(fp, "\n");
    }
    fprintf(fp, "Matrix^T:\n");
    // same walk over the CSC arrays (columns of the original matrix)
    for(size_t i = 0; i < cols; i++) {
        size_t it = col_ptr[i];
        for(size_t j = 0; j < rows; j++) {
            if(it < col_ptr[i+1] && row_idx[it] == j) {
                fprintf(fp, "%.3f ", val[it]);
                it ++;
            }
            else
                fprintf(fp, "0.000 ");
        }
        fprintf(fp, "\n");
    }
}
// ===========================================
// ========= Friend Functions/Classes ========
// ===========================================
template<typename VX, typename VY, typename VZ>
friend smat_t<VZ>& smat_x_smat(const smat_t<VX> &X, const smat_t<VY> &Y, smat_t<VZ> &Z);
};
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
} // extern
#endif
/*-------------- Iterators -------------------*/
// A single (row, column, value) matrix entry with an importance weight.
// Iterators use a negative weight (entry_t(0,0,0,-1)) as an end-of-stream marker.
template<typename val_type>
class entry_t {
    public:
        unsigned i, j;       // row and column indices
        val_type v, weight;  // entry value and importance weight
        entry_t(int _i = 0, int _j = 0, val_type _v = 0, val_type _w = 1.0) {
            i = _i;
            j = _j;
            v = _v;
            weight = _w;
        }
};
// Abstract base for streaming matrix entries one at a time; concrete
// subclasses read from files, COO arrays, or existing matrices.
template<typename val_type>
class entry_iterator_t {
    public:
        // Number of elements left to iterate
        size_t nnz;
        // When no next entry, return entry_t(0, 0, 0, -1);
        virtual entry_t<val_type> next() = 0;
        // Remaining (not total) entry count.
        size_t get_nnz() const { return nnz; }
};
#define MAXLINE 10240
// Iterator for files with (i,j,v) tuples
// Indices in the file are 1-based; next() converts them to 0-based.
// NOTE(review): if fopen fails the constructor just returns with fp == NULL
// and a later next() would crash in fgets -- callers are expected to have
// validated the path; confirm.
template<typename val_type>
class file_iterator_t: public entry_iterator_t<val_type> {
    public:
        using entry_iterator_t<val_type>::nnz;
        // start_pos: byte offset to seek to before reading (resume support).
        file_iterator_t(size_t nnz_, const char* filename, size_t start_pos=0) {
            nnz = nnz_;
            fp = fopen(filename,"rb");
            if(fp == NULL) {
                fprintf(stderr, "Error: cannot read the file (%s)!!\n", filename);
                return;
            }
            fseek(fp, start_pos, SEEK_SET);
        }
        ~file_iterator_t(){ if (fp) fclose(fp); }
        entry_t<val_type> next() {
            const int base10 = 10;
            if(nnz > 0) {
                --nnz;
                // on read failure we still parse the (stale) line buffer below
                if(fgets(&line[0], MAXLINE, fp)==NULL)
                    fprintf(stderr, "Error: reading error !!\n");
                char *head_ptr = &line[0];
                size_t i = strtol(head_ptr, &head_ptr, base10);
                size_t j = strtol(head_ptr, &head_ptr, base10);
                double v = strtod(head_ptr, &head_ptr);
                // convert 1-based file indices to 0-based matrix indices
                return entry_t<val_type>(i - 1, j - 1, (val_type)v);
            }
            else { // No more to iterate
                return entry_t<val_type>(0, 0, 0, -1);
            }
        }
    private:
        FILE *fp;          // NULL if the constructor failed to open the file
        char line[MAXLINE]; // scratch buffer for one input line
};
// Iterator over an SVMLIGHT/LIBSVM-format file ("label idx:val idx:val ...").
// The whole file is parsed eagerly into COO triplets (I, J, V) in the
// constructor; next() then replays them. Row count = number of lines,
// column count = max feature index seen (+1 for an optional bias column).
// NOTE(review): the `if(fs.eof()) break;` after getline drops the final line
// when the file does not end with a newline -- confirm this is intended.
template<class val_type>
class svmlight_file_iterator_t : public entry_iterator_t<val_type> {
    public:
        using entry_iterator_t<val_type>::nnz;
        // nr_skips: leading tokens per line to ignore (labels).
        // zero_based: feature indices already 0-based (otherwise 1-based).
        // append_bias: if > 0, append a constant column with this value.
        svmlight_file_iterator_t(
                const char* filename,
                size_t nr_skips=1,
                bool zero_based=false,
                double append_bias=-1.0) {
            std::ifstream fs;
            std::string line, kv;
            const int base10 = 10;
            fs.open(filename, std::ios::in);
            if(!fs.is_open()) {
                std::cout << "Unable to open" << filename << std::endl;
                exit(-1);
            }
            I.clear();
            J.clear();
            V.clear();
            nr_rows = nr_cols = 0;
            while(std::getline(fs, line)) {
                if(fs.eof()) {
                    break;
                }
                std::stringstream line_ss;
                line_ss.str(line);
                if(nr_skips != 0) {
                    // skip label part;
                    for(size_t i = 0; i < nr_skips; i++) {
                        line_ss >> kv;
                    }
                }
                size_t row_idx = nr_rows;
                // parse each "key:value" token on the line
                while(line_ss >> kv) {
                    char *head_ptr = const_cast<char*>(kv.c_str());
                    size_t key = strtol(head_ptr, &head_ptr, base10);
                    head_ptr++; // advancing for the ":" seperator
                    val_type val = static_cast<val_type>(strtod(head_ptr, &head_ptr));
                    size_t col_idx = (zero_based)? key : (key - 1);
                    nr_cols = std::max(nr_cols, col_idx + 1);
                    I.push_back(row_idx);
                    J.push_back(col_idx);
                    V.push_back(val);
                }
                nr_rows += 1;
            }
            // optionally append a dense bias column (one entry per row)
            if(append_bias > 0) {
                size_t col_idx = nr_cols;
                nr_cols += 1;
                val_type val = static_cast<val_type>(append_bias);
                for(size_t row_idx = 0; row_idx < nr_rows; row_idx++) {
                    I.push_back(row_idx);
                    J.push_back(col_idx);
                    V.push_back(val);
                }
            }
            idx = 0;
            nnz = I.size();
        }
        // Replay the pre-parsed triplets; (0,0,0,-1) when exhausted.
        entry_t<val_type> next() {
            if(nnz > 0) {
                ++idx; --nnz;
                return entry_t<val_type>(I[idx - 1], J[idx - 1], V[idx - 1]);
            } else {
                return entry_t<val_type>(0, 0, 0, -1);
            }
        }
        size_t get_rows() const { return nr_rows; }
        size_t get_cols() const { return nr_cols; }
    private:
        size_t nr_rows, nr_cols; // parsed matrix dimensions
        size_t idx;              // replay cursor into I/J/V
        std::vector<size_t> I, J;
        std::vector<val_type> V;
};
// Iterator for three parallel COO vectors (I, J, V).
// The iterator does NOT own the storage: the caller must keep the vectors /
// arrays alive for the iterator's lifetime.
template<typename val_type>
class coo_iterator_t: public entry_iterator_t<val_type> {
    public:
        using entry_iterator_t<val_type>::nnz;
        // Fixed: previously the vectors were taken BY VALUE while pointers
        // into them were stored, so I/J/V dangled as soon as the constructor
        // returned. Taking const references is call-compatible and keeps the
        // pointers valid. data() is also safe on empty vectors (&v[0] is not).
        coo_iterator_t(const std::vector<size_t>& _I, const std::vector<size_t>& _J, const std::vector<val_type>& _V){
            nnz = std::min(std::min(_I.size(), _J.size()), _V.size());
            idx = 0;
            I = _I.data(); J = _J.data(); V = _V.data();
        }
        coo_iterator_t(const size_t _nnz, const size_t* _I, const size_t* _J, const val_type* _V){
            nnz = _nnz;
            idx = 0;
            I = _I; J = _J; V = _V;
        }
        ~coo_iterator_t(){ }
        // Returns the next triplet, or the (0,0,0,-1) sentinel when exhausted.
        entry_t<val_type> next() {
            if(nnz > 0) {
                ++idx;
                --nnz;
                return entry_t<val_type>(I[idx - 1], J[idx - 1], V[idx - 1]);
            } else {
                return entry_t<val_type>(0, 0, 0, -1);
            }
        }
    private:
        size_t idx; // was int: could overflow for nnz > INT_MAX
        const size_t *I, *J;
        const val_type *V;
};
// Iterator for sparse matrix
// Streams the entries of an smat_t in row-major (CSR) or column-major (CSC)
// order by borrowing the matrix's internal arrays. In COLMAJOR mode the
// yielded (i, j) are the transposed coordinates. Does not own M's storage.
template<typename val_type>
class smat_iterator_t: public entry_iterator_t<val_type> {
    public:
        using entry_iterator_t<val_type>::nnz;
        smat_iterator_t(const smat_t<val_type>& M, major_t major = ROWMAJOR) {
            nnz = M.nnz;
            // pick the CSR triple or the CSC triple depending on traversal order
            col_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx;
            row_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr;
            val_t = (major == ROWMAJOR)? M.val_t: M.val;
            rows = (major==ROWMAJOR)? M.rows: M.cols;
            cols = (major==ROWMAJOR)? M.cols: M.rows;
            cur_idx = cur_row = 0;
        }
        ~smat_iterator_t() {}
        entry_t<val_type> next() {
            if (nnz > 0)
                nnz--;
            else
                return entry_t<val_type>(0, 0, 0, -1);
            // skip empty rows until cur_idx falls inside cur_row's range
            while (cur_idx >= row_ptr[cur_row+1])
                cur_row++;
            entry_t<val_type> ret(cur_row, col_idx[cur_idx], val_t[cur_idx]);
            cur_idx++;
            return ret;
        }
    private:
        unsigned *col_idx;  // borrowed index array (col_idx or row_idx)
        size_t *row_ptr;    // borrowed offset array (row_ptr or col_ptr)
        val_type *val_t;    // borrowed value array (val_t or val)
        size_t rows, cols, cur_idx;
        size_t cur_row;
};
// Iterator for a subset of sparse matrix
// Streams only the rows (ROWMAJOR) or columns (COLMAJOR) listed in subset_,
// in that order, borrowing the matrix's internal arrays.
template<typename val_type>
class smat_subset_iterator_t: public entry_iterator_t<val_type> {
    public:
        using entry_iterator_t<val_type>::nnz;
        // When ROWMAJOR (COLMAJOR) is used, we sample several rows (columns) according to the order in subset_.
        // If remapping = true, then we are using the corresponding index (i, j) in the submatrix.
        // If remapping = false, then we are using the index (i, j) in the original matrix.
        smat_subset_iterator_t(const smat_t<val_type>& M, const unsigned *subset_, size_t size, major_t major_ = ROWMAJOR, bool remapping_=true) {
            major = major_; remapping = remapping_;
            cr_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx;
            rc_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr;
            val_t = (major == ROWMAJOR)? M.val_t: M.val;
            rows = (major==ROWMAJOR)? (remapping? size: M.rows): M.rows;
            cols = (major==ROWMAJOR)? M.cols: (remapping? size: M.cols);
            subset.resize(size);
            // total nnz = sum of nnz over the selected rows/cols
            nnz = 0;
            for(size_t i = 0; i < size; i++) {
                unsigned idx = subset_[i];
                subset[i] = idx;
                nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx);
            }
            cur_rc = 0;
            cur_idx = rc_ptr[subset[cur_rc]];
        }
        // Same as above but with a std::vector subset.
        smat_subset_iterator_t(const smat_t<val_type>& M, const std::vector<unsigned> &subset_, major_t major_ = ROWMAJOR, bool remapping_=true) {
            major = major_; remapping = remapping_;
            cr_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx;
            rc_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr;
            val_t = (major == ROWMAJOR)? M.val_t: M.val;
            rows = (major==ROWMAJOR)? (remapping? subset_.size(): M.rows): M.rows;
            cols = (major==ROWMAJOR)? M.cols: (remapping? subset_.size(): M.cols);
            subset.resize(subset_.size());
            nnz = 0;
            for(size_t i = 0; i < subset_.size(); i++) {
                unsigned idx = subset_[i];
                subset[i] = idx;
                nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx);
            }
            cur_rc = 0;
            cur_idx = rc_ptr[subset[cur_rc]];
        }
        ~smat_subset_iterator_t() {}
        size_t get_rows() { return rows; }
        size_t get_cols() { return cols; }
        entry_t<val_type> next() {
            if (nnz > 0)
                nnz--;
            else
                return entry_t<val_type>(0,0,0, -1);
            // advance past exhausted (or empty) selected rows/cols;
            // the nnz guard above ensures cur_rc stays within subset here
            while (cur_idx >= rc_ptr[subset[cur_rc]+1]) {
                cur_rc++;
                cur_idx = rc_ptr[subset[cur_rc]];
            }
            entry_t<val_type> ret_rowwise(remapping? cur_rc: subset[cur_rc], cr_idx[cur_idx], val_t[cur_idx]);
            entry_t<val_type> ret_colwise(cr_idx[cur_idx], remapping? cur_rc: subset[cur_rc], val_t[cur_idx]);
            cur_idx++;
            return major==ROWMAJOR? ret_rowwise: ret_colwise;
        }
    private:
        size_t rows, cols;            // dimensions of the (possibly remapped) submatrix
        std::vector<unsigned>subset;  // selected row/col indices, in iteration order
        unsigned *cr_idx;             // borrowed index array
        size_t *rc_ptr;               // borrowed offset array
        val_type *val_t;              // borrowed value array
        size_t cur_rc, cur_idx;
        major_t major;
        bool remapping;
};
// Iterator for a dense matrix, yielding the entries whose magnitude is at
// least `threshold`, in row-major order. The matrix is scanned once in the
// constructor to count qualifying entries; M must outlive the iterator.
template<typename val_type>
class dmat_iterator_t: public entry_iterator_t<val_type> {
    public:
        using entry_iterator_t<val_type>::nnz;
        dmat_iterator_t(const dmat_t<val_type>& M, double threshold=1e-12) : M(M), rows(M.rows), cols(M.cols), threshold(fabs(threshold)) {
            cur_row = 0;
            cur_col = 0;
            nnz = 0;
            // count qualifying entries and remember where the first one lives
            bool find_firstnz = true;
            for(size_t i = 0; i < rows; i++)
                for(size_t j = 0; j < cols; j++)
                    if(fabs((double)M.at(i,j)) >= threshold) {
                        if(find_firstnz) {
                            cur_row = i;
                            cur_col = j;
                            find_firstnz = false;
                        }
                        nnz++;
                    }
        }
        ~dmat_iterator_t() {}
        entry_t<val_type> next() {
            if (nnz > 0)
                nnz--;
            else
                return entry_t<val_type>(0,0,0, -1);
            entry_t<val_type> entry(cur_row, cur_col, M.at(cur_row, cur_col));
            // Advance to the next above-threshold entry. Fixed: the scan now
            // stops at the end of the matrix -- the original loop had no bounds
            // check and read past the last element after the final qualifying
            // entry was consumed.
            do {
                cur_col ++;
                if(cur_col == cols) {
                    cur_row ++;
                    cur_col = 0;
                }
            } while(cur_row < rows && fabs((double)M.at(cur_row, cur_col)) < threshold);
            return entry;
        }
    private:
        const dmat_t<val_type>& M;
        size_t rows, cols, cur_row, cur_col;
        double threshold; // always non-negative (fabs applied in the ctor)
};
/*-------------- Implementation of Linear Algebra Operations --------------*/
// Lapack and Blas support
#ifdef _WIN32
#define ddot_ ddot
#define sdot_ sdot
#define daxpy_ daxpy
#define saxpy_ saxpy
#define dcopy_ dcopy
#define scopy_ scopy
#define dgemm_ dgemm
#define sgemm_ sgemm
#define dposv_ dposv
#define sposv_ sposv
#define dgesdd_ dgesdd
#define sgesdd_ sgesdd
#endif
extern "C" {
double ddot_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *);
float sdot_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *);
ptrdiff_t dscal_(ptrdiff_t *, double *, double *, ptrdiff_t *);
ptrdiff_t sscal_(ptrdiff_t *, float *, float *, ptrdiff_t *);
ptrdiff_t daxpy_(ptrdiff_t *, double *, double *, ptrdiff_t *, double *, ptrdiff_t *);
ptrdiff_t saxpy_(ptrdiff_t *, float *, float *, ptrdiff_t *, float *, ptrdiff_t *);
double dcopy_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *);
float scopy_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *);
void dgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc);
void sgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc);
int dposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info);
int sposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info);
void dgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
void sgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
}
// Type-dispatching wrappers over the Fortran BLAS/LAPACK entry points declared
// above: the unspecialized templates are declared but not defined, so only
// float/double instantiations link. Pointer-style arguments mirror the
// Fortran calling convention.
template<typename val_type> val_type dot(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline double dot(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return ddot_(len,x,xinc,y,yinc);}
template<> inline float dot(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return sdot_(len,x,xinc,y,yinc);}
// x *= a (BLAS scal)
template<typename val_type> val_type scal(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *);
template<> inline double scal(ptrdiff_t *len, double *a, double *x, ptrdiff_t *xinc) { return dscal_(len,a,x,xinc);}
template<> inline float scal(ptrdiff_t *len, float *a, float *x, ptrdiff_t *xinc) { return sscal_(len,a,x,xinc);}
// y += alpha * x (BLAS axpy)
template<typename val_type> ptrdiff_t axpy(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline ptrdiff_t axpy(ptrdiff_t *len, double *alpha, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return daxpy_(len,alpha,x,xinc,y,yinc);};
template<> inline ptrdiff_t axpy(ptrdiff_t *len, float *alpha, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return saxpy_(len,alpha,x,xinc,y,yinc);};
// y = x (BLAS copy)
template<typename val_type> val_type copy(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline double copy(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return dcopy_(len,x,xinc,y,yinc);}
template<> inline float copy(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return scopy_(len,x,xinc,y,yinc);}
// C = alpha*op(A)*op(B) + beta*C (BLAS gemm, column-major)
template<typename val_type> void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, val_type *alpha, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, val_type *beta, val_type *c, ptrdiff_t *ldc);
template<> inline void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc) { dgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); }
template<> inline void gemm<float>(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc) { sgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); }
// Solve A*X = B for symmetric positive-definite A (LAPACK posv)
template<typename val_type> int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, ptrdiff_t *info);
template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info) { return dposv_(uplo, n, nrhs, a, lda, b, ldb, info); }
template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info) { return sposv_(uplo, n, nrhs, a, lda, b, ldb, info); }
// SVD via divide and conquer (LAPACK gesdd)
template<typename val_type> void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, val_type* a, ptrdiff_t* lda, val_type* s, val_type* u, ptrdiff_t* ldu, val_type* vt, ptrdiff_t* ldvt, val_type* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return dgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); }
template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return sgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); }
// <x,y>
// Dense dot product over raw arrays; thin wrapper over the BLAS dot
// specializations. BLAS takes non-const pointers, hence the const_casts
// (the data is not modified).
template<typename val_type>
val_type do_dot_product(const val_type *x, const val_type *y, size_t size) {
    ptrdiff_t len = (ptrdiff_t) size;
    ptrdiff_t inc = 1;
    return dot(&len, const_cast<val_type*>(x), &inc, const_cast<val_type*>(y), &inc);
}
// Dense-vector dot product: delegates to the BLAS pointer overload.
template<typename val_type>
val_type do_dot_product(const dvec_t<val_type> &x, const dvec_t<val_type> &y) {
    assert(x.size() == y.size());
    return do_dot_product(x.data(), y.data(), x.size());
}
// Sparse-vector dot product via a two-pointer merge over the index arrays.
// NOTE(review): assumes idx[] is sorted ascending in both vectors -- confirm
// svec_t's invariant.
template<typename val_type>
val_type do_dot_product(const svec_t<val_type> &x, const svec_t<val_type> &y) {
    assert(x.size() == y.size());
    val_type ret = 0;
    for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) {
        if(x.idx[i] < y.idx[j]) {
            i ++;
        } else if(x.idx[i] > y.idx[j]) {
            j ++;
        } else {
            // matching index: accumulate the product and advance both
            ret += x.val[i] * y.val[j];
            i ++; j ++;
        }
    }
    return ret;
}
// Dot product of two sparse-dense vectors via a two-pointer merge over the
// sorted nonzero-index lists.
template<typename val_type>
val_type do_dot_product(const sdvec_t<val_type> &x, const sdvec_t<val_type> &y) {
    assert(x.size() == y.size());
    val_type ret = 0;
    for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) {
        if(x.nz_idx[i] < y.nz_idx[j]) {
            i++;
        } else if(x.nz_idx[i] > y.nz_idx[j]) {
            // Fixed: the original branch compared x.nz_idx[i] < y.nz_idx[i]
            // (wrong operator and wrong subscript), which broke the merge
            // whenever y's index was behind x's, skipping matches.
            j++;
        } else {
            // matching index: accumulate the product and advance both cursors
            ret += x[x.nz_idx[i]] * y[y.nz_idx[j]];
            i++;
            j++;
        }
    }
    return ret;
}
// Dense x sparse: gather from the dense vector at the sparse nonzeros.
template<typename val_type>
val_type do_dot_product(const dvec_t<val_type> &x, const svec_t<val_type> &y) {
    assert(x.size() == y.size());
    val_type ret = 0;
    for(size_t i = 0; i < y.get_nnz(); i++)
        ret += x[y.idx[i]] * y.val[i];
    return ret;
}
// Sparse x dense: symmetric, reuse the overload above.
template<typename val_type>
val_type do_dot_product(const svec_t<val_type> &x, const dvec_t<val_type> &y) {
    assert(x.size() == y.size());
    return do_dot_product(y, x);
}
// Dense x sparse-dense: gather at y's tracked nonzeros.
template<typename val_type>
val_type do_dot_product(const dvec_t<val_type> &x, const sdvec_t<val_type> &y) {
    val_type ret = 0;
    for(size_t i = 0; i < y.get_nnz(); i++) {
        ret += x[y.nz_idx[i]] * y[y.nz_idx[i]];
    }
    return ret;
}
// Sparse-dense x dense: symmetric, reuse the overload above.
template<typename val_type>
val_type do_dot_product(const sdvec_t<val_type> &x, const dvec_t<val_type> &y) {
    return do_dot_product(y, x);
}
// Sparse x sparse-dense: two-pointer merge over both sorted index lists.
template<typename val_type>
val_type do_dot_product(const svec_t<val_type> &x, const sdvec_t<val_type> &y) {
    val_type ret = 0;
    for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) {
        if(x.idx[i] < y.nz_idx[j]) {
            i++;
        } else if(x.idx[i] > y.nz_idx[j]) {
            j++;
        } else {
            ret += x.val[i] * y[y.nz_idx[j]];
            i++;
            j++;
        }
    }
    return ret;
}
// Sparse-dense x sparse: symmetric, reuse the overload above.
template<typename val_type>
val_type do_dot_product(const sdvec_t<val_type> &x, const svec_t<val_type> &y) {
    return do_dot_product(y, x);
}
// Generic-vector dot product: unwrap each gvec to its concrete storage
// (sparse or dense) and dispatch to the matching overload. Returns 0 if
// either side reports neither representation.
template<typename val_type>
val_type do_dot_product(const gvec_t<val_type> &x, const gvec_t<val_type> &y) {
    assert(x.size() == y.size());
    if(x.is_sparse()) {
        if(y.is_sparse())
            return do_dot_product(x.get_sparse(), y.get_sparse());
        if(y.is_dense())
            return do_dot_product(x.get_sparse(), y.get_dense());
    } else if(x.is_dense()) {
        if(y.is_sparse())
            return do_dot_product(x.get_dense(), y.get_sparse());
        if(y.is_dense())
            return do_dot_product(x.get_dense(), y.get_dense());
    }
    return 0;
}
// Frobenius inner product of two dense matrices.
// Fast path: identical layouts reduce to one flat BLAS dot. Mixed layouts
// iterate over the longer dimension (transposing BOTH operands together so
// coordinates stay aligned) with an OpenMP reduction.
template<typename val_type>
val_type do_dot_product(const dmat_t<val_type> &x, const dmat_t<val_type> &y) {
    assert(x.rows == y.rows && x.cols == y.cols);
    if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor()))
        return do_dot_product(x.data(), y.data(), x.rows*x.cols);
    else {
        val_type ret = 0.0;
        const dmat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose();
        const dmat_t<val_type> &yy = (y.rows > y.cols) ? y : y.transpose();
#pragma omp parallel for schedule(static) reduction(+:ret)
        for(size_t i = 0; i < xx.rows; i++) {
            // accumulate each row in double for better precision, then fold in
            double ret_local = 0.0;
            for(size_t j = 0; j < xx.cols; j++)
                ret_local += xx.at(i,j)*yy.at(i,j);
            ret += ret_local;
        }
        return (val_type)ret;
    }
}
// Frobenius inner product of two sparse matrices: per-row sparse dot products
// over the longer dimension (both operands transposed together), reduced with
// OpenMP.
// NOTE(review): assumes get_row() is cheap (a view into the CSR arrays) --
// confirm it does not copy.
template<typename val_type>
val_type do_dot_product(const smat_t<val_type> &x, const smat_t<val_type> &y) {
    assert(x.rows == y.rows && x.cols == y.cols);
    val_type ret = 0.0;
    const smat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose();
    const smat_t<val_type> &yy = (y.rows > y.cols) ? y : y.transpose();
#pragma omp parallel for schedule(static) reduction(+:ret)
    for(size_t i = 0; i < xx.rows; i++) {
        svec_t<val_type> sv1 = xx.get_row(i);
        svec_t<val_type> sv2 = yy.get_row(i);
        val_type ret_local = do_dot_product(sv1, sv2);
        ret += ret_local;
    }
    return (val_type)ret;
}
template<typename val_type>
val_type do_dot_product(const smat_t<val_type> &x, const dmat_t<val_type>&y) {
assert(x.rows == y.rows && x.cols == y.cols);
double ret = 0;
const smat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose();
#pragma omp parallel for schedule(static) reduction(+:ret)
for(size_t i = 0; i < xx.rows; i++) {
double tmp = 0;
for(size_t idx = xx.row_ptr[i]; idx < xx.row_ptr[i + 1]; idx++) {
tmp += xx.val[idx] * y.at(i, xx.col_idx[idx]);
}
ret += tmp;
}
return static_cast<val_type>(ret);
}
// Dense x sparse: the inner product is symmetric, reuse the overload above.
template<typename val_type>
val_type do_dot_product(const dmat_t<val_type>&x, const smat_t<val_type> &y) {
    return do_dot_product(y, x);
}
// Generic-matrix inner product: unwrap each gmat to its concrete storage and
// dispatch. Returns 0 when either side has no recognized representation.
template<typename val_type>
val_type do_dot_product(const gmat_t<val_type>&x, const gmat_t<val_type> &y) {
    assert(x.rows == y.rows && x.cols == y.cols);
    if(x.is_sparse() && y.is_sparse())
        return do_dot_product(x.get_sparse(), y.get_sparse());
    else if(x.is_sparse() && y.is_dense())
        return do_dot_product(x.get_sparse(), y.get_dense());
    else if(x.is_dense() && y.is_sparse())
        return do_dot_product(x.get_dense(), y.get_sparse());
    else if(x.is_dense() && y.is_dense())
        return do_dot_product(x.get_dense(), y.get_dense());
    else
        return 0;
}
// y = alpha * x + y
// Raw-array axpy over the BLAS wrapper; returns y for chaining.
// A zero alpha skips the BLAS call entirely.
template<typename val_type, typename T>
val_type* do_axpy(T alpha, const val_type *x, val_type *y, size_t size) {
    if(alpha == 0) return y;
    val_type a = (val_type)alpha;
    ptrdiff_t len = (ptrdiff_t) size;
    ptrdiff_t one = 1;
    axpy(&len, &a, const_cast<val_type*>(x), &one, y, &one);
    return y;
}
// y += alpha * x for dense vectors (delegates to the raw-array overload).
template<typename val_type, typename T>
dvec_t<val_type>& do_axpy(T alpha, const dvec_t<val_type> &x, dvec_t<val_type> &y) {
    do_axpy(alpha, x.data(), y.data(), x.size());
    return y;
}
// y += alpha * x for sparse x into dense y: scatter at x's nonzeros.
template<typename val_type, typename T>
dvec_t<val_type>& do_axpy(T alpha, const svec_t<val_type> &x, dvec_t<val_type> &y) {
    if(alpha == 0) return y;
    for(size_t i = 0; i < x.get_nnz(); i++) {
        y[x.idx[i]] += alpha * x.val[i];
    }
    return y;
}
// y += alpha * x for sparse x into sparse-dense y (tracks new nonzeros).
template<typename XV, typename YV, typename T>
sdvec_t<YV>& do_axpy(T alpha, const svec_t<XV>& x, sdvec_t<YV> &y) {
    if(alpha == 0) return y;
    for(size_t i = 0; i < x.get_nnz(); i++) {
        y.add_nonzero_at(x.idx[i], alpha * x.val[i]);
    }
    return y;
}
// y += alpha * x for dense x into sparse-dense y: every position is touched.
template<typename XV, typename YV, typename T>
sdvec_t<YV>& do_axpy(T alpha, const dvec_t<XV>& x, sdvec_t<YV> &y) {
    if(alpha == 0) return y;
    for(size_t i = 0; i < x.size(); i++) {
        y.add_nonzero_at(i, alpha * x[i]);
    }
    return y;
}
// Y += alpha * X for dense matrices. Identical layouts reduce to one flat
// BLAS axpy; mixed layouts fall back to an element-wise loop parallelized
// over the longer dimension.
template<typename val_type, typename T>
dmat_t<val_type>& do_axpy(T alpha, const dmat_t<val_type> &x, dmat_t<val_type> &y) {
    assert(x.rows == y.rows && x.cols == y.cols);
    if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor()))
        do_axpy(alpha, x.data(), y.data(), x.rows*x.cols);
    else {
        if(x.rows > x.cols) {
#pragma omp parallel for schedule(static)
            for(size_t i = 0; i < x.rows; i++)
                for(size_t j = 0; j < x.cols; j++)
                    y.at(i,j) += alpha*x.at(i,j);
        } else {
#pragma omp parallel for schedule(static)
            for(size_t j = 0; j < x.cols; j++)
                for(size_t i = 0; i < x.rows; i++)
                    y.at(i,j) += alpha*x.at(i,j);
        }
    }
    return y;
}
// x *= alpha
// In-place scaling of a raw array: alpha == 0 becomes a memset, alpha == 1
// is a no-op, everything else is delegated to BLAS scal.
template<typename val_type, typename T>
void do_scale(T alpha, val_type *x, size_t size) {
    if(alpha == 0.0) {
        memset(x, 0, sizeof(val_type) * size);
    } else if (alpha == 1.0) {
        return;
    } else {
        // Use the BLAS scal wrapper (declared above, previously unused).
        // The old implementation called axpy(alpha-1, x, x), which both
        // aliases x as input and output (undefined per the BLAS spec) and
        // computes x + (alpha-1)*x -- an extra rounding step per element
        // compared with the direct multiply scal performs.
        val_type alpha_ = (val_type)alpha;
        ptrdiff_t inc = 1;
        ptrdiff_t len = (ptrdiff_t) size;
        scal(&len, &alpha_, x, &inc);
    }
}
// x *= alpha for a dense vector.
template<typename val_type, typename T>
void do_scale(T alpha, dvec_t<val_type> &x) {
    do_scale(alpha, x.data(), x.size());
}
// x *= alpha for a sparse vector (scales the stored values only).
template<typename val_type, typename T>
void do_scale(T alpha, svec_t<val_type> &x) {
    do_scale(alpha, x.val, x.get_nnz());
}
// x *= alpha for a generic vector: dispatch on the concrete storage.
template<typename val_type, typename T>
void do_scale(T alpha, gvec_t<val_type> &x) {
    if(x.is_sparse())
        do_scale(alpha, x.get_sparse());
    else if(x.is_dense())
        do_scale(alpha, x.get_dense());
}
// X *= alpha for a dense matrix (flat pass over the backing array).
template<typename val_type, typename T>
void do_scale(T alpha, dmat_t<val_type> &x) {
    do_scale(alpha, x.data(), x.rows*x.cols);
}
// X *= alpha for a sparse matrix: both the CSC (val) and CSR (val_t) value
// arrays must be scaled to stay consistent.
template<typename val_type, typename T>
void do_scale(T alpha, smat_t<val_type> &x) {
    do_scale(alpha, x.val, x.get_nnz());
    do_scale(alpha, x.val_t, x.get_nnz());
}
// H = a*X*W + b H0 (H0 can put H. However H don't need to be pre-allocated, but H0 do.)
// Seeds H with b*H0, then delegates to the accumulate form of dmat_x_dmat
// (defined elsewhere in this file) to add a*X*W.
template<typename val_type, typename T2, typename T3>
dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    if(b == 0)
        assert(X.cols == W.rows);
    else
        assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
    H.lazy_resize(X.rows, W.cols).assign(b, H0);
    return dmat_x_dmat(a, X, W, 1, H);
}
// H = a*X*W + b*H0 for sparse X and dense W. H is (lazily) resized and seeded
// with b*H0, then a*X*W is accumulated with a strategy chosen from the
// storage layouts of W and H.
template<typename val_type, typename T2, typename T3>
dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    if(b == 0)
        assert(X.cols == W.rows);
    else
        assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
    H.lazy_resize(X.rows, W.cols).assign(b, H0);
    // H += aXW
    if(W.is_rowmajor()) {
        if(H.is_rowmajor()) {
            // both row-major: hand the raw buffers to the pointer-level kernel
            smat_x_dmat(a, X, W.data(), W.cols, 1.0, H.data(), H.data());
        } else { // H is col_major
            // walk X's CSR nonzeros, scattering a*Xij*W(j,:) into H(i,:)
#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
            for(size_t i = 0; i < X.rows; i++) {
                for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
                    size_t j = X.col_idx[idx];
                    const val_type &Xij = X.val_t[idx];
                    for(size_t t = 0; t < W.cols; t++)
                        H.at(i,t) += a*Xij*W.at(j,t);
                }
            }
        }
    } else { // W.is_colmajor
        if(H.is_colmajor()) {
            // column-by-column sparse matrix-vector products
            // NOTE(review): the `a` coefficient is not passed to X.Xv here --
            // confirm Xv applies it or that callers only use a == 1 on this path.
#pragma omp parallel for schedule(static)
            for(size_t j = 0; j < W.cols; j++)
            {
                dvec_t<val_type> Wj = W.get_col(j);
                dvec_t<val_type> Hj = H.get_col(j);
                X.Xv(Wj, Hj, true);
            }
        } else { // H.is row_major
#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
            for(size_t i = 0; i < X.rows; i++) {
                for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
                    size_t j = X.col_idx[idx];
                    const val_type &Xij = X.val_t[idx];
                    for(size_t t = 0; t < W.cols; t++)
                        H.at(i,t) += a*Xij*W.at(j,t);
                }
            }
        }
    }
    return H;
}
// H = a*X*W + b*H0 for a generic matrix X: dispatch on the concrete storage.
// An identity X reduces to H = b*H0 + a*W.
template<typename val_type, typename T2, typename T3>
dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    if(b == 0)
        assert(X.cols == W.rows);
    else
        assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
    if(X.is_sparse())
        smat_x_dmat(a, X.get_sparse(), W, b, H0, H);
    else if(X.is_dense())
        dmat_x_dmat(a, X.get_dense(), W, b, H0, H);
    else if(X.is_identity()) {
        H.lazy_resize(X.rows, W.cols).assign(b, H0);
        do_axpy(a, W, H);
    }
    return H;
}
// H = a*X*W + H0 (H0 can put H. However H don't need to be pre-allocated, but H0 do)
// Convenience forms with b fixed to 1.
template<typename val_type, typename T2>
dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    return dmat_x_dmat(a, X, W, 1.0, H0, H);
}
template<typename val_type, typename T2>
dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    return smat_x_dmat(a, X, W, 1.0, H0, H);
}
template<typename val_type, typename T2>
dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    return gmat_x_dmat(a, X, W, 1.0, H0, H);
}
// H = X*W (H don't need to be pre-allocated)
// Plain product: a = 1, b = 0 (H's old contents are ignored).
template<typename val_type>
dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) {
    return dmat_x_dmat(1.0, X, W, 0.0, H, H);
}
// operator form of the dense-dense product (allocates the result).
template<typename val_type>
dmat_t<val_type> operator*(const dmat_t<val_type> &X, const dmat_t<val_type> &W) {
    dmat_t<val_type> H(X.rows, W.cols);
    dmat_x_dmat(X, W, H);
    return H;
}
// H = X*W for two sparse matrices, built column by column in CSC form.
// For each output column c, accumulates sum_s W(s,c) * X(:,idx[s]) into a
// sparse-dense working vector, then appends its nonzeros to the CSC arrays.
// Finally H's CSR representation is derived via csc_to_csr().
template<typename VX, typename VW, typename VH>
smat_t<VH>& smat_x_smat(const smat_t<VX> &X, const smat_t<VW> &W, smat_t<VH> &H) {
std::vector<unsigned> row_idx;
std::vector<size_t> col_ptr;
std::vector<VH> val;
size_t rows = X.rows, cols = W.cols;
// temp: dense accumulator of length rows that tracks its own nonzero set.
sdvec_t<VH> temp(rows);
col_ptr.push_back(0);
size_t total_nnz = 0;
for(size_t c = 0; c < cols; c++) {
const svec_t<VW>& Wc = W.get_col(c);
for(size_t s = 0; s < Wc.nnz; s++) {
// temp += Wc[i] * Xi
do_axpy(Wc.val[s], X.get_col(Wc.idx[s]), temp);
}
// Collect the nonzero pattern produced for this column.
temp.update_nz_idx();
total_nnz += temp.nnz;
col_ptr.push_back(total_nnz);
for(size_t s = 0; s < temp.nnz; s++) {
row_idx.push_back(temp.nz_idx[s]);
val.push_back(temp[temp.nz_idx[s]]);
}
temp.clear();
}
// Copy the staged CSC arrays into H's own storage.
H.allocate_space(rows, cols, total_nnz);
memcpy(H.val, val.data(), sizeof(VH) * total_nnz);
memcpy(H.row_idx, row_idx.data(), sizeof(unsigned) * total_nnz);
memcpy(H.col_ptr, col_ptr.data(), sizeof(size_t) * (cols + 1));
H.csc_to_csr();
return H;
}
// Sparse * sparse product returning a new matrix. Note the result's value
// type follows X's (VX), so W's values are converted if VW differs.
template<typename VX, typename VW>
smat_t<VX> operator*(const smat_t<VX> &X, const smat_t<VW>& W) {
smat_t<VX> H;
smat_x_smat(X, W, H);
return H;
}
// H = X*W with sparse X; with b==0 the H0 slot (here H itself) is never read.
template<typename val_type>
dmat_t<val_type>& smat_x_dmat(const smat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) {
return smat_x_dmat(1.0, X, W, 0.0, H, H);
}
// Sparse * dense product returning a freshly allocated dense result.
template<typename val_type>
dmat_t<val_type> operator*(const smat_t<val_type> &X, const dmat_t<val_type> &W) {
dmat_t<val_type> H(X.rows, W.cols);
smat_x_dmat(X, W, H);
return H;
}
// Dense * sparse product computed as (W^T * X^T)^T via transposed views.
// NOTE(review): this relies on transpose() returning views that share H's
// storage (and on binding those temporaries to reference parameters) —
// verify against the dmat_t/smat_t definitions earlier in this file.
template<typename val_type>
dmat_t<val_type> operator*(const dmat_t<val_type> &X, const smat_t<val_type> &W) {
dmat_t<val_type> H(X.rows, W.cols);
smat_x_dmat(X.transpose(), W.transpose(), H.transpose());
return H;
}
// H = X*W for a general matrix X.
template<typename val_type>
dmat_t<val_type>& gmat_x_dmat(const gmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) {
return gmat_x_dmat(1.0, X, W, 0.0, H, H);
}
// General * dense product returning a freshly allocated dense result.
template<typename val_type>
dmat_t<val_type> operator*(const gmat_t<val_type> &X, const dmat_t<val_type> &W) {
dmat_t<val_type> H(X.rows, W.cols);
gmat_x_dmat(X, W, H);
return H;
}
// tr(W^T X H) (W, H: dense matrix; X: sparse matrix)
// = sum over nonzeros X(i,j) of X(i,j) * <W(i,:), H(j,:)> computed in double,
// with two layout-specialized loops: per-column (col-major W,H) or per-row.
template<typename val_type>
val_type trace_dmat_T_smat_dmat(const dmat_t<val_type> &W, const smat_t<val_type> &X, const dmat_t<val_type> &H) {
assert(W.cols == H.cols && W.rows == X.rows && H.rows == X.cols);
if(W.is_colmajor() && H.is_colmajor()) {
double ret = 0;
// Parallelize over the k columns; each thread reduces its own local sum.
#pragma omp parallel for schedule(static) reduction(+:ret)
for(size_t t = 0; t < W.cols; t++) {
const dvec_t<val_type> u = W.get_col(t);
const dvec_t<val_type> v = H.get_col(t);
double local_sum = 0;
for(size_t i = 0; i < X.rows; i++) {
for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++)
local_sum += X.val_t[idx]*u[i]*v[X.col_idx[idx]];
}
ret += local_sum;
}
return ret;
} else {
double ret= 0;
// Row-major (or mixed) layout: parallelize over X's rows instead.
#pragma omp parallel for schedule(dynamic,64) reduction(+:ret)
for(size_t i = 0; i < X.rows; i++) {
double local_sum = 0;
for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++) {
size_t j = X.col_idx[idx];
double sum = 0;
// sum = <W(i,:), H(j,:)>
for(size_t t = 0; t < W.cols; t++)
sum += W.at(i,t)*H.at(j,t);
local_sum += sum * X.val_t[idx];
}
ret += local_sum;
}
return ret;
}
}
// tr(W^T diag(D) H) (W, H: dense matrix; D: dense vector)
// Thin wrapper over the raw-pointer implementation; the asserts require both
// matrices to be row-major so rows are contiguous in memory.
template<typename val_type>
val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dvec_t<val_type> &D, const dmat_t<val_type> &H) {
assert(W.rows == H.rows && W.rows == D.len && W.cols == H.cols);
assert(W.is_rowmajor() && H.is_rowmajor());
return trace_dmat_T_diag_dmat(W.data(),D.data(),H.data(),W.rows,W.cols);
}
// -------------- Implementation of Linear Algebra Solvers --------------
// Solve Ax = b, A is symmetric positive definite, b is overwritten with the result x
// A will be modifed by internal Lapack. Make copy when necessary
template<typename val_type>
bool ls_solve_chol(val_type *A, val_type *b, size_t n) {
	char uplo = 'U';                  // factor the upper triangle
	ptrdiff_t dim = (ptrdiff_t) n;
	ptrdiff_t lda = dim;
	ptrdiff_t ldb = dim;
	ptrdiff_t nrhs = 1;               // single right-hand side
	ptrdiff_t info = 0;
	posv(&uplo, &dim, &nrhs, A, &lda, b, &ldb, &info);
	return info == 0;                 // info != 0 => not SPD / bad argument
}
// Solve AX = B, A is symmetric positive definite, B is overwritten with the result X
// A is a m-by-m matrix, while B is a m-by-n matrix stored in col_major
// A will be modified by internal Lapack. Make copy when necessary
template<typename val_type>
bool ls_solve_chol_matrix_colmajor(val_type *A, val_type *B, size_t m, size_t n = size_t(0)) {
	char uplo = 'U';                  // factor the upper triangle
	ptrdiff_t dim = (ptrdiff_t) m;
	ptrdiff_t lda = dim;
	ptrdiff_t ldb = dim;
	ptrdiff_t nrhs = (ptrdiff_t) n;   // number of right-hand-side columns
	ptrdiff_t info = 0;
	posv(&uplo, &dim, &nrhs, A, &lda, B, &ldb, &info);
	return info == 0;
}
// Solve AX = B, A is symmetric positive definite, return X.
// If A_as_workspace is true, A's own storage may be overwritten by the
// Cholesky factorization; otherwise a private copy of A is factored.
template<typename val_type>
dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B, bool A_as_workspace) {
	dmat_t<val_type> X(B);
	// LAPACK expects column-major storage; B's copy is overwritten with X.
	X.grow_body().to_colmajor();
	dmat_t<val_type> AA(A);
	if(A_as_workspace == false)
		AA.grow_body(); // detach from A so the factorization does not clobber it
	if(ls_solve_chol_matrix_colmajor(AA.data(), X.data(), AA.rows, X.cols) == false)
		// Fixed message: the original misspelled the callee's name
		// ("ls_solve_cho_matrix_colmajor") and was missing a newline.
		fprintf(stderr, "error when applying ls_solve_chol_matrix_colmajor\n");
	return X;
}
// Solve Ax = b, A is symmetric positive definite, return x.
// If A_as_workspace is true, A's own storage may be overwritten by the
// Cholesky factorization; otherwise a private copy of A is factored.
template<typename val_type>
dvec_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dvec_t<val_type>& b, bool A_as_workspace) {
	dvec_t<val_type> x(b);
	x.grow_body(); // b's copy is overwritten with the solution
	dmat_t<val_type> AA(A);
	if(A_as_workspace == false)
		AA.grow_body(); // detach from A so the factorization does not clobber it
	if(ls_solve_chol(AA.data(), x.data(), AA.rows) == false)
		// Fixed: append '\n' so the diagnostic is not fused with later output.
		fprintf(stderr, "error when applying ls_solve_chol\n");
	return x;
}
// SVD: A = USV'
// U, S, V don't necessarily need to be pre-allocated
// Wraps LAPACK's divide-and-conquer SVD (xGESDD), owning the workspace so
// repeated solves can reuse allocations.
template<typename val_type>
class svd_solver_t {
private:
char jobz; // 'S' = reduced (thin) SVD, 'A' = full SVD
ptrdiff_t mm, nn, min_mn, max_mn, lda, ldu, ldvt, lwork1, lwork2, lwork, info;
std::vector<val_type> u_buf, v_buf, s_buf, work;
std::vector<ptrdiff_t> iwork;
size_t k; // number of singular values = min(rows, cols)
// Size the LAPACK arguments/workspace and (lazily) shape U, S, V for A.
void prepare_parameter(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced) {
k = std::min(A.rows, A.cols);
mm = (ptrdiff_t)A.rows;
nn = (ptrdiff_t)A.cols;
min_mn = std::min(mm,nn);
max_mn = std::max(mm,nn);
lda = mm;
ldu = mm;
ldvt = reduced? min_mn : nn;
// Workspace bounds from the xGESDD documentation for jobz='S'/'A'.
lwork1 = 3*min_mn*min_mn + std::max(max_mn, 4*min_mn*min_mn + 4*min_mn);
lwork2 = 3*min_mn + std::max(max_mn, 4*min_mn*min_mn + 3*min_mn + max_mn);
lwork = 2 * std::max(lwork1, lwork2); // due to differences between lapack 3.1 and 3.4
info = 0;
work.resize(lwork);
iwork.resize((size_t)(8*min_mn));
if(!S.is_view() || S.size() != k)
S.resize(k);
if(reduced) {
jobz = 'S';
// V is row-major so LAPACK's column-major V^T lands as V directly.
U.lazy_resize(A.rows, k, COLMAJOR);
V.lazy_resize(A.cols, k, ROWMAJOR);
} else {
jobz = 'A';
U.lazy_resize(A.rows, A.rows, COLMAJOR);
V.lazy_resize(A.cols, A.cols, ROWMAJOR);
}
}
public:
svd_solver_t() {}
// Compute A = U S V'. Returns true on LAPACK success (info == 0).
// If A_as_workspace is true, A's storage is destroyed by the call.
bool solve(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true, bool A_as_workspace=false) {
if(A.is_rowmajor())
// Row-major A equals (col-major A)^T, so solve A^T = V S U'.
return solve(A.transpose(), V, S, U, reduced, A_as_workspace);
else {
dmat_t<val_type> AA(A.get_view());
if(A_as_workspace == false)
AA.grow_body();
prepare_parameter(AA, U, S, V, reduced);
#if defined(CPP11)
gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, work.data(), &lwork, iwork.data(), &info);
#else
gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, &work[0], &lwork, &iwork[0], &info);
#endif
return (info == 0);
}
}
};
template<typename val_type>
void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced, bool A_as_workspace) {
svd_solver_t<val_type> solver;
solver.solve(A, U, S, V, reduced, A_as_workspace);
}
// -------------- Implementation of Miscellaneous Functions --------------
// y = x for pointer to array
// Copies `size` elements from x to y via the BLAS copy routine; no-op when
// the two pointers are identical.
template<typename val_type>
void do_copy(const val_type *x, val_type *y, size_t size) {
if(x == y) return;
ptrdiff_t inc = 1; // unit stride for both arrays
ptrdiff_t len = (ptrdiff_t) size;
// BLAS takes non-const pointers even for the (read-only) source.
val_type *xx = const_cast<val_type*>(x);
copy(&len, xx, &inc, y, &inc);
}
// H = a*X*W + b H0
// X is an m*n
// W is an n*k, row-majored array
// H is an m*k, row-majored array
// H0 may alias H; rows are processed independently, so the update is done
// row-by-row in parallel using X's CSR (row_ptr/col_idx/val_t) arrays.
template<typename val_type, typename T2, typename T3>
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type *W, const size_t k, T3 b, const val_type *H0, val_type *H) {
size_t m = X.rows;
val_type aa = (val_type) a;
val_type bb = (val_type) b;
// a == 0: result reduces to b*H0 (or all zeros); skip the sparse product.
if(a == T2(0)) {
if(bb == (val_type)0.0){
memset(H, 0, sizeof(val_type)*m*k);
return ;
} else {
if(H!=H0) {
do_copy(H0, H, m*k);
//memcpy(H, H0, sizeof(val_type)*m*k);
}
do_scale(bb, H, m*k);
}
return;
}
#pragma omp parallel for schedule(dynamic,64) shared(X, W, H, H0, aa,bb)
for(size_t i = 0; i < m; i++) {
val_type *Hi = &H[k*i];
// Initialize row i of H with b*H0(i,:) (or zeros when b == 0).
if(bb == (val_type)0.0)
memset(Hi, 0, sizeof(val_type)*k);
else {
if(Hi!=&H0[k*i])
do_copy(&H0[k*i], Hi, k);
do_scale(bb, Hi, k);
}
// Accumulate a * X(i,j) * W(j,:) for each nonzero in row i.
for(size_t idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) {
const val_type Xij = X.val_t[idx];
const val_type *Wj = &W[X.col_idx[idx]*k];
for(size_t t = 0; t < k; t++)
Hi[t] += aa*Xij*Wj[t];
}
}
}
// H = a*X*W + H0 (raw-pointer variant; fixes the H0 coefficient at 1.0).
template<typename val_type, typename T2>
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type* W, const size_t k, const val_type *H0, val_type *H) {
smat_x_dmat(a, X, W, k, 1.0, H0, H);
}
// C = alpha*op(A)*op(B) + beta*C via BLAS gemm.
// C : m * n, k is the dimension of the middle
// (1) A, B, C are stored in column major!
// trans_A / trans_B select op(X) = X^T; leading dimensions follow suit.
template<typename val_type, typename T1, typename T2>
void dmat_x_dmat_colmajor(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) {
	char transa = trans_A ? 'T' : 'N';
	char transb = trans_B ? 'T' : 'N';
	ptrdiff_t mm = (ptrdiff_t) m;
	ptrdiff_t nn = (ptrdiff_t) n;
	ptrdiff_t kk = (ptrdiff_t) k;
	// A transposed operand is stored with the opposite leading dimension.
	ptrdiff_t lda = trans_A ? kk : mm;
	ptrdiff_t ldb = trans_B ? nn : kk;
	ptrdiff_t ldc = mm;
	val_type alpha_ = (val_type) alpha;
	val_type beta_ = (val_type) beta;
	// BLAS takes non-const pointers even for read-only operands.
	gemm(&transa, &transb, &mm, &nn, &kk, &alpha_,
	     const_cast<val_type*>(A), &lda,
	     const_cast<val_type*>(B), &ldb,
	     &beta_, C, &ldc);
}
// (2) A, B, C are stored in row major!
// Row-major C = A*B equals column-major C^T = B^T*A^T, so swap the operands
// (and their transpose flags) and call the column-major routine on n x m.
template<typename val_type, typename T1, typename T2>
void dmat_x_dmat(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) {
dmat_x_dmat_colmajor(alpha, B, trans_B, A, trans_A, beta, C, n, m, k);
}
// C = alpha*A*B + beta*C
// Layout-aware wrapper: picks the row- or column-major gemm path based on
// C's storage, passing transpose flags when an operand's layout disagrees.
template<typename val_type, typename T1, typename T2>
dmat_t<val_type>& dmat_x_dmat(T1 alpha, const dmat_t<val_type>& A, const dmat_t<val_type>& B, T2 beta, dmat_t<val_type>& C) {
assert(A.cols == B.rows);
C.lazy_resize(A.rows, B.cols);
if (C.is_rowmajor()) {
// An operand stored column-major is handed to the row-major kernel
// as its transpose (and vice versa below).
bool trans_A = A.is_rowmajor()? false : true;
bool trans_B = B.is_rowmajor()? false : true;
dmat_x_dmat(alpha, A.data(), trans_A, B.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols);
} else {
bool trans_A = A.is_colmajor()? false : true;
bool trans_B = B.is_colmajor()? false : true;
dmat_x_dmat_colmajor(alpha, A.data(), trans_A, B.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols);
}
return C;
}
// C = A'*B
// C : m*n, k is the dimension of the middle
// A, B, C are stored in row major!
template<typename val_type>
void dmat_trans_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) {
	// Only A is transposed; alpha = 1, beta = 0 (plain product, C overwritten).
	dmat_x_dmat(val_type(1.0), A, true, B, false, val_type(0.0), C, m, n, k);
}
// C=A*B
// A, B, C are stored in row major!
template<typename val_type>
void dmat_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) {
	// Neither operand transposed; alpha = 1, beta = 0 (C fully overwritten).
	dmat_x_dmat(val_type(1.0), A, false, B, false, val_type(0.0), C, m, n, k);
}
// Input: an n*k row-major matrix H
// Output: an k*k matrix H^TH
template<typename val_type>
void doHTH(const val_type *H, val_type *HTH, size_t n, size_t k) {
	// The row-major n*k buffer reads as a column-major k*n matrix, so the
	// column-major product (H_cm) * (H_cm)^T yields the k*k Gram matrix.
	dmat_x_dmat_colmajor(val_type(1.0), H, false, H, true, val_type(0.0), HTH, k, k, n);
}
/*
trace(W^T X H)
X is an m*n, sparse matrix
W is an m*k, row-majored array
H is an n*k, row-major
= sum over nonzeros X(i,j) of X(i,j) * <W(i,:), H(j,:)>, accumulated in
double for precision; parallelized over X's rows (CSR traversal).
*/
template<typename val_type>
val_type trace_dmat_T_smat_dmat(const val_type *W, const smat_t<val_type> &X, const val_type *H, const size_t k) {
size_t m = X.rows;
double ret = 0;
#pragma omp parallel for schedule(dynamic,50) shared(X,H,W) reduction(+:ret)
for(size_t i = 0; i < m; i++) {
const val_type *Wi = &W[k*i];
// NOTE(review): idx is `long` while row_ptr is indexed elsewhere with
// size_t — could truncate for very large nnz on LLP64; confirm.
for(long idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) {
const val_type *Hj = &H[X.col_idx[idx]*k];
double tmp=0;
// tmp = <W(i,:), H(j,:)>
for(size_t t = 0; t < k; t++)
tmp += Wi[t]*Hj[t];
ret += X.val_t[idx]*tmp;
}
}
return (val_type)ret;
}
/*
trace(W^T diag(D) H)
D is an m*1 vector
W is an m*k, row-majored array
H is an m*k, row-major array
Computed as sum_i D[i] * <W(i,:), H(i,:)>, accumulated in double.
*/
template<typename val_type>
val_type trace_dmat_T_diag_dmat(const val_type *W, const val_type *D, const val_type *H, const size_t m, const size_t k) {
	// do_dot_product takes non-const pointers, hence the const_casts.
	val_type *w = const_cast<val_type*>(W);
	val_type *h = const_cast<val_type*>(H);
	val_type *d = const_cast<val_type*>(D);
	double ret = 0.0;
#pragma omp parallel for schedule(static) shared(w,h,d) reduction(+:ret)
	for(size_t i = 0; i < m; i++) {
		val_type *wi = &w[i*k], *hi = &h[i*k];
		// BUG FIX: the original called do_dot_product(wi, wi, k), computing
		// trace(W^T diag(D) W) and leaving `hi` (and H) entirely unused.
		ret += do_dot_product(wi, hi, k) * d[i];
	}
	return (val_type)ret;
}
// trace(W^T diag(D) H) where the diagonal is supplied as a (vector-shaped)
// dense matrix D; reinterprets D's storage as a dvec_t view and forwards.
template<typename val_type>
val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dmat_t<val_type> &D, const dmat_t<val_type> &H) {
return trace_dmat_T_diag_dmat(W, dvec_t<val_type>(D.get_view()), H);
}
//------------------ Implementation of zip_it -----------------------
// helpler functions and classes for zip_it
// Value type materialized from a zip_ref: owns copies of both zipped
// elements. Ordering and equality compare the primary key x only.
template<class T1, class T2>
struct zip_body {
	T1 x; T2 y;
	zip_body(const zip_ref<T1,T2>& other): x(*other.x), y(*other.y){}
	// Fixed: >, ==, != previously took non-const references, so they could
	// not be called with const or temporary operands. All four comparisons
	// are now uniformly const-correct.
	bool operator<(const zip_body &other) const {return x < other.x;}
	bool operator>(const zip_body &other) const {return x > other.x;}
	bool operator==(const zip_body &other) const {return x == other.x;}
	bool operator!=(const zip_body &other) const {return x != other.x;}
};
// Proxy reference to a pair of elements living in two separate arrays.
// Used as zip_it's reference type so std::sort can permute both arrays in
// lock-step. Comparisons look at the primary key x only; assignment writes
// through both pointers (proxy semantics, deliberately not rebinding).
template<class T1, class T2>
struct zip_ref {
T1 *x; T2 *y;
zip_ref(T1 &x, T2 &y): x(&x), y(&y){}
zip_ref(zip_body<T1,T2>& other): x(&other.x), y(&other.y){}
bool operator<(zip_ref other) const {return *x < *other.x;}
bool operator>(zip_ref other) const {return *x > *other.x;}
bool operator==(zip_ref other) const {return *x == *other.x;}
bool operator!=(zip_ref other) const {return *x != *other.x;}
// Copy the referenced values (not the pointers) from another proxy.
zip_ref& operator=(zip_ref& other) {
*x = *other.x; *y = *other.y;
return *(this);
}
// Write a materialized zip_body back through this proxy (used by sort
// when moving a saved value into place).
zip_ref& operator=(zip_body<T1,T2> other) {
*x = other.x; *y = other.y;
return *(this);
}
};
// Swap the *referenced* elements of two zip_ref proxies (found by ADL from
// std::sort and friends); both underlying arrays are updated in lock-step.
template<class T1, class T2>
void swap(zip_ref<T1,T2> a, zip_ref<T1,T2> b) {
	std::swap(*a.x, *b.x);
	std::swap(*a.y, *b.y);
}
// Random-access iterator over two parallel sequences, presenting each
// position as a zip_ref proxy. Position/ordering/distance are taken from
// the first iterator only; the second is advanced in lock-step.
template<class IterT1, class IterT2>
struct zip_it {
typedef std::random_access_iterator_tag iterator_category;
typedef typename std::iterator_traits<IterT1>::value_type T1;
typedef typename std::iterator_traits<IterT2>::value_type T2;
typedef zip_body<T1,T2> value_type;
typedef zip_ref<T1,T2> reference; // proxy, not a true reference
typedef zip_body<T1,T2>* pointer;
typedef ptrdiff_t difference_type;
IterT1 x;
IterT2 y;
zip_it(IterT1 x, IterT2 y): x(x), y(y){}
reference operator*() {return reference(*x, *y);}
reference operator[](const difference_type n) const {return reference(x[n],y[n]);}
zip_it& operator++() {++x; ++y; return *this;} // prefix ++
zip_it& operator--() {--x; --y; return *this;} // prefix --
zip_it operator++(int) {return zip_it(x++,y++);} // sufix ++
zip_it operator--(int) {return zip_it(x--,y--);} // sufix --
zip_it operator+(const difference_type n) {return zip_it(x+n,y+n);}
zip_it operator-(const difference_type n) {return zip_it(x-n,y-n);}
zip_it& operator+=(const difference_type n) {x+=n; y+=n; return *this;}
zip_it& operator-=(const difference_type n) {x-=n; y-=n; return *this;}
bool operator<(const zip_it& other) {return x<other.x;}
bool operator>(const zip_it& other) {return x>other.x;}
bool operator==(const zip_it& other) {return x==other.x;}
bool operator!=(const zip_it& other) {return x!=other.x;}
difference_type operator-(const zip_it& other) {return x-other.x;}
};
// Factory with template-argument deduction: zip_iter(a, b) builds a
// zip_it<decltype(a), decltype(b)> without spelling out the types.
template<class IterT1, class IterT2>
zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y) {
return zip_it<IterT1,IterT2>(x,y);
}
// ---------------- Implementation of string split utility --------------
// Split `s` on `delim`, writing each token through the output iterator
// `result`. Consecutive delimiters yield empty tokens; a trailing delimiter
// does not (std::getline stops at end-of-stream).
template<typename Out>
void split(const std::string &s, char delim, Out result) {
	std::stringstream stream(s);
	for (std::string token; std::getline(stream, token, delim); ) {
		*(result++) = token;
	}
}
/*
std::vector<std::string> split(const std::string &s, char delim) {
std::vector<std::string> elems;
split(s, delim, std::back_inserter(elems));
return elems;
}
std::vector<std::string>& split(const std::string &s, char delim, std::vector<std::string>& elems) {
elems.clear();
split(s, delim, std::back_inserter(elems));
return elems;
}
*/
#undef gmat_t
#undef eye_t
#undef smat_t
#undef dmat_t
#undef gvec_t
#undef svec_t
#undef dvec_t
// C Interface
extern "C" {
// Storage-kind tags used by PyMatrix.type (see the wrapper class below for
// how each kind is interpreted).
enum {
DENSE_ROWMAJOR = 1,
DENSE_COLMAJOR = 2,
SPARSE = 3,
EYE = 4
};
// Plain-C matrix descriptor passed across the Python/C boundary. For dense
// kinds only `val` is used; for SPARSE the CSR/CSC index arrays and both
// value arrays (`val`, `val_t`) are consumed.
typedef struct {
uint64_t rows, cols, nnz;
size_t* row_ptr;
size_t* col_ptr;
uint32_t* row_idx;
uint32_t* col_idx;
void* val;
void* val_t;
int32_t type; // one of the enum tags above
} PyMatrix;
} // end of extern "C"
template<typename val_type>
class general_matrix_wrapper {
public:
typedef sparse_vector<val_type> svec_t;
typedef dense_vector<val_type> dvec_t;
typedef general_vector<val_type> gvec_t;
typedef sparse_matrix<val_type> smat_t;
typedef dense_matrix<val_type> dmat_t;
typedef identity_matrix<val_type> eye_t;
typedef general_matrix<val_type> gmat_t;
general_matrix_wrapper() {}
general_matrix_wrapper(const PyMatrix* py_mat_ptr) {
if(py_mat_ptr->type == DENSE_ROWMAJOR) {
dense = dmat_t(py_mat_ptr->rows, py_mat_ptr->cols, ROWMAJOR, static_cast<val_type*>(py_mat_ptr->val));
gmat_ptr = &dense;
} else if(py_mat_ptr->type == DENSE_COLMAJOR) {
dense = dmat_t(py_mat_ptr->rows, py_mat_ptr->cols, COLMAJOR, static_cast<val_type*>(py_mat_ptr->val));
gmat_ptr = &dense;
} else if(py_mat_ptr->type == SPARSE) {
sparse = smat_t(
py_mat_ptr->rows, py_mat_ptr->cols, py_mat_ptr->nnz,
static_cast<val_type*>(py_mat_ptr->val),
static_cast<val_type*>(py_mat_ptr->val_t),
py_mat_ptr->col_ptr, py_mat_ptr->row_ptr,
py_mat_ptr->row_idx, py_mat_ptr->col_idx);
gmat_ptr = &sparse;
}
}
size_t rows() const { return gmat_ptr->rows; }
size_t cols() const { return gmat_ptr->cols; }
gmat_t& get_gmat() { return *gmat_ptr; }
const gmat_t& get_gmat() const { return *gmat_ptr; }
bool is_sparse() const { return gmat_ptr->is_sparse(); }
bool is_dense() const { return gmat_ptr->is_dense(); }
bool is_identity() const { return gmat_ptr->is_identity(); }
smat_t& get_sparse() { return gmat_ptr->get_sparse(); }
const smat_t& get_sparse() const { return gmat_ptr->get_sparse(); }
dmat_t& get_dense() { return gmat_ptr->get_dense(); }
const dmat_t& get_dense() const { return gmat_ptr->get_dense(); }
general_matrix_wrapper<val_type> transpose() const {
general_matrix_wrapper gmw;
gmw.dense = this->dense.transpose();
if(is_sparse()) {
gmw.sparse = this->sparse.transpose();
gmw.gmat_ptr = &gmw.sparse;
} else if(is_dense()) {
gmw.dense = this->dense.transpose();
gmw.gmat_ptr = &gmw.dense;
} else if(is_identity()) {
gmw.eye = this->eye;
gmw.gmat_ptr = &gmw.eye;
}
return gmw;
}
private:
smat_t sparse;
dmat_t dense;
eye_t eye;
gmat_t* gmat_ptr;
};
#endif // RF_MATRIX_H
|
omp_bug1_fix.c | /******************************************************************************
* FILE: omp_bug1.c
* DESCRIPTION:
* This example attempts to show use of the parallel for construct. However
* it will generate errors at compile time. Try to determine what is causing
* the error. See omp_bug1fix.c for a corrected version.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
#define CHUNKSIZE 10
int main (int argc, char *argv[])
{
int i, chunk, tid;
float a[N], b[N], c[N];

/* Some initializations */
for (i=0; i < N; i++)
  a[i] = b[i] = i * 1.0;
chunk = CHUNKSIZE;

#pragma omp parallel shared(a,b,c,chunk) private(i,tid)
  {
  tid = omp_get_thread_num();

  /* Fix: use the work-sharing "omp for" directive. The original nested
   * "#pragma omp parallel for" opened a second parallel region inside the
   * outer one, so every outer-team thread redundantly executed the whole
   * loop (racing on c[]) instead of splitting the iterations. */
#pragma omp for schedule(static,chunk)
  for (i=0; i < N; i++)
    {
    c[i] = a[i] + b[i];
    printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
    }
  }  /* end of parallel region */

return 0;
}
|
dscatter.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Scatter the computed blocks into LU destination.
*
* <pre>
* -- Distributed SuperLU routine (version 6.1.1) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
*
* Modified:
* September 18, 2017, enable SIMD vectorized scatter operation.
*
*/
#include <math.h>
#include "superlu_ddefs.h"
/* Scatter the dense GEMM result in tempv[] (nbrow x nsupc, column-major)
 * into the destination block L(i,j) of the L factor, subtracting each
 * contribution (Schur-complement update). An indirection table is built
 * because row indices inside an L block are not sorted. */
static void
dscatter_l_1 (int ib,
int ljb,
int nsupc,
int_t iukp,
int_t* xsup,
int klst,
int nbrow,
int_t lptr,
int temp_nbrow,
int * usub,
int * lsub,
double *tempv,
int * indirect_thread,
int_t ** Lrowind_bc_ptr, double **Lnzval_bc_ptr,
gridinfo_t * grid)
{
// TAU_STATIC_TIMER_START("SCATTER_LB");
// printf("hello\n");
int_t rel, i, segsize, jj;
double *nzval;
int_t *index = Lrowind_bc_ptr[ljb];
int_t ldv = index[1]; /* LDA of the dest lusup. */
int_t lptrj = BC_HEADER;
int_t luptrj = 0;
int_t ijb = index[lptrj];
while (ijb != ib)
{
/* Search for dest block --
blocks are not ordered! */
luptrj += index[lptrj + 1];
lptrj += LB_DESCRIPTOR + index[lptrj + 1];
ijb = index[lptrj];
}
/*
* Build indirect table. This is needed because the
* indices are not sorted for the L blocks.
*/
int_t fnz = FstBlockC (ib);
lptrj += LB_DESCRIPTOR;
/* indirect_thread[global row - fnz] = local row within the dest block. */
for (i = 0; i < index[lptrj - 1]; ++i)
{
rel = index[lptrj + i] - fnz;
indirect_thread[rel] = i;
}
nzval = Lnzval_bc_ptr[ljb] + luptrj;
// tempv =bigV + (cum_nrow + cum_ncol*nbrow);
/* One destination column per column jj of the supernode; columns whose
* U segment is empty (segsize == 0) are skipped. */
for (jj = 0; jj < nsupc; ++jj)
{
segsize = klst - usub[iukp + jj];
// printf("segsize %d \n",segsize);
if (segsize) {
/*#pragma _CRI cache_bypass nzval,tempv */
for (i = 0; i < temp_nbrow; ++i) {
rel = lsub[lptr + i] - fnz;
/* Permute source row i to its slot in the dest block and subtract. */
nzval[indirect_thread[rel]] -= tempv[i];
// printf("i (src) %d, perm (dest) %d \n",i,indirect_thread[rel]);
#ifdef PI_DEBUG
double zz = 0.0;
// if(!(*(long*)&zz == *(long*)&tempv[i]) )
printf ("(%d %d, %0.3e, %0.3e, %3e ) ", ljb,
nzval - Lnzval_bc_ptr[ljb] + indirect_thread[rel],
nzval[indirect_thread[rel]] + tempv[i],
nzval[indirect_thread[rel]],tempv[i]);
//printing triplets (location??, old value, new value ) if none of them is zero
#endif
}
// printf("\n");
tempv += nbrow; /* advance to the next source column (LDA = nbrow) */
#ifdef PI_DEBUG
// printf("\n");
#endif
}
nzval += ldv; /* advance to the next destination column */
// printf("%d\n",nzval );
}
// TAU_STATIC_TIMER_STOP("SCATTER_LB");
} /* dscatter_l_1 */
/* SIMD-friendly variant of the L-block scatter: precomputes the full source
 * -> destination row permutation into indirect2[] so the inner subtraction
 * loop has no gather through indirect_thread[] and can be vectorized. */
static void
dscatter_l (
int ib, /* row block number of source block L(i,k) */
int ljb, /* local column block number of dest. block L(i,j) */
int nsupc, /* number of columns in destination supernode */
int_t iukp, /* point to destination supernode's index[] */
int_t* xsup,
int klst,
int nbrow, /* LDA of the block in tempv[] */
int_t lptr, /* Input, point to index[] location of block L(i,k) */
int temp_nbrow, /* number of rows of source block L(i,k) */
int_t* usub,
int_t* lsub,
double *tempv,
int* indirect_thread,int* indirect2,
int_t ** Lrowind_bc_ptr, double **Lnzval_bc_ptr,
gridinfo_t * grid)
{
int_t rel, i, segsize, jj;
double *nzval;
int_t *index = Lrowind_bc_ptr[ljb];
int_t ldv = index[1]; /* LDA of the destination lusup. */
int_t lptrj = BC_HEADER;
int_t luptrj = 0;
int_t ijb = index[lptrj];
while (ijb != ib) /* Search for destination block L(i,j) */
{
luptrj += index[lptrj + 1];
lptrj += LB_DESCRIPTOR + index[lptrj + 1];
ijb = index[lptrj];
}
/*
* Build indirect table. This is needed because the indices are not sorted
* in the L blocks.
*/
int_t fnz = FstBlockC (ib);
int_t dest_nbrow;
lptrj += LB_DESCRIPTOR;
dest_nbrow=index[lptrj - 1];
#if (_OPENMP>=201307)
#pragma omp simd
#endif
/* indirect_thread[global row - fnz] = local row within the dest block. */
for (i = 0; i < dest_nbrow; ++i) {
rel = index[lptrj + i] - fnz;
indirect_thread[rel] = i;
}
#if (_OPENMP>=201307)
#pragma omp simd
#endif
/* can be precalculated? */
for (i = 0; i < temp_nbrow; ++i) { /* Source index is a subset of dest. */
rel = lsub[lptr + i] - fnz;
indirect2[i] =indirect_thread[rel];
}
nzval = Lnzval_bc_ptr[ljb] + luptrj; /* Destination block L(i,j) */
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
/* Column-by-column scatter-subtract; empty U segments are skipped. */
for (jj = 0; jj < nsupc; ++jj) {
segsize = klst - usub[iukp + jj];
if (segsize) {
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < temp_nbrow; ++i) {
nzval[indirect2[i]] -= tempv[i];
}
tempv += nbrow; /* next source column (LDA = nbrow) */
}
nzval += ldv; /* next destination column */
}
} /* dscatter_l */
/* Scatter the dense GEMM result in tempv[] into the destination block
 * U(i,j) of the U factor (subtracting each contribution). U blocks are
 * located by a linear search over the block row's index[], then each
 * nonzero segment is updated relative to its first-nonzero row fnz. */
static void
dscatter_u (int ib,
int jb,
int nsupc,
int_t iukp,
int_t * xsup,
int klst,
int nbrow, /* LDA of the block in tempv[] */
int_t lptr, /* point to index location of block L(i,k) */
int temp_nbrow, /* number of rows of source block L(i,k) */
int_t* lsub,
int_t* usub,
double* tempv,
int_t ** Ufstnz_br_ptr, double **Unzval_br_ptr,
gridinfo_t * grid)
{
#ifdef PI_DEBUG
printf ("A(%d,%d) goes to U block \n", ib, jb);
#endif
// TAU_STATIC_TIMER_START("SCATTER_U");
// TAU_STATIC_TIMER_START("SCATTER_UB");
int_t jj, i, fnz, rel;
int segsize;
double *ucol;
int_t ilst = FstBlockC (ib + 1);
int_t lib = LBi (ib, grid);
int_t *index = Ufstnz_br_ptr[lib];
/* Reinitilize the pointers to the beginning of the k-th column/row of
* L/U factors.
* usub[] - index array for panel U(k,:)
*/
int_t iuip_lib, ruip_lib;
iuip_lib = BR_HEADER;
ruip_lib = 0;
/* U blocks in a block row are ordered by column; scan until jb. */
int_t ijb = index[iuip_lib];
while (ijb < jb) { /* Search for destination block. */
ruip_lib += index[iuip_lib + 1];
// printf("supersize[%ld] \t:%ld \n",ijb,SuperSize( ijb ) );
iuip_lib += UB_DESCRIPTOR + SuperSize (ijb);
ijb = index[iuip_lib];
}
/* Skip descriptor. Now point to fstnz index of block U(i,j). */
iuip_lib += UB_DESCRIPTOR;
// tempv = bigV + (cum_nrow + cum_ncol*nbrow);
for (jj = 0; jj < nsupc; ++jj) {
segsize = klst - usub[iukp + jj];
fnz = index[iuip_lib++]; /* first nonzero row of this U segment */
if (segsize) { /* Nonzero segment in U(k,j). */
ucol = &Unzval_br_ptr[lib][ruip_lib];
// printf("========Entering loop=========\n");
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < temp_nbrow; ++i) {
rel = lsub[lptr + i] - fnz; /* offset within the segment */
// printf("%d %d %d %d %d \n",lptr,i,fnz,temp_nbrow,nbrow );
// printf("hello ucol[%d] %d %d : \n",rel,lsub[lptr + i],fnz);
ucol[rel] -= tempv[i];
#ifdef PI_DEBUG
double zz = 0.0;
if (!(*(long *) &zz == *(long *) &tempv[i]))
printf ("(%d, %0.3e, %0.3e ) ", rel, ucol[rel] + tempv[i],
ucol[rel]);
//printing triplets (location??, old value, new value ) if none of them is zero
#endif
} /* for i = 0:temp_nbropw */
tempv += nbrow; /* Jump LDA to next column */
#ifdef PI_DEBUG
// printf("\n");
#endif
} /* if segsize */
ruip_lib += ilst - fnz; /* advance past this segment's stored rows */
} /* for jj = 0:nsupc */
#ifdef PI_DEBUG
// printf("\n");
#endif
// TAU_STATIC_TIMER_STOP("SCATTER_UB");
} /* dscatter_u */
/*Divide CPU-GPU dgemm work here*/
#ifdef PI_DEBUG
int Ngem = 2;
// int_t Ngem = 0;
int min_gpu_col = 6;
#else
// int_t Ngem = 0;
#endif
#ifdef GPU_ACC
/* Partition the GEMM workload (num_blks column blocks, workload prefix sums
 * in full_u_cols[]) between the CPU and up to nstreams GPU streams.
 * Outputs: *ncpu_blks = leading blocks kept on the CPU;
 * *num_streams_used = streams actually assigned work;
 * stream_end_col[s] = one-past-last block index for stream s. */
void
gemm_division_cpu_gpu(
int* num_streams_used, /*number of streams that will be used */
int* stream_end_col, /*array holding last column blk for each partition */
int * ncpu_blks, /*Number of CPU dgemm blks */
/*input */
int nbrow, /*number of row in A matrix */
int ldu, /*number of k in dgemm */
int nstreams,
int* full_u_cols, /*array containing prefix sum of work load */
int num_blks /*Number of work load */
)
{
int Ngem = sp_ienv_dist(7); /*get_mnk_dgemm ();*/
int min_gpu_col = get_cublas_nb ();
// Ngem = 1000000000;
/*
cpu is to gpu dgemm should be ideally 0:1 ratios to hide the total cost
However since there is gpu latency of around 20,000 ns implying about
200000 floating point calculation be done in that time so ~200,000/(2*nbrow*ldu)
should be done in cpu to hide the latency; we Ngem =200,000/2
*/
int i, j;
// {
// *num_streams_used=0;
// *ncpu_blks = num_blks;
// return;
// }
/* NOTE(review): this loop declares a new `int i`, shadowing the outer i
* declared above — harmless here, but easy to misread. */
for (int i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
*ncpu_blks = 0;
/*easy returns -1 when number of column are less than threshold */
if (full_u_cols[num_blks - 1] < (Ngem / (nbrow * ldu)) || num_blks == 1 )
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
#ifdef PI_DEBUG
printf ("full_u_cols[num_blks-1] %d %d \n",
full_u_cols[num_blks - 1], (Ngem / (nbrow * ldu)));
printf ("Early return \n");
#endif
return;
}
/* Easy return -2 when number of streams =0 */
if (nstreams == 0)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
/* code */
}
/*find first block where count > Ngem */
for (i = 0; i < num_blks - 1; ++i) /*I can use binary search here */
{
if (full_u_cols[i + 1] > Ngem / (nbrow * ldu))
break;
}
*ncpu_blks = i + 1;
int_t cols_remain =
full_u_cols[num_blks - 1] - full_u_cols[*ncpu_blks - 1];
#ifdef PI_DEBUG
printf ("Remaining cols %d num_blks %d cpu_blks %d \n", cols_remain,
num_blks, *ncpu_blks);
#endif
if (cols_remain > 0)
{
*num_streams_used = 1; /* now atleast one stream would be used */
#ifdef PI_DEBUG
printf ("%d %d %d %d \n", full_u_cols[num_blks - 1],
full_u_cols[*ncpu_blks], *ncpu_blks, nstreams);
#endif
/* Target columns per stream: at least the cuBLAS tile width and
* enough work to hide launch latency. */
int_t FP_MIN = 200000 / (nbrow * ldu);
int_t cols_per_stream = SUPERLU_MAX (min_gpu_col, cols_remain / nstreams);
cols_per_stream = SUPERLU_MAX (cols_per_stream, FP_MIN);
#ifdef PI_DEBUG
printf ("cols_per_stream :\t%d\n", cols_per_stream);
#endif
/* Walk the prefix sums, cutting a stream boundary each time the
* running column count passes the next cutoff. */
int_t cutoff = cols_per_stream + full_u_cols[*ncpu_blks - 1];
for (int_t i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
j = *ncpu_blks;
for (i = 0; i < nstreams - 1; ++i)
{
int_t st = (i == 0) ? (*ncpu_blks) : stream_end_col[i - 1];
for (j = st; j < num_blks - 1; ++j)
{
#ifdef PI_DEBUG
printf ("i %d, j %d, %d %d ", i, j, full_u_cols[j + 1],
cutoff);
#endif
if (full_u_cols[j + 1] > cutoff)
{
#ifdef PI_DEBUG
printf ("cutoff met \n");
#endif
cutoff = cols_per_stream + full_u_cols[j];
stream_end_col[i] = j + 1;
*num_streams_used += 1;
j++;
break;
}
#ifdef PI_DEBUG
printf ("\n");
#endif
}
}
}
}
/* Same CPU/GPU-stream workload partitioning as gemm_division_cpu_gpu(),
 * but reading the per-block prefix sums from Ublock_info[].full_u_cols
 * instead of a bare int array. Outputs have identical meaning. */
void
gemm_division_new (int * num_streams_used, /*number of streams that will be used */
int * stream_end_col, /*array holding last column blk for each partition */
int * ncpu_blks, /*Number of CPU dgemm blks */
/*input */
int nbrow, /*number of row in A matrix */
int ldu, /*number of k in dgemm */
int nstreams,
Ublock_info_t *Ublock_info, /*array containing prefix sum of work load */
int num_blks /*Number of work load */
)
{
int Ngem = sp_ienv_dist(7); /*get_mnk_dgemm ();*/
int min_gpu_col = get_cublas_nb ();
// Ngem = 1000000000;
/*
cpu is to gpu dgemm should be ideally 0:1 ratios to hide the total cost
However since there is gpu latency of around 20,000 ns implying about
200000 floating point calculation be done in that time so ~200,000/(2*nbrow*ldu)
should be done in cpu to hide the latency; we Ngem =200,000/2
*/
int_t i, j;
/* Default: every stream's partition is empty (ends at num_blks). */
for (int i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
*ncpu_blks = 0;
/*easy returns -1 when number of column are less than threshold */
if (Ublock_info[num_blks - 1].full_u_cols < (Ngem / (nbrow * ldu)) || num_blks == 1)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
}
/* Easy return -2 when number of streams =0 */
if (nstreams == 0)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
/* code */
}
/*find first block where count > Ngem */
for (i = 0; i < num_blks - 1; ++i) /*I can use binary search here */
{
if (Ublock_info[i + 1].full_u_cols > Ngem / (nbrow * ldu))
break;
}
*ncpu_blks = i + 1;
int_t cols_remain =
Ublock_info [num_blks - 1].full_u_cols - Ublock_info[*ncpu_blks - 1].full_u_cols;
if (cols_remain > 0)
{
*num_streams_used = 1; /* now atleast one stream would be used */
/* Target columns per stream: at least the cuBLAS tile width and
* enough work to hide launch latency. */
int_t FP_MIN = 200000 / (nbrow * ldu);
int_t cols_per_stream = SUPERLU_MAX (min_gpu_col, cols_remain / nstreams);
cols_per_stream = SUPERLU_MAX (cols_per_stream, FP_MIN);
/* Walk the prefix sums, cutting a stream boundary each time the
* running column count passes the next cutoff. */
int_t cutoff = cols_per_stream + Ublock_info[*ncpu_blks - 1].full_u_cols;
for (int_t i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
j = *ncpu_blks;
for (i = 0; i < nstreams - 1; ++i)
{
int_t st = (i == 0) ? (*ncpu_blks) : stream_end_col[i - 1];
for (j = st; j < num_blks - 1; ++j)
{
if (Ublock_info[j + 1].full_u_cols > cutoff)
{
cutoff = cols_per_stream + Ublock_info[j].full_u_cols;
stream_end_col[i] = j + 1;
*num_streams_used += 1;
j++;
break;
}
}
}
}
}
#endif /* defined GPU_ACC */
|
DRB008-indirectaccess4-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two pointers have a distance of 12 (xa2 - xa1 = 12).
They are used as base addresses for indirect array accesses using an index set (another array).
The index set has two indices with distance of 12 :
indexSet[1]- indexSet[0] = 533 - 521 = 12
So xa1[idx] and xa2[idx] may cause a loop-carried dependence between iterations i=0 and i=1.
We use the default loop scheduling (static even) in OpenMP.
It is possible that two dependent iterations will be scheduled
within a same chunk to a same thread. So there is no runtime data races.
N is 180; the two iterations i=0 and i=1 have a loop-carried dependence.
For static even scheduling, we must have at least 180 threads (180/180=1 iterations)
so iteration 0 and 1 will be scheduled to two different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
// 180 precomputed indices into `base`. The first two entries (521 and 533)
// differ by exactly 12 -- the same offset that separates the xa1/xa2 pointers
// in main() -- which creates the intended loop-carried dependence between
// iterations 0 and 1.
int indexSet[N] = {
521, 533, 525, 527, 529, 531, // 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
// DataRaceBench DRB008: indirect array accesses through two pointers whose
// offset (12) matches the distance between indexSet[0] and indexSet[1],
// producing an intentional loop-carried dependence. Do not "fix" the
// dependence -- it is the benchmark's payload.
int main (int argc, char* argv[])
{
// Largest index used is 2013 via xa1, plus the 12-element xa2 offset.
double * base = (double*) malloc(sizeof(double)* (2013+12+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
// Two overlapping views of the same buffer, 12 elements apart, so
// xa2[521] and xa1[533] name the same element (base[533]).
double * xa1 = base;
double * xa2 = xa1 + 12;
int i;
// initialize segments touched by indexSet
#pragma omp parallel for private(i )
for (i =521; i<= 2025; ++i)
{
base[i]=0.5*i;
}
// Indirect updates through both views; per the header comment, xa1[idx]
// vs xa2[idx] is the documented dependence pair.
// NOTE(review): this loop is serial in this copy, yet the file header
// discusses OpenMP scheduling of these iterations -- the upstream
// DataRaceBench version likely carries a `#pragma omp parallel for` here;
// verify against the original before relying on this file.
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0;
xa2[idx]+= 3.0;
}
// Print two sampled results for a quick eyeball check.
printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
free (base);
return 0;
}
|
parallel_invoker.h | // Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Parallel for loop execution.
// For details adapt parallel_using_* flags defined in parallel_invoker.cc.
// Usage example (for 1D):
// Define Functor or lambda function that implements:
// void operator()(const BlockedRange & range) const;
// (in addition functor needs to be copyable).
// Execute a for loop in parallel from 0 to N via:
// ParallelFor(0, // start_index
// num_frames, // end_index, exclusive
// 1 // number of elements processed per iteration
// [](const BlockedRange& range) {
// // Process per-thread sub-range
// for (int i = range.begin(); i < range.end(); ++i) {
// // Process i'th item.
// }
// }
// Specific implementation to copy a vector of images in parallel.
// class CopyInvoker {
// public:
// CopyInvoker(const vector<cv::Mat>& inputs,
// vector<cv::Mat*>* outputs)
// : inputs_(inputs), outputs_(outputs) {
// }
// CopyInvoker(const CopyInvoker& rhs)
// : inputs_(rhs.inputs_), outputs_(rhs.outputs_) {
// }
// void operator()(const BlockedRange& range) {
// for (int frame = range.begin(); frame < range.end(); ++frame) {
// inputs_[frame].copyTo(*(*outputs_)[frame]);
// }
// }
// private:
// const vector<cv::Mat>& inputs_;
// vector<cv::Mat*>* outputs_;
// }
// vector<cv::Mat> inputs;
// vector<cv::Mat*> outputs;
// ParallelFor(0, num_frames, 1, CopyInvoker(inputs, &outputs));
//
// OR (with lambdas):
// ParallelFor(0, num_frames, 1,
// [&inputs, &outputs](const BlockedRange& range) {
// for (int frame = range.begin(); frame < range.end(); ++frame) {
// inputs[frame].copyTo(*(outputs)[frame]);
// }
// }
#ifndef MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_
#define MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_
#include <stddef.h>
#include <memory>
#include "absl/synchronization/mutex.h"
#include "mediapipe/framework/port/logging.h"
#ifdef PARALLEL_INVOKER_ACTIVE
#include "mediapipe/framework/port/threadpool.h"
#ifdef __APPLE__
#include <dispatch/dispatch.h>
#include <stdatomic.h>
#endif
#endif // PARALLEL_INVOKER_ACTIVE
// Specifies parallelization implementation to use.
enum PARALLEL_INVOKER_MODE {
PARALLEL_INVOKER_NONE = 0, // Uses single threaded execution
PARALLEL_INVOKER_THREAD_POOL = 1, // Uses //thread/threadpool
PARALLEL_INVOKER_OPENMP = 2, // Uses OpenMP (requires compiler support)
PARALLEL_INVOKER_GCD = 3, // Uses GCD (Apple)
PARALLEL_INVOKER_MAX_VALUE = 4, // Increase when adding more modes
};
extern int flags_parallel_invoker_mode;
extern int flags_parallel_invoker_max_threads;
// Note flag: Parallel processing only activated if
// PARALLEL_INVOKER_ACTIVE is defined.
namespace mediapipe {
// Partitions the range [begin, end) into equal blocks of size grain_size each
// (except last one, might be less than grain_size).
// Half-open index interval [begin, end) plus the block size used when the
// range is partitioned for parallel execution. The trailing block of a
// partition may be smaller than grain_size.
class BlockedRange {
 public:
  BlockedRange(int begin, int end, int grain_size)
      : start_(begin), limit_(end), block_(grain_size) {}

  int begin() const { return start_; }
  int end() const { return limit_; }
  int grain_size() const { return block_; }

 private:
  int start_;  // first index, inclusive
  int limit_;  // one past the last index
  int block_;  // partition block size
};
// Partitions the range row_range x col_range into equal
// blocks of size row_range.grain_size() x col_range.grain_size() each
// (except last column and row might be of size less than grain_size in one
// or both of their dimensions).
class BlockedRange2D {
public:
BlockedRange2D(const BlockedRange& rows, const BlockedRange& cols)
: rows_(rows), cols_(cols) {}
const BlockedRange& rows() const { return rows_; }
const BlockedRange& cols() const { return cols_; }
private:
BlockedRange rows_;
BlockedRange cols_;
};
#ifdef PARALLEL_INVOKER_ACTIVE
// Singleton ThreadPool for parallel invoker.
ThreadPool* ParallelInvokerThreadPool();
#ifdef __APPLE__
// Enable to allow GCD as an option beside ThreadPool.
#define USE_PARALLEL_INVOKER_GCD 1
#define CHECK_GCD_PARALLEL_WORK_COUNT DEBUG
// Bundles the user's invoker with the 1D row range being processed so both
// can travel through dispatch_apply_f's single void* context argument.
// With CHECK_GCD_PARALLEL_WORK_COUNT enabled it also counts how many times
// the invoker was handed out, letting callers verify GCD launched the
// expected number of tasks.
template <class Invoker>
class ParallelInvokerGCDContext {
public:
ParallelInvokerGCDContext(const Invoker& invoker, const BlockedRange& rows)
: local_invoker_(invoker), rows_(rows) {
#if CHECK_GCD_PARALLEL_WORK_COUNT
count_ = 0;
#endif
}
// Returns the stored invoker. In checked builds this also increments the
// task counter, so it should be called exactly once per dispatched task.
const Invoker& invoker() {
#if CHECK_GCD_PARALLEL_WORK_COUNT
// Implicitly tracking the # of launched tasks at invoker retrieval.
atomic_fetch_add(&count_, 1);
#endif
return local_invoker_;
}
const BlockedRange& rows() const { return rows_; }
#if CHECK_GCD_PARALLEL_WORK_COUNT
const int count() { return atomic_load(&count_); }
#endif
private:
Invoker local_invoker_;
// NOTE(review): reference member -- the caller's BlockedRange must outlive
// this context (true for the stack-allocated usage in ParallelFor below).
const BlockedRange& rows_;
#if CHECK_GCD_PARALLEL_WORK_COUNT
_Atomic(int32_t) count_;
#endif
};
// 2D variant: extends the 1D context with the column range. Only the row
// range is partitioned across GCD tasks; every task receives the full
// column range (see ParallelForGCDTask2D).
template <class Invoker>
class ParallelInvokerGCDContext2D : public ParallelInvokerGCDContext<Invoker> {
public:
ParallelInvokerGCDContext2D(const Invoker& invoker, const BlockedRange& rows,
const BlockedRange& cols)
: ParallelInvokerGCDContext<Invoker>(invoker, rows), cols_(cols) {}
const BlockedRange& cols() const { return cols_; }
private:
// Stored by value (unlike rows, which the base class holds by reference).
BlockedRange cols_;
};
// Trampoline passed to dispatch_apply_f. Recovers the typed context, carves
// out the index-th grain-sized slice of the row range (clamped at the range
// end for the trailing slice), and runs the user functor on it. `index` is
// the GCD task number in [0, iterations).
template <class Invoker>
static void ParallelForGCDTask(void* context, size_t index) {
ParallelInvokerGCDContext<Invoker>* invoker_context =
static_cast<ParallelInvokerGCDContext<Invoker>*>(context);
const BlockedRange& all_tasks = invoker_context->rows();
int start = all_tasks.begin() + index * all_tasks.grain_size();
int end = std::min(all_tasks.end(), start + all_tasks.grain_size());
BlockedRange this_task(start, end, all_tasks.grain_size());
// invoker() also bumps the context's task counter in checked builds.
const Invoker& invoker = invoker_context->invoker();
invoker(this_task);
}
// 2D trampoline for dispatch_apply_f: slices the row range exactly like
// ParallelForGCDTask, then pairs the slice with the context's full column
// range, so parallelism is across rows only.
template <class Invoker>
static void ParallelForGCDTask2D(void* context, size_t index) {
ParallelInvokerGCDContext2D<Invoker>* invoker_context =
static_cast<ParallelInvokerGCDContext2D<Invoker>*>(context);
// Partitioning across rows.
const BlockedRange& all_tasks = invoker_context->rows();
int start = all_tasks.begin() + index * all_tasks.grain_size();
int end = std::min(all_tasks.end(), start + all_tasks.grain_size());
BlockedRange this_task(start, end, all_tasks.grain_size());
const Invoker& invoker = invoker_context->invoker();
invoker(BlockedRange2D(this_task, invoker_context->cols()));
}
#endif // __APPLE__
#endif // PARALLEL_INVOKER_ACTIVE
// Simple wrapper for compatibility with below ParallelFor function.
// Runs `invoker` over the whole [start, end) range on the calling thread.
// The signature mirrors ParallelFor for drop-in compatibility; `grain_size`
// is accepted but the entire range is handed to the invoker as one block
// with grain size 1, exactly as ParallelFor's serial fallback does.
template <class Invoker>
void SerialFor(size_t start, size_t end, size_t grain_size,
               const Invoker& invoker) {
  const BlockedRange whole_range(start, end, 1);
  invoker(whole_range);
}
// Validates flags_parallel_invoker_mode against what the current platform and
// build configuration actually support, rewriting it to a supported mode when
// necessary. Called at the top of every ParallelFor/ParallelFor2D dispatch.
inline void CheckAndSetInvokerOptions() {
#if defined(PARALLEL_INVOKER_ACTIVE)
#if defined(__ANDROID__)
// If unsupported option is selected, force usage of OpenMP if detected, and
// ThreadPool otherwise.
if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE &&
flags_parallel_invoker_mode != PARALLEL_INVOKER_THREAD_POOL &&
flags_parallel_invoker_mode != PARALLEL_INVOKER_OPENMP) {
#if defined(_OPENMP)
LOG(WARNING) << "Unsupported invoker mode selected on Android. "
<< "OpenMP linkage detected, so falling back to OpenMP";
flags_parallel_invoker_mode = PARALLEL_INVOKER_OPENMP;
#else // _OPENMP
// Fallback mode for active parallel invoker without OpenMP is ThreadPool.
LOG(WARNING) << "Unsupported invoker mode selected on Android. "
<< "Falling back to ThreadPool";
flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
#endif // _OPENMP
}
#endif // __ANDROID__
#if defined(__APPLE__) || defined(__EMSCRIPTEN__)
// Force usage of ThreadPool if unsupported option is selected.
// (OpenMP is not supported on iOS, due to missing clang support).
if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE &&
#if defined(USE_PARALLEL_INVOKER_GCD)
flags_parallel_invoker_mode != PARALLEL_INVOKER_GCD &&
#endif // USE_PARALLEL_INVOKER_GCD
flags_parallel_invoker_mode != PARALLEL_INVOKER_THREAD_POOL) {
LOG(WARNING) << "Unsupported invoker mode selected on iOS. "
<< "Falling back to ThreadPool mode";
flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
}
#endif // __APPLE__ || __EMSCRIPTEN__
// NOTE(review): on desktop builds (not Apple/Emscripten/Android) the next
// line unconditionally forces ThreadPool, discarding even a valid OpenMP
// request before the OpenMP availability check below ever runs -- confirm
// this is intended rather than an over-broad platform guard.
#if !defined(__APPLE__) && !defined(__EMSCRIPTEN__) && !defined(__ANDROID__)
flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
#endif // !__APPLE__ && !__EMSCRIPTEN__ && !__ANDROID__
// If OpenMP is requested, make sure we can actually use it, and fall back
// to ThreadPool if not.
if (flags_parallel_invoker_mode == PARALLEL_INVOKER_OPENMP) {
#if !defined(_OPENMP)
LOG(ERROR) << "OpenMP invoker mode selected but not compiling with OpenMP "
<< "enabled. Falling back to ThreadPool";
flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
#endif // _OPENMP
}
#else // PARALLEL_INVOKER_ACTIVE
if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE) {
LOG(ERROR) << "Parallel execution requested but PARALLEL_INVOKER_ACTIVE "
<< "compile flag is not set. Falling back to single threaded "
<< "execution.";
flags_parallel_invoker_mode = PARALLEL_INVOKER_NONE;
}
#endif // PARALLEL_INVOKER_ACTIVE
// Final sanity bounds on the (possibly rewritten) mode value.
CHECK_LT(flags_parallel_invoker_mode, PARALLEL_INVOKER_MAX_VALUE)
<< "Invalid invoker mode specified.";
CHECK_GE(flags_parallel_invoker_mode, 0) << "Invalid invoker mode specified.";
}
// Performs parallel iteration from [start to end), scheduling grain_size
// iterations per thread. For each iteration
// invoker(BlockedRange(thread_local_start, thread_local_end))
// is called. Each thread is given its local copy of invoker, i.e.
// invoker needs to have copy constructor defined.
template <class Invoker>
void ParallelFor(size_t start, size_t end, size_t grain_size,
const Invoker& invoker) {
#ifdef PARALLEL_INVOKER_ACTIVE
CheckAndSetInvokerOptions();
switch (flags_parallel_invoker_mode) {
#if defined(__APPLE__)
case PARALLEL_INVOKER_GCD: {
// Number of grain-sized blocks, rounded up (ceil-div).
int iterations_remain = (end - start + grain_size - 1) / grain_size;
CHECK_GT(iterations_remain, 0);
if (iterations_remain == 1) {
// Execute invoker serially.
invoker(BlockedRange(start, std::min(end, start + grain_size), 1));
} else {
// Context lives on this stack frame; dispatch_apply_f blocks until all
// tasks finish, so the reference held by the context stays valid.
BlockedRange all_tasks(start, end, grain_size);
ParallelInvokerGCDContext<Invoker> context(invoker, all_tasks);
dispatch_queue_t concurrent_queue =
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
dispatch_apply_f(iterations_remain, concurrent_queue, &context,
ParallelForGCDTask<Invoker>);
#if CHECK_GCD_PARALLEL_WORK_COUNT
CHECK_EQ(iterations_remain, context.count());
#endif
}
break;
}
#endif // __APPLE__
case PARALLEL_INVOKER_THREAD_POOL: {
int iterations_remain = (end - start + grain_size - 1) / grain_size;
CHECK_GT(iterations_remain, 0);
if (iterations_remain == 1) {
// Execute invoker serially.
invoker(BlockedRange(start, std::min(end, start + grain_size), 1));
break;
}
// Hand-rolled countdown latch: tasks decrement under the mutex; this
// thread waits on the condvar until all blocks have run.
struct {
absl::Mutex mutex;
absl::CondVar completed;
int iterations_remain ABSL_GUARDED_BY(mutex);
} loop;
{
absl::MutexLock lock(&loop.mutex);
loop.iterations_remain = iterations_remain;
}
// NOTE(review): `x` is int while start/end are size_t; ranges beyond
// INT_MAX would overflow. Presumably callers pass small counts (frames);
// confirm before using with very large ranges.
for (int x = start; x < end; x += grain_size) {
auto loop_func = [x, end, grain_size, &loop, invoker]() {
// Execute invoker.
invoker(BlockedRange(x, std::min(end, x + grain_size), 1));
// Decrement counter.
absl::MutexLock lock(&loop.mutex);
--loop.iterations_remain;
if (loop.iterations_remain == 0) {
loop.completed.SignalAll();
}
};
// Attempt to run in parallel, if busy run serial to avoid deadlocking.
// This can happen during nested invocation of ParallelFor, as if the
// loop iteration itself is calling ParallelFor we might deadlock if
// we can not guarantee for the iteration to be scheduled.
// NOTE(review): no explicit serial fallback is visible here -- the task
// is always Schedule()d; confirm the fallback described above is
// implemented inside ParallelInvokerThreadPool()'s Schedule.
ParallelInvokerThreadPool()->Schedule(loop_func);
}
// Wait on termination of all iterations.
loop.mutex.Lock();
while (loop.iterations_remain > 0) {
loop.completed.Wait(&loop.mutex);
}
loop.mutex.Unlock();
break;
}
case PARALLEL_INVOKER_OPENMP: {
// Use thread-local copy of invoker.
Invoker local_invoker(invoker);
// Note: OpenMP path ignores grain_size; each iteration is one element.
#pragma omp parallel for firstprivate(local_invoker) \
num_threads(flags_parallel_invoker_max_threads)
for (int x = start; x < end; ++x) {
local_invoker(BlockedRange(x, x + 1, 1));
}
break;
}
case PARALLEL_INVOKER_NONE: {
SerialFor(start, end, grain_size, invoker);
break;
}
case PARALLEL_INVOKER_MAX_VALUE: {
LOG(FATAL) << "Impossible.";
break;
}
}
#else
SerialFor(start, end, grain_size, invoker);
#endif // PARALLEL_INVOKER_ACTIVE
}
// Simple wrapper for compatibility with below ParallelFor2D function.
// Runs `invoker` once over the full 2D rectangle on the calling thread.
// Mirrors ParallelFor2D's signature; `grain_size` is accepted for
// compatibility and both ranges are passed with grain size 1.
template <class Invoker>
void SerialFor2D(size_t start_row, size_t end_row, size_t start_col,
                 size_t end_col, size_t grain_size, const Invoker& invoker) {
  const BlockedRange row_range(start_row, end_row, 1);
  const BlockedRange col_range(start_col, end_col, 1);
  invoker(BlockedRange2D(row_range, col_range));
}
// Same as above ParallelFor for 2D iteration.
template <class Invoker>
void ParallelFor2D(size_t start_row, size_t end_row, size_t start_col,
size_t end_col, size_t grain_size, const Invoker& invoker) {
#ifdef PARALLEL_INVOKER_ACTIVE
CheckAndSetInvokerOptions();
switch (flags_parallel_invoker_mode) {
#if defined(__APPLE__)
case PARALLEL_INVOKER_GCD: {
// Parallelism is across row blocks only; each task gets all columns.
const int iterations_remain =
(end_row - start_row + grain_size - 1) / grain_size;
CHECK_GT(iterations_remain, 0);
if (iterations_remain == 1) {
// Execute invoker serially.
invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1),
BlockedRange(start_col, end_col, 1)));
} else {
BlockedRange all_tasks(start_row, end_row, grain_size);
ParallelInvokerGCDContext2D<Invoker> context(
invoker, all_tasks, BlockedRange(start_col, end_col, grain_size));
dispatch_queue_t concurrent_queue =
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
dispatch_apply_f(iterations_remain, concurrent_queue, &context,
ParallelForGCDTask2D<Invoker>);
#if CHECK_GCD_PARALLEL_WORK_COUNT
CHECK_EQ(iterations_remain, context.count());
#endif
}
break;
}
#endif // __APPLE__
case PARALLEL_INVOKER_THREAD_POOL: {
// Note: unlike the GCD branch, this branch ignores grain_size and
// schedules exactly one row per task.
int iterations_remain = end_row - start_row; // Guarded by loop_mutex
CHECK_GT(iterations_remain, 0);
if (iterations_remain == 1) {
// Execute invoker serially.
invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1),
BlockedRange(start_col, end_col, 1)));
break;
}
// Countdown latch: each task decrements under loop_mutex; this thread
// waits on loop_completed until the count reaches zero.
absl::Mutex loop_mutex;
absl::CondVar loop_completed;
for (int y = start_row; y < end_row; ++y) {
auto loop_func = [y, start_col, end_col, &loop_mutex, &loop_completed,
&iterations_remain, invoker]() {
// Execute invoker.
invoker(BlockedRange2D(BlockedRange(y, y + 1, 1),
BlockedRange(start_col, end_col, 1)));
// Decrement counter.
absl::MutexLock lock(&loop_mutex);
--iterations_remain;
if (iterations_remain == 0) {
loop_completed.Signal();
}
};
// Attempt to run in parallel, if busy run serial to avoid deadlocking.
// NOTE(review): as in ParallelFor, no serial fallback is visible here;
// confirm Schedule() provides it.
ParallelInvokerThreadPool()->Schedule(loop_func);
}
// Wait on termination of all iterations.
loop_mutex.Lock();
while (iterations_remain > 0) {
loop_completed.Wait(&loop_mutex);
}
loop_mutex.Unlock();
break;
}
case PARALLEL_INVOKER_OPENMP: {
// Use thread-local copy of invoker.
Invoker local_invoker(invoker);
#pragma omp parallel for firstprivate(local_invoker) \
num_threads(flags_parallel_invoker_max_threads)
for (int y = start_row; y < end_row; ++y) {
local_invoker(BlockedRange2D(BlockedRange(y, y + 1, 1),
BlockedRange(start_col, end_col, 1)));
}
break;
}
case PARALLEL_INVOKER_NONE: {
SerialFor2D(start_row, end_row, start_col, end_col, grain_size, invoker);
break;
}
case PARALLEL_INVOKER_MAX_VALUE: {
LOG(FATAL) << "Impossible.";
break;
}
}
#else
SerialFor2D(start_row, end_row, start_col, end_col, grain_size, invoker);
#endif // PARALLEL_INVOKER_ACTIVE
}
} // namespace mediapipe
#endif // MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_
|
GB_binop__le_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_fp64)
// A*D function (colscale): GB (_AxD__le_fp64)
// D*A function (rowscale): GB (_DxB__le_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__le_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__le_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_fp64)
// C=scalar+B GB (_bind1st__le_fp64)
// C=scalar+B' GB (_bind1st_tran__le_fp64)
// C=A+scalar GB (_bind2nd__le_fp64)
// C=A'+scalar GB (_bind2nd_tran__le_fp64)
// C type: bool
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_FP64 || GxB_NO_LE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (Dead code: the generator disables the dense C += A+B kernel here because
// LE is not in the list of supported accumulators above.)
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The included template expands with this file's macros, so the "add" here
// is GB_BINOP: cij = (aij <= bij), written into a bool-typed C.
void GB (_Cdense_ewise3_noaccum__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: the accumulation body is compiled out (#if 0) for this operator,
// presumably because LE is not a valid accumulator; the function is a no-op
// that reports success (or GrB_NO_VALUE when the operator is disabled).
GrB_Info GB (_Cdense_accumB__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Same situation as above: the scalar-accumulate body is disabled (#if 0),
// so this is a success-returning no-op for the LE operator.
GrB_Info GB (_Cdense_accumb__le_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D (column scale): the template applies GB_BINOP (aij <= d_jj) to
// each entry of A; Cx is the bool-typed output array (GB_CTYPE).
GrB_Info GB (_AxD__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B (row scale): mirror of the above, scaling rows of B by D's
// diagonal via GB_BINOP (d_ii <= bij).
GrB_Info GB (_DxB__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (with optional mask M): "add" is GB_BINOP, cij = aij <= bij.
// For eWiseUnion, alpha/beta scalars substitute for entries missing from A
// or B respectively; both are read here only when is_eWiseUnion is set.
GrB_Info GB (_AaddB__le_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspace slicings declared here; released by GB_FREE_WORKSPACE below.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional (complemented) mask, for
// sparse/hyper C. All work happens in the included meta template, expanded
// with this file's GB_BINOP (cij = aij <= bij).
GrB_Info GB (_AemultB_08__le_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full. GB_BINOP_FLIP is 0
// for this operator (see its definition above), so only the non-flipped
// branch below is compiled; the flipxy parameter is effectively unused here.
GrB_Info GB (_AemultB_02__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): mask M sparse/hyper, A and B bitmap/full; all work
// is in the included template, using GB_BINOP (cij = aij <= bij).
GrB_Info GB (_AemultB_04__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap variant of eWiseMult; same operator, bitmap-format output C.
GrB_Info GB (_AemultB_bitmap__le_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for every entry of B, with the scalar bound to the
// first operand. Safe when Cx aliases Bx: each element is read before its
// slot is written, and iterations are independent.
GrB_Info GB (_bind1st__le_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// Skip positions reported absent by the bitmap test GBB(Bb, p).
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for every entry of A, with the scalar bound to the
// second operand. Mirror of _bind1st above; iterations are independent, so
// the parallel loop is race-free even when Cx aliases Ax.
GrB_Info GB (_bind2nd__le_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Skip positions reported absent by the bitmap test GBB(Ab, p).
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x <= aij), using the
// GB_CAST_OP macro defined just above and the GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__le_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
// the template performs the transpose; Workspaces/A_slice/nworkspaces
// are consumed by GB_unop_transpose.c — see that file for their meaning
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated code redefines GB_ATYPE after the function body as well
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij <= y), using the
// GB_CAST_OP macro defined just above and the GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
// the template performs the transpose; Workspaces/A_slice/nworkspaces
// are consumed by GB_unop_transpose.c — see that file for their meaning
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
prettyprint.c | /*
$Id$
Copyright 1989-2014 MINES ParisTech
This file is part of PIPS.
PIPS is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
PIPS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PIPS. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef HAVE_CONFIG_H
#include "pips_config.h"
#endif
#ifndef lint
char lib_ri_util_prettyprint_c_rcsid[] = "$Id$";
#endif /* lint */
/*
* Prettyprint all kinds of ri related data structures
*
* Modifications:
* - In order to remove the extra parentheses, I made the several changes:
* (1) At the intrinsic_handler, the third term is added to indicate the
* precedence, and accordingly words_intrinsic_precedence(obj) is built
* to get the precedence of the call "obj".
* (2) words_subexpression is created to distinguish the
* words_expression. It has two arguments, expression and
* precedence. where precedence is newly added. In case of failure
* of words_subexpression , that is, when
* syntax_call_p is false, we use words_expression instead.
* (3) When words_call is firstly called , we give it the lowest precedence,
* that is 0.
* Lei ZHOU Nov. 4, 1991
*
* - Addition of CMF and CRAFT prettyprints. Only text_loop() has been
* modified.
* Alexis Platonoff, Nov. 18, 1994
* - Modifications of sentence_area to deal with the fact that
* " only one appearance of a symbolic name as an array name in an
* array declarator in a program unit is permitted."
* (Fortran standard, number 8.1, line 40)
* array declarators now only appear with the type declaration, not with the
* area. - BC - October 1996.
*
* - Modification of text_entity_declaration to ensure that the OUTPUT of PIPS
* can also be used as INPUT; in particular, variable declarations must
* appear
* before common declarations. BC.
*
* - neither are DATA statements for non integers (FI/FC)
*
* - Also, EQUIVALENCE statements are not generated for the moment. BC.
* They are now??? FC?
*
* - variable pdl added in most signature to handle derived type
* declarations in C; it is the parser declaration list; if a derived
* type must be prettyprinted, it must be prettyprinted with all
* information if in pdl, and else it must be prettyprinted with no
* information. For instance, "struct m {int l; int m}" is the
* definition of m. Other references to the type must be
* prettyprinted "struct m". The PIPS internal representation does
* record derived type declarations. The parser declaration list is
* used to disambiguate between the two cases. The problem occurs
* in both declarations.c and prettyprint.c because types can
* appear in expressions thanks to the sizeof and cast operators.
*
* Data structures used:
*
* text: to produce output with multiple lines (a.k.a. "sentence")
* and proper indenting; this is a Newgen managed data structure
*
* words: a list of strings to produce output without any specific
* formatting, but text's sentences can be built with words.
*
* Call graph structure (a slice of it, for C prettyprint):
*
* text_module
* text_named_module
* text_statement
* text_statement_enclosed: to manage braces
* text_instruction: to print a command
* c_text_related_entities: to print the declarations
* all variables declared share some type
* c_text_entities: to declare a list of variables
* c_text_entity: to declare a variable; may call
* recursively c_text_related_entities to
* print out, for instance, a set of members
* words_variable_or_function(): words level
* c_words_simplified_entity()
* generic_c_words_simplified_entity()
*/
// To have asprintf:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include "linear.h"
#include "genC.h"
#include "text.h"
#include "text-util.h"
#include "ri.h"
#include "ri-util.h"
#include "effects.h"
#include "misc.h"
#include "properties.h"
/* operator precedences are in the [0,100] range */
#define MAXIMAL_PRECEDENCE 100
#define MINIMAL_ARITHMETIC_PRECEDENCE 19
/* Define the markers used in the raw unstructured output when the
PRETTYPRINT_UNSTRUCTURED_AS_A_GRAPH property is true: */
#define PRETTYPRINT_UNSTRUCTURED_BEGIN_MARKER "\200Unstructured"
#define PRETTYPRINT_UNSTRUCTURED_END_MARKER "\201Unstructured End"
#define PRETTYPRINT_UNSTRUCTURED_ITEM_MARKER "\202Unstructured Item"
#define PRETTYPRINT_UNSTRUCTURED_SUCC_MARKER "\203Unstructured Successor ->"
#define PRETTYPRINT_UNREACHABLE_EXIT_MARKER "\204Unstructured Unreachable"
/*===================== Language management ===========*/
/* The prettyprint language */
static language prettyprint_language = language_undefined;
/**
* @brief please avoid using this function directly, use predicate instead
* (see below)
* @return the prettyprint language as a newgen language object
*/
/* Lazily-initialized accessor for the prettyprint language.
 * Defaults to Fortran when nothing has been set yet.
 * Please avoid calling this directly; use the predicates below. */
language get_prettyprint_language () {
  if (prettyprint_language == language_undefined) {
    /* nothing chosen yet: fall back to Fortran */
    prettyprint_language = make_language_fortran ();
  }
  return prettyprint_language;
}
/**
* @return the prettyprint language as a language_utype
**/
enum language_utype get_prettyprint_language_tag () {
return language_tag (get_prettyprint_language ());
}
/**
* @return true if the language is f77
**/
/* @return true when the current prettyprint language is Fortran 77. */
bool prettyprint_language_is_fortran_p () {
  return get_prettyprint_language_tag () == is_language_fortran;
}
/**
* @return true if the language is f95
**/
/* @return true when the current prettyprint language is Fortran 95. */
bool prettyprint_language_is_fortran95_p () {
  return get_prettyprint_language_tag () == is_language_fortran95;
}
/**
* @return true if the language is C
**/
/* @return true when the current prettyprint language is C. */
bool prettyprint_language_is_c_p () {
  return get_prettyprint_language_tag () == is_language_c;
}
/**
* @brief set the prettyprint language according to the property
* PRETTYPRINT_LANGUAGE
* @description If the property PRETTYPRINT_LANGUAGE is set to the special
* value "native" then the language passed in arg is used, usually it's the
* module native language. The user can set "F77", "F95", or "C" to force the
* prettyprint of a language.
*/
/* Set the prettyprint language from the PRETTYPRINT_LANGUAGE property.
 * The special value "native" selects the language passed as argument
 * (usually the module's native language); "F77", "F95" and "C" force
 * a specific output language.  Any other value is an internal error. */
void set_prettyprint_language_from_property( enum language_utype native ) {
  /* lazy default, as in get_prettyprint_language() */
  if (prettyprint_language == language_undefined)
    prettyprint_language = make_language_fortran ();
  const char* lang = get_string_property ("PRETTYPRINT_LANGUAGE");
  if (strcmp (lang, "native") == 0)
    language_tag (prettyprint_language) = native;
  else if (strcmp (lang, "F77") == 0)
    language_tag (prettyprint_language) = is_language_fortran;
  else if (strcmp (lang, "F95") == 0)
    language_tag (prettyprint_language) = is_language_fortran95;
  else if (strcmp (lang, "C") == 0)
    language_tag (prettyprint_language) = is_language_c;
  else
    pips_internal_error("bad property value for language");
}
/**
@brief set the prettyprint language from a newgen language object
@param lang, the language to be used to set the prettyprint_language
variable, content is copied so caller may free if it was malloced
**/
/* Set the prettyprint language from a newgen language object.
 * The content of lang is copied, so the caller keeps ownership and may
 * free it afterwards. */
void set_prettyprint_language (language lang) {
  /* get_prettyprint_language() performs the lazy initialization */
  language current = get_prettyprint_language ();
  *current = *lang;
}
/**
@brief set the prettyprint language from a language_utype argument
@param lang, the language to be used to set the prettyprint_language
variable
**/
void set_prettyprint_language_tag (enum language_utype lang) {
if (prettyprint_language == language_undefined)
prettyprint_language = make_language_fortran ();
language_tag (prettyprint_language) = lang;
}
/* @brief Start a single line comment
* @return a string containing the begin of a comment line, language dependent
*/
string get_comment_sentinel() {
switch(get_prettyprint_language_tag()) {
case is_language_c: return "//";
case is_language_fortran: return "C";
case is_language_fortran95: return "!";
default: pips_internal_error("language unknown not handled"); return NULL ;
}
}
/* @brief Start a single line comment with continuation (blank spaces)
* @return a string containing the begin of a comment line, language dependent
*/
string get_comment_continuation() {
switch(get_prettyprint_language_tag()) {
case is_language_c: return "// ";
case is_language_fortran: return "C ";
case is_language_fortran95: return "! ";
default: pips_internal_error("language unknown not handled"); return NULL ;
}
}
/* Indentation step for the current language: Fortran 77 output is not
 * indented, every other language uses the INDENTATION constant. */
unsigned int get_prettyprint_indentation() {
  return prettyprint_language_is_fortran_p() ? 0 : INDENTATION;
}
static list words_cast(cast obj, int precedence, list pdl);
static list words_sizeofexpression(sizeofexpression obj, bool in_type_declaration, list pdl);
static list words_subscript(subscript s, list pdl);
static list words_application(application a, list pdl);
static text text_forloop(entity module,const char* label,int margin,forloop obj,int n, list pdl);
/* This variable is used to disable the precedence system and hence to
prettyprint all parentheses, which let the prettyprint reflect the
AST. */
static bool precedence_p = true;
/* This variable is used to print braces around all blocks including
blocks with only one statement. */
static bool prettyprint_all_c_braces_p = false;
/* This variable is used to gracefuly print braces around if / else
blocks to avoid gcc warnings */
static bool prettyprint_gcc_c_braces_p = false;
/******************************************************************* STYLES */
/* True when the PRETTYPRINT_PARALLEL property equals style s. */
static bool pp_style_p(string s) {
  const char* chosen = get_string_property(PRETTYPRINT_PARALLEL);
  return same_string_p(chosen, s);
}
#define pp_hpf_style_p() pp_style_p("hpf")
#define pp_f90_style_p() pp_style_p("f90")
#define pp_craft_style_p() pp_style_p("craft")
#define pp_cray_style_p() pp_style_p("cray")
#define pp_cmf_style_p() pp_style_p("cmf")
#define pp_doall_style_p() pp_style_p("doall")
#define pp_do_style_p() pp_style_p("do")
#define pp_omp_style_p() pp_style_p("omp")
/********************************************************************* MISC */
/* Default statement hook: contributes no text, whatever the statement.
 * Used as the neutral value for text_statement_hook below. */
text empty_text(entity __attribute__ ((unused)) e,
int __attribute__ ((unused)) m,
statement __attribute__ ((unused)) s) {
return make_text(NIL);
}
static text (*text_statement_hook)(entity, int, statement) = empty_text;
/**
* @brief checks that the prettyprint hook was actually reset...
*/
/**
 * @brief Install a statement hook; checks that the previous hook was
 * actually reset (by close_prettyprint) before installing the new one.
 */
void init_prettyprint(text(*hook)(entity, int, statement)) {
pips_assert("prettyprint hook not set", text_statement_hook==empty_text);
text_statement_hook = hook;
}
/**
* @brief because some prettyprint functions may be used for debug, so
* the last hook set by somebody may have stayed there although
* being non sense...
*/
/* Reset the statement hook to the neutral empty_text, so a stale hook
 * left over by a debugging session cannot leak into later prettyprints. */
void close_prettyprint() {
text_statement_hook = empty_text;
}
/* Get the prettyprint format of a C label
@param label a string to render
@return the printf-format string
*/
/* Get the prettyprint format of a C label
@param label a string to render
@return the printf-format string ("l%s:" when the label must be prefixed
to stay a valid C identifier, "%s:" otherwise)
*/
string get_C_label_printf_format(const char* label) {
/* If the label begin with a digit, prefix it with a 'l' to be C
compatible.
Hmmm, this does not verify that there is no such label in the program
already... :-( Should be solved quite earlier anyway...
*/
/* Cast to unsigned char: calling isdigit() with a plain char whose value
is negative is undefined behavior per the C standard (<ctype.h>). */
return isdigit((unsigned char) label[0]) ? "l%s:" : "%s:";
}
/**
* @brief True is statement "s" can be printed out without enclosing
* braces when it is the true branch of a test. This is a special case
* because of dangling else clauses.
*/
/* bool one_liner_true_branch_p(statement s) */
/* { */
/* bool one_p = false; */
/* if(!statement_test_p(s)) */
/* one_p = one_liner_p(s); */
/* else { */
/* test t = instruction_test(statement_instruction(s)); */
/* statement f = test_false(t); */
/* if(!(empty_statement_p(f) || nop_statement_p(f))) */
/* one_p = true; // No need to worry, the else clause exists */
/* else { */
/* // Make sure there is no internal dangling else... */
/* one_p = one_liner_test_p(t); */
/* } */
/* } */
/* return one_p; */
/* } */
/**
* @brief True is test "t" contains a non-empty final "else" clause.
*/
/* bool one_liner_test_p(test t) */
/* { */
/* bool one_liner_p = false; */
/* /\* We must make sure that the final else clause is not empty *\/ */
/* statement f = test_false(t); */
/* if(empty_statement_p(f) || nop_statement_p(f)) */
/* one_liner_p = false; */
/* else if(statement_test_p(f)) { */
/* /\* Go down recursively for "else if" constructs. *\/ */
/* instruction i = statement_instruction(f); */
/* test ft = instruction_test(i); */
/* one_liner_p = one_liner_test_p(ft); */
/* } */
/* else */
/* one_liner_p = true; */
/* return one_liner_p; */
/* } */
/**
* @brief Can this statement be printed on one line, without enclosing
* braces, if it is embedded in a loop?
*
* Another test must be used if Statement "s" is embedded in a test a
* a true branch.
*/
bool one_liner_p(statement s)
{
instruction i = statement_instruction(s);
/* Simple instruction kinds (test, loops, call, expression, goto, return)
are one-liner candidates... */
bool yes = (instruction_test_p(i) || instruction_loop_p(i) || instruction_whileloop_p(i)
|| instruction_call_p(i) || instruction_expression_p(i) || instruction_forloop_p(i) || instruction_goto_p(i)
|| return_instruction_p(i));
/* ...but only when the statement carries no declarations, which would
require braces to delimit their scope. */
yes = yes && ENDP(statement_declarations(s));
/* A sequence may still qualify when it holds at most one statement. */
if(!yes && instruction_sequence_p(i)) {
list sl = sequence_statements(instruction_sequence(i));
int sc = gen_length(sl);
if(sc==1) {
/* There may be many lines hidden behind another block construct
when code is generated in a non canonical way as for
{{x=1;y=2;}} */
instruction ii = statement_instruction(STATEMENT(CAR(sl)));
if(instruction_sequence_p(ii)) {
/* OK, we could check deeper, but this is only useful for
redundant internal representations. Let's forget about
niceties such as skipping useless braces. */
yes = false;
}
else
yes = ENDP(statement_declarations(s));
}
else
/* sc==0: an empty sequence also fits on one line */
yes = (sc < 1) && ENDP(statement_declarations(s));
}
return yes;
}
/* True when braces must be printed around the true branch of test obj to
 * avoid a gcc "dangling else" warning: the one-liner true branch is itself
 * a test carrying a non-empty else clause.  Only ever true when the
 * PRETTYPRINT property enabling gcc-friendly braces is set. */
bool gcc_if_block_braces_required_p(test obj)
{
  statement tb = effective_test_true(obj);
  if (!one_liner_p(tb))
    return false;
  if (!statement_test_p(tb))
    return false;
  statement fb = test_false(statement_test(tb));
  if (empty_statement_p(fb))
    return false;
  return prettyprint_gcc_c_braces_p;
}
/***************************************************local variables handling */
static text local_var;
static bool local_flg = false;
/**
* @brief This function either appends the declaration to the text given as a
* parameter or return a new text with the declaration
*/
/**
 * @brief This function either appends the declaration to the text given as a
 * parameter or return a new text with the declaration
 *
 * Reads and resets the local_flg/local_var pair: when no local
 * declarations are pending, r is returned unchanged.
 */
static text insert_locals (text r) {
if (local_flg == true) {
if ((r != text_undefined) && (r != NULL)){
MERGE_TEXTS (r, local_var);
}
else {
/* no text to merge into: hand back the declarations themselves */
r = local_var;
}
local_flg = false;
}
return r;
}
/**
* @brief This function returns true if BLOCK boundary markers are required.
* The function also creates the maker when needed.
*/
/* Build the BEGIN/END BLOCK marker sentences in *t_beg and *t_end when
the relevant PRETTYPRINT properties request them.
@param n statement number attached to the markers
@param margin indentation level of the enclosing block
@return true when markers were generated and must be printed */
static bool mark_block(unformatted *t_beg,
unformatted *t_end,
int n,
int margin) {
bool result = false;
if(!get_bool_property("PRETTYPRINT_FOR_FORESYS")
&& (get_bool_property("PRETTYPRINT_ALL_EFFECTS")
|| get_bool_property("PRETTYPRINT_BLOCKS")))
result = true;
if(result == true) {
list pbeg = NIL;
list pend = NIL;
// Here we need to generate block markers for later use:
switch(get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
// Fortran case: comments at the begin of the line
pbeg = CHAIN_SWORD (NIL, "BEGIN BLOCK");
pend = CHAIN_SWORD (NIL, "END BLOCK");
*t_beg = make_unformatted(strdup(get_comment_sentinel()),
n,
margin,
pbeg);
*t_end = make_unformatted(strdup(get_comment_sentinel()),
n,
margin,
pend);
break;
case is_language_c:
// C case: comments aligned with blocks:
pbeg = CHAIN_SWORD(NIL, strdup(get_comment_sentinel()));
pend = CHAIN_SWORD(NIL, strdup(get_comment_sentinel()));
pbeg = CHAIN_SWORD (pbeg, " BEGIN BLOCK");
pend = CHAIN_SWORD (pend, " END BLOCK");
*t_beg = make_unformatted(NULL, n, margin, pbeg);
*t_end = make_unformatted(NULL, n, margin, pend);
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
return result;
}
/********************************************************************* WORDS */
static int words_intrinsic_precedence(call);
static int intrinsic_precedence(const char*);
/**
* @brief exported for craft
*/
/**
 * @brief exported for craft
 *
 * Build the word list "lower, upper[, increment]" for a Fortran DO range;
 * the increment is omitted when it is the constant "1".
 * NOTE(review): assumes range_increment(obj) is a call expression
 * (a constant) — the commented-out expression_constant_p() test suggests
 * this has not always been guaranteed; verify with callers.
 */
list words_loop_range(range obj, list pdl) {
list pc;
call c = syntax_call(expression_syntax(range_increment(obj)));
pc = words_subexpression(range_lower(obj), 0, true, pdl);
pc = CHAIN_SWORD(pc,", ");
pc = gen_nconc(pc, words_subexpression(range_upper(obj), 0, true, pdl));
if (/* expression_constant_p(range_increment(obj)) && */
strcmp( entity_local_name(call_function(c)), "1") == 0 )
return(pc);
pc = CHAIN_SWORD(pc,", ");
pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
return(pc);
}
/**
* @brief Output a Fortan-like do-loop range as a C-like for-loop index part.
* @description Assume that the increment is an integer so we can generate the
* good condition. Since the do-loops are recognized in C program part only
* with this assumptions, it is a good assumption.
*/
/* Output a Fortran-like do-loop range as the C for-loop header part
"lower; i <op> bound; i += inc)".  The comparison operator depends on
the sign of the increment, and a strict comparison is preferred when
the upper bound references unsigned entities (to avoid wrap-around of
"bound + 1" style expressions). */
list C_loop_range(range obj, entity i, list pdl)
{
list pc;
/* call c = syntax_call(expression_syntax(range_increment(obj))); */
/* Complete the initialization assignment */
pc = words_subexpression(range_lower(obj), 0, true, pdl);
pc = CHAIN_SWORD(pc,"; ");
/* Check the final bound */
pc = CHAIN_SWORD(pc, entity_user_name(i));
/* Increasing or decreasing index? */
expression inc = range_increment(obj);
/* Assume the increment has an integer value with a known sign
If The increment is negative, that means the index is tested against
a lower bound
Else we assume to test against an upper bound
*/
expression ru = range_upper(obj);
/* check if we have something of the form exp -1 as range_upper */
/* NOTE(review): despite its name, ru_minus_one is built as ru + 1;
presumably make_op_exp() folds "e-1 + 1" back to "e" so that a strict
bound "< e" can be printed — confirm against make_op_exp(). */
expression ru_minus_one = make_op_exp(PLUS_OPERATOR_NAME,
copy_expression(ru),
int_to_expression(1)
);
/* Additionally, we want to pretty print a strict comparison if
certain conditions are met. This could be the default choice ,
but the impact on the validation would be huge */
set re = get_referenced_entities(ru);
bool references_unsigned_entity_p = false;
SET_FOREACH(entity,e,re) {
references_unsigned_entity_p |= unsigned_type_p(ultimate_type(entity_type(e)));
}
set_free(re);
if( references_unsigned_entity_p ) {
if(positive_expression_p(inc))
pc = CHAIN_SWORD(pc, " < ");
else if(negative_expression_p(inc))
pc = CHAIN_SWORD(pc, " > ");
else {
//pips_internal_error("loop range cannot be prettyprinted because increment sign"
// " is unknown\n");
pips_user_warning("loop increment sign is unknown: assumed positive\n");
pc = CHAIN_SWORD(pc, " < ");
}
pc = gen_nconc(pc, words_subexpression(ru_minus_one, 0, true, pdl));
}
else {
// FI: when inc is not a constant integer,
// expression_negative_integer_value_p() always return false
if(positive_expression_p(inc))
pc = CHAIN_SWORD(pc, " <= ");
else if(negative_expression_p(inc))
pc = CHAIN_SWORD(pc, " >= ");
else {
//pips_internal_error("loop range cannot be prettyprinted because increment sign"
// " is unknown\n");
pips_user_warning("loop increment sign is unknown: assumed positive\n");
pc = CHAIN_SWORD(pc, " <= ");
}
pc = gen_nconc(pc, words_subexpression(ru, 0, true, pdl));
}
free_expression(ru_minus_one);
pc = CHAIN_SWORD(pc,"; ");
/* Increment the loop index */
pc = CHAIN_SWORD(pc, entity_user_name(i));
pc = CHAIN_SWORD(pc," += ");
pc = gen_nconc(pc, words_expression(inc, pdl));
pc = CHAIN_SWORD(pc,")");
return(pc);
}
/**
* @return a list of string
*/
/**
 * @return a list of string
 *
 * Fortran 77: emits an implied-DO array constructor "(/ (I,I=l,u[,inc]) /)".
 * Fortran 95 and C: emits the colon triplet "l:u[:inc]", leaving out the
 * bounds when they are unbounded ("*") and the increment when it is "1".
 */
list words_range(range obj, list pdl) {
list pc = NIL;
/* if undefined I print a star, why not!? */
if(expression_undefined_p(range_lower(obj))) {
pc = CONS(STRING, MAKE_SWORD("*"), NIL);
} else {
switch(get_prettyprint_language_tag()) {
case is_language_fortran: {
call c = syntax_call(expression_syntax(range_increment(obj)));
pc = CHAIN_SWORD(pc,"(/ (I,I=");
pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
pc = CHAIN_SWORD(pc,",");
pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
pc = CHAIN_SWORD(pc,",");
pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
}
pc = CHAIN_SWORD(pc,") /)") ;
break;
}
case is_language_fortran95: {
// Print the lower bound if != *
if(!unbounded_expression_p(range_lower(obj))) {
pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
}
// Print the upper bound if != *
pc = CHAIN_SWORD(pc,":");
if(!unbounded_expression_p(range_upper(obj))) {
pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
}
// Print the increment if != 1
call c = syntax_call(expression_syntax(range_increment(obj)));
if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
pc = CHAIN_SWORD(pc,":");
pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
}
break;
}
case is_language_c:
/* C does not include ranges, but the PIPS internal
representation does. For instance, constant ranges can be
useful to express effects or regions for intrinsics. To be
discussed with Beatrice: e.g. memcpy(), strncp(). Especially
when they are called with constant arguments. */
// FI: we might still want a warning, but the compiler will
// choke anyway if this is used to prettyprint some C source code
// pips_internal_error("I don't know how to print a range in C !");
// FI: copied from Fortran 95, but we may prefer to see the stars
// Print the lower bound if != *
if(!unbounded_expression_p(range_lower(obj))) {
pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
}
// Print the upper bound if != *
pc = CHAIN_SWORD(pc,":");
if(!unbounded_expression_p(range_upper(obj))) {
pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
}
// Print the increment if != 1
call c = syntax_call(expression_syntax(range_increment(obj)));
if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
pc = CHAIN_SWORD(pc,":");
pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
}
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
return pc;
}
/**
* @description FI: array constructor R433, p. 37 in Fortran 90 standard, can be
* used anywhere in arithmetic expressions whereas the triplet notation is
* restricted to subscript expressions. The triplet notation is used to define
* array sections (see R619, p. 64).
*
* @return a list of string corresponding to the range
*/
/* Emit a subscript range as the colon triplet "l:u[:inc]" in every
language (Fortran triplet notation, reused as-is for C to keep the
prettyprint simple).  Unbounded bounds are omitted, an undefined lower
bound prints as "*", and an increment of "1" is left out. */
list words_subscript_range(range obj, list pdl) {
list pc = NIL;
/* if undefined I print a star, why not!? */
if(expression_undefined_p(range_lower(obj))) {
pc = CONS(STRING, MAKE_SWORD("*"), NIL);
} else {
switch(get_prettyprint_language_tag()) {
case is_language_fortran: {
call c = syntax_call(expression_syntax(range_increment(obj)));
pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
pc = CHAIN_SWORD(pc,":");
pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
pc = CHAIN_SWORD(pc,":");
pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
}
break;
}
case is_language_fortran95: {
// Print the lower bound if != *
if(!unbounded_expression_p(range_lower(obj))) {
pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
}
// Print the upper bound if != *
pc = CHAIN_SWORD(pc,":");
if(!unbounded_expression_p(range_upper(obj))) {
pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
}
// Print the increment if != 1
call c = syntax_call(expression_syntax(range_increment(obj)));
if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
pc = CHAIN_SWORD(pc,":");
pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
}
break;
}
case is_language_c:
// There is no way to print a range in C
// The notation with ":" has been chosen to simplify prettyprint
{
// Print the lower bound if != *
if(!unbounded_expression_p(range_lower(obj))) {
pc = gen_nconc(pc, words_expression(range_lower(obj), pdl));
}
// Print the upper bound if != *
pc = CHAIN_SWORD(pc,":");
if(!unbounded_expression_p(range_upper(obj))) {
pc = gen_nconc(pc, words_expression(range_upper(obj), pdl));
}
// Print the increment if != 1
call c = syntax_call(expression_syntax(range_increment(obj)));
if(strcmp(entity_local_name(call_function(c)), "1") != 0) {
pc = CHAIN_SWORD(pc,":");
pc = gen_nconc(pc, words_expression(range_increment(obj), pdl));
}
break;
}
default:
pips_internal_error("Language unknown !");
break;
}
}
return pc;
}
/* exported for expression.c
*
* Should only be used to prettyprint proper C references.
*/
/* exported for expression.c
*
* Should only be used to prettyprint proper C references.
*
* @param enf entity-naming function used to print the variable itself
* (e.g. entity_user_name); indices follow in "(i,j)" form for Fortran
* and "[i][j]" form for C.  Allocatable-bound entities are internal
* and produce no output at all. */
list words_any_reference(reference obj, list pdl, const char* (*enf)(entity))
{
list pc = NIL;
string begin_attachment;
entity e = reference_variable(obj);
if(!ENTITY_ALLOCATABLE_BOUND_P(e)) {
/* We don't want to print these special entity, they are there for
* internal purpose only
*/
/* Print the entity first */
pc = CHAIN_SWORD(pc, (*enf)(e));
/* first word of the reference, kept for the attachment mechanism below */
begin_attachment = STRING(CAR(pc));
/* Let's print the indices now */
if(reference_indices(obj) != NIL) {
switch(get_prettyprint_language_tag()) {
case is_language_fortran95:
case is_language_fortran: {
int count = 0;
pc = CHAIN_SWORD(pc,"(");
FOREACH(EXPRESSION, subscript, reference_indices(obj)) {
syntax ssubscript = expression_syntax(subscript);
if(count > 0)
pc = CHAIN_SWORD(pc,",");
else
count++;
if(syntax_range_p(ssubscript)) {
pc = gen_nconc(pc,
words_subscript_range(syntax_range(ssubscript),
pdl));
} else {
pc = gen_nconc(pc, words_subexpression(subscript, 0, true, pdl));
}
}
pc = CHAIN_SWORD(pc,")");
break;
}
case is_language_c: {
FOREACH(EXPRESSION, subscript, reference_indices(obj)) {
syntax ssubscript = expression_syntax(subscript);
pc = CHAIN_SWORD(pc, "[");
if(syntax_range_p(ssubscript)) {
pc = gen_nconc(pc,
words_subscript_range(syntax_range(ssubscript),
pdl));
} else {
pc = gen_nconc(pc, words_subexpression(subscript, 0, true, pdl));
}
pc = CHAIN_SWORD(pc, "]");
}
break;
}
default:
pips_internal_error("Language unknown !");
}
}
attach_reference_to_word_list(begin_attachment,
STRING(CAR(gen_last(pc))),
obj);
}
return(pc);
}
/* Prettyprint reference obj using the user-visible entity name. */
list words_reference(reference obj, list pdl)
{
return words_any_reference(obj, pdl, entity_user_name);
}
/* Management of alternate returns */
static list set_of_labels_required_for_alternate_returns = list_undefined;
/* Initialize the (module-wide) set of labels needed as alternate return
 * targets; must be balanced by reset_alternate_return_set(). */
void set_alternate_return_set()
{
ifdebug(1) {
pips_assert("The target list is undefined",
list_undefined_p(set_of_labels_required_for_alternate_returns));
}
set_of_labels_required_for_alternate_returns = NIL;
}
/* Free the alternate-return target list and mark it undefined again,
 * so that a later set_alternate_return_set() can check the balance. */
void reset_alternate_return_set()
{
ifdebug(1) {
pips_assert("The target list is initialized",
!list_undefined_p(set_of_labels_required_for_alternate_returns));
}
gen_free_list(set_of_labels_required_for_alternate_returns);
set_of_labels_required_for_alternate_returns = list_undefined;
}
/* Record label l as an alternate return target (at most once, via
 * gen_once); the set must have been initialized beforehand. */
void add_target_to_alternate_return_set(entity l)
{
ifdebug(1) {
pips_assert("The target list is initialized",
!list_undefined_p(set_of_labels_required_for_alternate_returns));
}
set_of_labels_required_for_alternate_returns =
gen_once(l, set_of_labels_required_for_alternate_returns);
}
/* Build the text of CONTINUE statements for every label collected as an
 * alternate return target; returns an empty text when no label was
 * recorded.  The result is a freshly allocated text owned by the caller. */
text generate_alternate_return_targets()
{
text ral = text_undefined;
if(!ENDP(set_of_labels_required_for_alternate_returns)) {
list sl = NIL;
FOREACH(entity, le, set_of_labels_required_for_alternate_returns) {
sentence s1 = sentence_undefined;
string str_continue = string_undefined;
/* pick the continue statement spelling for the output language */
switch (get_prettyprint_language_tag()) {
case is_language_fortran95:
case is_language_fortran:
str_continue = CONTINUE_FUNCTION_NAME;
break;
case is_language_c:
str_continue = C_CONTINUE_FUNCTION_NAME;
break;
default:
pips_internal_error("Language unknown !");
break;
}
unformatted u1 =
make_unformatted( strdup( label_local_name( le ) ),
STATEMENT_NUMBER_UNDEFINED,
0,
CONS(STRING, strdup(str_continue), NIL));
s1 = make_sentence(is_sentence_unformatted, u1);
sl = gen_nconc(sl, CONS(SENTENCE, s1, NIL));
}
ral = make_text(sl);
}
else {
ral = make_text(NIL);
}
return ral;
}
/* words_regular_call used for user subroutine and user function and
intrinsics called like user function such as MOD().
used also by library static_controlize
*/
/* words_regular_call used for user subroutine and user function and
intrinsics called like user function such as MOD().
used also by library static_controlize

@param obj the call to prettyprint
@param is_a_subroutine true when the call appears in statement position
(Fortran CALL syntax)
@param pdl parser declaration list, threaded through for derived types
@return the list of words for the call, including the argument list
*/
list words_regular_call(call obj, bool is_a_subroutine, list pdl)
{
list pc = NIL;
entity f = call_function(obj);
value i = entity_initial(f);
type t = entity_type(f);
bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");
/* Argument-less calls encoding constants or labels are printed as bare
names and returned immediately. */
if (call_arguments(obj) == NIL) {
if (type_statement_p(t))
return (CHAIN_SWORD(pc, entity_local_name(f)+sizeof(LABEL_PREFIX) -1));
if (value_constant_p(i) || value_symbolic_p(i)) {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
return (CHAIN_SWORD(pc, entity_user_name(f)));
break;
case is_language_c:
if (ENTITY_TRUE_P(f))
return (CHAIN_SWORD(pc, "true"));
if (ENTITY_FALSE_P(f))
return (CHAIN_SWORD(pc, "false"));
return (CHAIN_SWORD(pc, entity_user_name(f)));
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
}
type calltype = call_compatible_type(entity_type(call_function(obj)));
/* NOTE(review): despite its name, function_p is true when the result
type is void, i.e. when f is subroutine-like. */
bool function_p = type_void_p(functional_result(type_functional(calltype)));
/* Emit "CALL " (Fortran) when syntax and result type agree; warn on
subroutine/function mismatches. */
if (function_p) {
if (is_a_subroutine) {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(pc, "CALL ");
break;
case is_language_c:
pc = CHAIN_SWORD(pc, "");
break;
default:
pips_internal_error("Language unknown !");
break;
}
} else {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
pips_user_warning("subroutine '%s' used as a function.\n",
entity_name(f));
break;
case is_language_c:
// no warning in C
break;
case is_language_fortran95:
pips_internal_error("Need to update F95 case");
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
} else if (is_a_subroutine) {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pips_user_warning("function '%s' used as a subroutine.\n",
entity_name(f));
pc = CHAIN_SWORD(pc, "CALL ");
break;
case is_language_c:
// no warning in C
pc = CHAIN_SWORD(pc, "");
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
/* special cases for stdarg builtin macros */
if (ENTITY_VA_END_P(f))
pc = CHAIN_SWORD(pc, "va_end");
else if (ENTITY_VA_START_P(f))
pc = CHAIN_SWORD(pc, "va_start");
else if (ENTITY_VA_COPY_P(f))
pc = CHAIN_SWORD(pc, "va_copy");
/* Special cases for stdio.h */
/* else if (ENTITY__IO_GETC_P(f)) */
/* pc = CHAIN_SWORD(pc, "getc"); */
/* else if (ENTITY__IO_PUTC_P(f)) */
/* pc = CHAIN_SWORD(pc, "putc"); */
else if (ENTITY_ISOC99_SCANF_P(f))
pc = CHAIN_SWORD(pc, ISOC99_SCANF_USER_FUNCTION_NAME);
else if (ENTITY_ISOC99_FSCANF_P(f))
pc = CHAIN_SWORD(pc, ISOC99_FSCANF_USER_FUNCTION_NAME);
else if (ENTITY_ISOC99_SSCANF_P(f))
pc = CHAIN_SWORD(pc, ISOC99_SSCANF_USER_FUNCTION_NAME);
else if (ENTITY_ISOC99_VFSCANF_P(f))
pc = CHAIN_SWORD(pc, ISOC99_VFSCANF_USER_FUNCTION_NAME);
else if (ENTITY_ISOC99_VSCANF_P(f))
pc = CHAIN_SWORD(pc, ISOC99_VSCANF_USER_FUNCTION_NAME);
else if (ENTITY_ISOC99_VSSCANF_P(f))
pc = CHAIN_SWORD(pc, ISOC99_VSSCANF_USER_FUNCTION_NAME);
/* the implied complex operator is hidden... [D]CMPLX_(x,y) -> (x,y)
*/
else if(!ENTITY_IMPLIED_CMPLX_P(f) && !ENTITY_IMPLIED_DCMPLX_P(f))
pc = CHAIN_SWORD(pc, entity_user_name(f));
/* The corresponding formal parameter cannot be checked by
formal_label_replacement_p() because the called modules may not have
been parsed yet. */
if(!ENDP(call_arguments(obj))) {
list pa = list_undefined;
pc = CHAIN_SWORD(pc, "(");
for(pa = call_arguments(obj); !ENDP(pa); POP(pa)) {
expression eap = EXPRESSION(CAR(pa));
if(get_bool_property("PRETTYPRINT_REGENERATE_ALTERNATE_RETURNS")
&& expression_call_p(eap) && actual_label_replacement_p(eap)) {
/* Alternate return actual argument have been replaced by
character strings by the parser. */
entity cf = call_function(syntax_call(expression_syntax(eap)));
const char* ls = entity_local_name(cf);
/* NOTE(review): ls1 (strlen(ls) bytes, enough for the quoted-string
content plus NUL) is never freed on some paths; free(ls1) below is
commented out, presumably because CHAIN_SWORD keeps the pointer —
verify before changing. */
string ls1 = malloc(strlen(ls));
/* pips_assert("ls has at least four characters", strlen(ls)>=4); */
/* Get rid of initial and final quotes */
ls1 = strncpy(ls1, ls+1, strlen(ls)-2);
*(ls1+strlen(ls)-2) = '\000';
pips_assert("eap must be a call to a constant string", expression_call_p(eap));
if(strcmp(get_string_property("PARSER_SUBSTITUTE_ALTERNATE_RETURNS"), "STOP")!=0) {
pc = CHAIN_SWORD(pc, ls1);
/* free(ls1); */
}
else {
/* The actual label cannot always be used because it might have been
eliminated as part of dead code by PIPS since it is not used
with the STOP option. */
/* ls1+1 presumably skips the leading '*' or '&' of the alternate
return marker — TODO confirm */
if(label_string_defined_in_current_module_p(ls1+1)) {
pc = CHAIN_SWORD(pc, ls1);
}
else {
entity els1 = find_label_entity(get_current_module_name(), ls1+1);
/* The assertion may be wrong if this piece of code is used to
print intermediate statements */
pips_assert("Label els1 has been defined although it is not used anymore",
!entity_undefined_p(els1));
pc = CHAIN_SWORD(pc, ls1);
add_target_to_alternate_return_set(els1);
}
}
}
else {
/* words_expression cannot be called because of the C comma
operator which require surrounding parentheses in this
context. Be careful with unary minus. */
pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(pa)),
ASSIGN_OPERATOR_PRECEDENCE,
true/*false*/,
pdl));
}
if (CDR(pa) != NIL)
pc = CHAIN_SWORD(pc, space_p? ", ": ",");
}
pc = CHAIN_SWORD(pc, ")");
}
else if(!type_void_p(functional_result(type_functional(t))) ||
!is_a_subroutine || prettyprint_language_is_c_p()) {
pc = CHAIN_SWORD(pc, "()");
}
return pc;
}
/* Prettyprint a user call and link the printed module name to the call
   object, so that attachments on user module usage can be generated. */
static list words_genuine_regular_call(call obj, bool is_a_subroutine, list pdl)
{
  list words = words_regular_call(obj, is_a_subroutine, pdl);

  if (!ENDP(call_arguments(obj))) {
    /* The call is not used to code a constant. The module name is the
       first word, except for a procedure CALL where "CALL " comes
       first and the name is the second word. */
    entity callee = call_function(obj);
    type ct = call_compatible_type(entity_type(callee));
    bool procedure_p = type_void_p(functional_result(type_functional(ct)));
    string name_word = procedure_p ? STRING(CAR(CDR(words))) : STRING(CAR(words));
    attach_regular_call_to_word(name_word, obj);
  }

  return words;
}
/* Prettyprint an intrinsic call through the regular call machinery,
   forcing the subroutine form. The precedence and leftmost parameters
   are only present to match the intrinsic-handler signature. */
list
words_call_intrinsic(call obj,
int __attribute__ ((unused)) precedence,
bool __attribute__ ((unused)) leftmost,
list pdl)
{
return words_regular_call(obj, true, pdl);
}
/* Prettyprint an assignment or update operator call ("=", "+=", "%=",
   "&=", "^=", ...) as an infix expression.

   Internally renamed update operators are mapped back to their C
   spelling. Parentheses are added when the surrounding precedence
   requires them, or systematically when the prettyprint option asks
   for them.

   obj: the assignment call; precedence: precedence of the context;
   pdl: declaration list forwarded to subexpression printing. */
static list
words_assign_op(call obj,
                int precedence,
                bool __attribute__ ((unused)) leftmost,
                list pdl)
{
  list pc = NIL, args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);
  const char* fun = entity_local_name(call_function(obj));

  /* Left-hand side. */
  pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(args)), prec, true, pdl));

  /* Map internally renamed update operators to their C spelling. */
  if (strcmp(fun, MODULO_UPDATE_OPERATOR_NAME) == 0)
    fun = "%=";
  else if (strcmp(fun, BITWISE_AND_UPDATE_OPERATOR_NAME) == 0)
    fun = "&=";
  else if (strcmp(fun, BITWISE_XOR_UPDATE_OPERATOR_NAME) == 0)
    fun = "^=";

  /* FI: space_p could be used here to control spacing around assignment */
  pc = CHAIN_SWORD(pc," ");
  pc = CHAIN_SWORD(pc, fun);
  pc = CHAIN_SWORD(pc," ");

  expression exp = expression_undefined;
  switch (get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    exp = EXPRESSION(CAR(CDR(args)));
    if (expression_call_p(exp)) {
      /* = is not a Fortran operator. No need for parentheses ever,
         even with the parenthesis option */
      pc = gen_nconc(pc, words_syntax(expression_syntax(exp), pdl));
    } else
      pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(CDR(args))),
                                             prec,
                                             true,
                                             pdl));
    break;
  case is_language_c:
    /* Brace expressions are not allowed in standard assignments */
    exp = EXPRESSION(CAR(CDR(args)));
    if (ENTITY_ASSIGN_P(call_function(obj))) {
      if (brace_expression_p(exp)) {
        /* Would need the GCC constructor extension.
           Fix: the former error message was garbled
           ("represnetd as a cas on"). */
        pips_internal_error("this should not happen: a constructor is represented as a cast on a brace expression\n");
      }
      else {
        /* Be careful with expression lists, they may require
           surrounding parentheses. */
        pc = gen_nconc(pc, words_subexpression(exp, prec, true, pdl));
      }
    } else {
      pc = gen_nconc(pc, words_subexpression(exp, prec, true, pdl));
    }
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }

  /* Use precedence to add parentheses, unless they are always required. */
  if (prec < precedence || (!precedence_p && precedence > 0)) {
    pc = CONS(STRING, MAKE_SWORD("("), pc);
    pc = CHAIN_SWORD(pc, ")");
  }
  return (pc);
}
/* Prettyprint a Fortran substring reference: the substring intrinsic
   call is printed back in the syntactic form "ref(lower:upper)". An
   unknown upper bound — encoded as a call to UNBOUNDED_DIMENSION_NAME —
   prints nothing after the colon. */
static list
words_substring_op(call obj,
                   int __attribute__ ((unused)) precedence,
                   bool __attribute__ ((unused)) leftmost,
                   list pdl) {
  list words = NIL;
  list args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);

  pips_assert("words_substring_op",
              gen_length(args) == 3 || gen_length(args) == 4);

  expression str_ref = EXPRESSION(CAR(args));
  expression lower = EXPRESSION(CAR(CDR(args)));
  expression upper = EXPRESSION(CAR(CDR(CDR(args))));

  words = gen_nconc(words, words_subexpression(str_ref, prec, true, pdl));
  words = CHAIN_SWORD(words, "(");
  words = gen_nconc(words, words_subexpression(lower, prec, true, pdl));
  words = CHAIN_SWORD(words, ":");

  /* An unknown upper bound is encoded as a call to
     UNBOUNDED_DIMENSION_NAME and nothing must be printed. */
  bool unbounded_p = false;
  if (syntax_call_p(expression_syntax(upper))) {
    entity f = call_function(syntax_call(expression_syntax(upper)));
    unbounded_p = (f == CreateIntrinsic(UNBOUNDED_DIMENSION_NAME));
  }
  if (!unbounded_p)
    words = gen_nconc(words, words_subexpression(upper, prec, true, pdl));

  words = CHAIN_SWORD(words, ")");
  return words;
}
/* Prettyprint a Fortran substring assignment: the four-argument
   assign-substring intrinsic is rendered as "ref(l:u) = e". */
static list
words_assign_substring_op(call obj,
                          int __attribute__ ((unused)) precedence,
                          bool __attribute__ ((unused)) leftmost,
                          list pdl)
{
  /* The assign substring function call is reduced to a syntactic construct */
  list pc = NIL;
  expression e = expression_undefined;
  int prec = words_intrinsic_precedence(obj);

  /* Fix: the assertion tag wrongly named words_substring_op. */
  pips_assert("words_assign_substring_op", gen_length(call_arguments(obj)) == 4);

  /* The fourth argument is the assigned value; the first three form
     the substring reference and are printed by words_substring_op. */
  e = EXPRESSION(CAR(CDR(CDR(CDR(call_arguments(obj))))));
  pc = gen_nconc(pc, words_substring_op(obj, prec, true, pdl));
  pc = CHAIN_SWORD(pc, " = ");
  pc = gen_nconc(pc, words_subexpression(e, prec, true, pdl));
  return(pc);
}
/**
 * Map the PIPS internal name of an operator to its external
 * representation.
 *
 * Some operators are renamed internally whatever the output language
 * (exact, case-sensitive match); Fortran comparison and logical
 * operators are additionally given a C spelling when prettyprinting C
 * (case-insensitive match).
 *
 * @param name the PIPS internal representation of the operator
 * @return the string to print for this operator
 */
static const char* renamed_op_handling (const char* name) {
  /* Renamings applied whatever the output language. */
  const struct { const char *internal; const char *external; } always[] = {
    { PLUS_C_OPERATOR_NAME,      "+"  },
    { MINUS_C_OPERATOR_NAME,     "-"  },
    { BITWISE_AND_OPERATOR_NAME, "&"  },
    { BITWISE_XOR_OPERATOR_NAME, "^"  },
    { C_AND_OPERATOR_NAME,       "&&" },
    { C_NON_EQUAL_OPERATOR_NAME, "!=" },
    { C_MODULO_OPERATOR_NAME,    "%"  },
    { NULL, NULL }
  };
  for (int i = 0; always[i].internal != NULL; i++)
    if (strcmp(name, always[i].internal) == 0)
      return always[i].external;

  /* Fortran operators spelled the C way, only when prettyprinting C. */
  if (prettyprint_language_is_c_p()) {
    const struct { const char *internal; const char *external; } c_only[] = {
      { GREATER_THAN_OPERATOR_NAME,     C_GREATER_THAN_OPERATOR_NAME },
      { LESS_THAN_OPERATOR_NAME,        C_LESS_THAN_OPERATOR_NAME },
      { GREATER_OR_EQUAL_OPERATOR_NAME, C_GREATER_OR_EQUAL_OPERATOR_NAME },
      { LESS_OR_EQUAL_OPERATOR_NAME,    C_LESS_OR_EQUAL_OPERATOR_NAME },
      { EQUAL_OPERATOR_NAME,            C_EQUAL_OPERATOR_NAME },
      { NON_EQUAL_OPERATOR_NAME,        "!=" },
      { AND_OPERATOR_NAME,              "&&" },
      { OR_OPERATOR_NAME,               C_OR_OPERATOR_NAME },
      { NULL, NULL }
    };
    for (int i = 0; c_only[i].internal != NULL; i++)
      if (strcasecmp(name, c_only[i].internal) == 0)
        return c_only[i].external;
  }

  return name;
}
/** @return a list of strings with the prettyprint of an OMP reduction
 *  clause, e.g. "reduction(+:s)".
 *
 * The first argument of the call is the reduction operator, stored as
 * a reference; the remaining arguments are the reduction variables.
 */
static list
words_omp_red(call obj,
              int precedence __attribute__ ((unused)),
              bool leftmost __attribute__ ((unused)),
              list pdl)
{
  list result = NIL;
  entity fct = call_function(obj);
  result = CHAIN_SWORD(result, entity_user_name(fct));
  result = CHAIN_SWORD(result, "(");
  // the reduction arguments as an expression list
  list args = call_arguments (obj);
  pips_assert ("no arguments for reduction clause", args != NIL);
  int nb_arg = 0;
  FOREACH (EXPRESSION, arg, args) {
    if (nb_arg == 0) {
      // The first argument is the operator and needs to be handled
      // separately because of the internal renaming of operators.
      const char* op;
      syntax syn = expression_syntax (arg);
      pips_assert ("should be a reference", syntax_tag (syn) == is_syntax_reference);
      op = entity_local_name (reference_variable (syntax_reference (syn)));
      op = renamed_op_handling (op);
      // Fix: the result of CHAIN_SWORD was previously discarded; it
      // only worked by accident because gen_nconc appends in place
      // when the list is already non-empty.
      result = CHAIN_SWORD(result, op);
    }
    else { // (nb_arg != 0)
      result = (nb_arg == 1)? CHAIN_SWORD(result,":") : CHAIN_SWORD(result,",");
      result = gen_nconc (result, words_expression(arg, pdl));
    }
    nb_arg++;
  }
  pips_assert ("reduction clause has at least two arguments", nb_arg > 1);
  result = CHAIN_SWORD(result, ")");
  return result;
}
// Function written by C.A. Mensi to prettyprint C or Fortran code as C code
/* Prettyprint a statement-like intrinsic with 0 or 1 argument as C:
   STOP, PAUSE, RETURN, CONTINUE and OMP directives. STOP and PAUSE are
   rendered with PIPS-specific C runtime functions.
   Fix: "precedence" was wrongly tagged __attribute__((unused))
   although it is passed to words_subexpression() below. */
static list
words_nullary_op_c(call obj,
                   int precedence,
                   bool leftmost __attribute__ ((unused)),
                   list pdl)
{
  list pc = NIL;
  list args = call_arguments(obj);
  entity func = call_function(obj);
  const char* fname = entity_local_name(func);
  int nargs = gen_length(args);
  bool parentheses_p = true;

  /* STOP and PAUSE and RETURN in Fortran may have 0 or 1 argument.
     STOP and PAUSE are prettyprinted in C using PIPS specific C functions. */
  if (nargs == 0) {
    if (same_string_p(fname, STOP_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "exit(0)");
    else if (same_string_p(fname, RETURN_FUNCTION_NAME)
             || same_string_p(fname, C_RETURN_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "return");
    else if (same_string_p(fname, PAUSE_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "_f77_intrinsics_pause_(0)");
    else if (same_string_p(fname, CONTINUE_FUNCTION_NAME))
      pc = CHAIN_SWORD(pc, "");
    else if ((same_string_p(fname, OMP_OMP_FUNCTION_NAME)) ||
             (same_string_p(fname, OMP_FOR_FUNCTION_NAME)) ||
             (same_string_p(fname, OMP_PARALLEL_FUNCTION_NAME)))
      pc = CHAIN_SWORD(pc, fname);
    else
      pips_internal_error("Unknown nullary operator");
  }
  else if (nargs == 1) {
    expression e = EXPRESSION(CAR(args));
    if (same_string_p(fname, STOP_FUNCTION_NAME)) {
      basic b = expression_basic(e);
      if (basic_int_p(b)) {
        // Missing: declaration of exit() if Fortran code handled
        pc = CHAIN_SWORD(pc, "exit");
      }
      else if (basic_string_p(b)) {
        pc = CHAIN_SWORD(pc, "_f77_intrinsics_stop_");
      }
    }
    else if (same_string_p(fname, RETURN_FUNCTION_NAME)
             || same_string_p(fname, C_RETURN_FUNCTION_NAME)) {
      /* "return e;" takes no parentheses around the value here. */
      pc = CHAIN_SWORD(pc, "return");
      parentheses_p = false;
      //pips_user_error("alternate returns are not supported in C\n");
    }
    else if (same_string_p(fname, PAUSE_FUNCTION_NAME)) {
      pc = CHAIN_SWORD(pc, "_f77_intrinsics_pause_");
    }
    else {
      pips_internal_error("unexpected one argument");
    }
    pc = CHAIN_SWORD(pc, parentheses_p ? "(" : " ");
    pc = gen_nconc(pc, words_subexpression(e, precedence, true, pdl));
    pc = CHAIN_SWORD(pc, parentheses_p ? ")" : "");
  }
  else {
    pips_internal_error("unexpected arguments");
  }
  return (pc);
}
// function added for fortran by A. Mensi
/* Prettyprint a statement-like intrinsic with 0 or 1 argument in
   Fortran: STOP, PAUSE, RETURN, CONTINUE and OMP directives. The
   optional argument (e.g. the STOP code) is printed after a space. */
static list words_nullary_op_fortran(call obj,
int precedence,
bool __attribute__ ((unused)) leftmost,
list pdl)
{
list pc = NIL;
list args = call_arguments(obj);
entity func = call_function(obj);
const char* fname = entity_local_name(func);
/* RETURN (in either internal spelling) prints as the Fortran keyword;
   the OMP "for" directive prints as "do"; anything else prints its
   own local name. */
if(same_string_p(fname,RETURN_FUNCTION_NAME)
||same_string_p(fname,C_RETURN_FUNCTION_NAME))
pc = CHAIN_SWORD(pc, RETURN_FUNCTION_NAME);
else if (same_string_p(fname,OMP_FOR_FUNCTION_NAME))
pc = CHAIN_SWORD(pc, "do");
else
pc = CHAIN_SWORD(pc, fname);
// STOP and PAUSE and RETURN in fortran may have 0 or 1 argument.A Mensi
if(gen_length(args)==1) {
if(same_string_p(fname,STOP_FUNCTION_NAME)
|| same_string_p(fname,PAUSE_FUNCTION_NAME)
|| same_string_p(fname,RETURN_FUNCTION_NAME)
|| same_string_p(fname, C_RETURN_FUNCTION_NAME)) {
expression e = EXPRESSION(CAR(args));
pc = CHAIN_SWORD(pc, " ");
pc = gen_nconc(pc, words_subexpression(e, precedence, true, pdl));
}
else {
/* Only the four intrinsics above may carry an argument. */
pips_internal_error("unexpected arguments");
}
}
else if(gen_length(args)>1) {
pips_internal_error("unexpected arguments");
}
return(pc);
}
/* Dispatch the prettyprint of a statement-like nullary intrinsic to
   the language-specific routine.
   Fix: "leftmost" was wrongly tagged __attribute__((unused)) although
   it is forwarded to both language-specific handlers. */
static list words_nullary_op(call obj,
                             int precedence,
                             bool leftmost,
                             list pdl) {
  list result = NIL;
  switch (get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    result = words_nullary_op_fortran(obj, precedence, leftmost, pdl);
    break;
  case is_language_c:
    result = words_nullary_op_c(obj, precedence, leftmost, pdl);
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }
  return result;
}
/* Prettyprint the control-information part of a Fortran IO call.
 *
 * *iol is a flat list of (keyword, value) expression pairs terminated
 * by the "IOLIST=" marker; on return *iol is updated to point just
 * after the marker (or to NIL when the list is exhausted), so the
 * caller can print the remaining IO list.
 */
static list
words_io_control(list *iol,
                 int __attribute__ ((unused)) precedence,
                 bool __attribute__ ((unused)) leftmost,
                 list pdl)
{
  list pc = NIL;
  list pio = *iol;

  while (pio != NIL) {
    syntax s = expression_syntax(EXPRESSION(CAR(pio)));
    call c;

    if (! syntax_call_p(s)) {
      pips_internal_error("call expected");
    }
    c = syntax_call(s);

    /* The "IOLIST=" marker ends the control list; the caller resumes
       from the element after it. */
    if (strcmp(entity_local_name(call_function(c)), IO_LIST_STRING_NAME) == 0) {
      *iol = CDR(pio);
      return(pc);
    }

    /* Fix: the control list is made of (keyword, value) pairs, so a
       keyword without its value is a malformed format. The old check
       was placed after the loop, where it was unreachable since the
       loop only exits when pio is NIL. */
    if (CDR(pio) == NIL)
      pips_internal_error("bad format");

    if (pc != NIL)
      pc = CHAIN_SWORD(pc, ",");

    pc = CHAIN_SWORD(pc, entity_local_name(call_function(c)));
    pc = gen_nconc(pc, words_expression(EXPRESSION(CAR(CDR(pio))), pdl));
    pio = CDR(CDR(pio));
  }

  *iol = NIL;
  return(pc);
}
/* Prettyprint a Fortran implied-DO: "(items..., index = range)".
   The first call argument is the loop index, the second its range,
   and the remaining arguments are the iterated items. */
static list
words_implied_do(call obj,
int __attribute__ ((unused)) precedence,
bool __attribute__ ((unused)) leftmost,
list pdl)
{
list pc = NIL;
list pcc;
expression index;
syntax s;
range r;
bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");
pcc = call_arguments(obj);
index = EXPRESSION(CAR(pcc));
pcc = CDR(pcc);
s = expression_syntax(EXPRESSION(CAR(pcc)));
if (! syntax_range_p(s)) {
pips_internal_error("range expected");
}
r = syntax_range(s);
pc = CHAIN_SWORD(pc, "(");
/* Print the iterated items, comma-separated. */
MAPL(pcp, {
pc = gen_nconc(pc, words_expression(EXPRESSION(CAR(pcp)), pdl));
if (CDR(pcp) != NIL)
pc = CHAIN_SWORD(pc, space_p? ", " : ",");
}, CDR(pcc));
/* Then the ", index = lower,upper[,step]" control part. */
pc = CHAIN_SWORD(pc, space_p? ", " : ",");
pc = gen_nconc(pc, words_expression(index, pdl));
pc = CHAIN_SWORD(pc, " = ");
pc = gen_nconc(pc, words_loop_range(r, pdl));
pc = CHAIN_SWORD(pc, ")");
return(pc);
}
/* An unbounded (assumed-size) dimension is always printed as "*". */
static list
words_unbounded_dimension(call __attribute__ ((unused)) obj,
                          int __attribute__ ((unused)) precedence,
                          bool __attribute__ ((unused)) leftmost,
                          list __attribute__ ((unused)) pdl)
{
  return CHAIN_SWORD(NIL, "*");
}
/* A list-directed format ("FMT=*" or "UNIT=*") is printed as "*". */
static list
words_list_directed(call __attribute__ ((unused)) obj,
                    int __attribute__ ((unused)) precedence,
                    bool __attribute__ ((unused)) leftmost,
                    list __attribute__ ((unused)) pdl)
{
  return CHAIN_SWORD(NIL, "*");
}
/* Prettyprint a Fortran IO statement (READ, WRITE, ...).
   First the control list is scanned up to the "IOLIST=" marker to
   recognize the simple "(*,*)" cases that can be printed as
   "PRINT *"/"READ *"; then the statement keyword and control part are
   printed, followed by the IO list itself. */
static list
words_io_inst(call obj,
int precedence, bool leftmost, list pdl)
{
list pc = NIL;
list pcio = call_arguments(obj);
list pio_write = pcio;
bool good_fmt = false;
bool good_unit = false;
bool iolist_reached = false;
bool complex_io_control_list = false;
expression fmt_arg = expression_undefined;
expression unit_arg = expression_undefined;
const char* called = entity_local_name(call_function(obj));
bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");
/* AP: I try to convert WRITE to PRINT. Three conditions must be
fullfilled. The first, and obvious, one, is that the function has
to be WRITE. Secondly, "FMT" has to be equal to "*". Finally,
"UNIT" has to be equal either to "*" or "6". In such case,
"WRITE(*,*)" is replaced by "PRINT *,". */
/* GO: Not anymore for UNIT=6 leave it ... */
/* First pass: scan the (keyword, value) control pairs up to the
   "IOLIST=" marker, remembering the FMT= and UNIT= arguments. */
while((pio_write != NIL) && (!iolist_reached)) {
syntax s = expression_syntax(EXPRESSION(CAR(pio_write)));
call c;
expression arg = EXPRESSION(CAR(CDR(pio_write)));
if(!syntax_call_p(s)) {
pips_internal_error("call expected");
}
c = syntax_call(s);
if(strcmp(entity_local_name(call_function(c)), "FMT=") == 0) {
/* Avoid to use words_expression(arg) because it set some
attachments and unit_words may not be used
later... RK. */
entity f;
/* The * format is coded as a call to
"LIST_DIRECTED_FORMAT_NAME" function: */
good_fmt = syntax_call_p(expression_syntax(arg))
&& value_intrinsic_p(entity_initial(f =
call_function(syntax_call(expression_syntax(arg)))))
&& (strcmp(entity_local_name(f), LIST_DIRECTED_FORMAT_NAME) == 0);
pio_write = CDR(CDR(pio_write));
/* To display the format later: */
fmt_arg = arg;
} else if(strcmp(entity_local_name(call_function(c)), "UNIT=") == 0) {
/* Avoid to use words_expression(arg) because it set some
attachments and unit_words may not be used
later... RK. */
entity f;
/* The * format is coded as a call to
"LIST_DIRECTED_FORMAT_NAME" function: */
good_unit = syntax_call_p(expression_syntax(arg))
&& value_intrinsic_p(entity_initial(f =
call_function(syntax_call(expression_syntax(arg)))))
&& (strcmp(entity_local_name(f), LIST_DIRECTED_FORMAT_NAME) == 0);
/* To display the unit later: */
unit_arg = arg;
pio_write = CDR(CDR(pio_write));
} else if(strcmp(entity_local_name(call_function(c)), IO_LIST_STRING_NAME)
== 0) {
iolist_reached = true;
pio_write = CDR(pio_write);
} else {
/* Any other keyword (ERR=, END=, IOSTAT=, ...) forces the full
   control-list prettyprint below. */
complex_io_control_list = true;
pio_write = CDR(CDR(pio_write));
}
}
if(good_fmt && good_unit && same_string_p(called, "WRITE")) {
/* WRITE (*,*) -> PRINT * */
if(pio_write != NIL) /* WRITE (*,*) pio -> PRINT *, pio */
{
pc = CHAIN_SWORD(pc, "PRINT *, ");
} else /* WRITE (*,*) -> PRINT * */
{
pc = CHAIN_SWORD(pc, "PRINT * ");
}
pcio = pio_write;
} else if(good_fmt && good_unit && same_string_p(called, "READ")) {
/* READ (*,*) -> READ * */
if(pio_write != NIL) /* READ (*,*) pio -> READ *, pio */
{
switch(get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(pc, "READ *, ");
break;
case is_language_c:
pc = CHAIN_SWORD(pc, "_f77_intrinsics_read_(");
break;
default:
pips_internal_error("Language unknown !");
break;
}
} else /* READ (*,*) -> READ * */
{
pc = CHAIN_SWORD(pc, "READ * ");
}
pcio = pio_write;
} else if(!complex_io_control_list) {
/* Only UNIT= and possibly FMT= were given: print them directly. */
list unit_words = words_expression(unit_arg, pdl);
pips_assert("A unit must be defined", !ENDP(unit_words));
pc = CHAIN_SWORD(pc, entity_local_name(call_function(obj)));
pc = CHAIN_SWORD(pc, " (");
pc = gen_nconc(pc, unit_words);
if(!expression_undefined_p(fmt_arg)) {
/* There is a FORMAT: */
pc = CHAIN_SWORD(pc, space_p? ", " : ",");
pc = gen_nconc(pc, words_expression(fmt_arg, pdl));
}
pc = CHAIN_SWORD(pc, ") ");
pcio = pio_write;
} else {
pc = CHAIN_SWORD(pc, entity_local_name(call_function(obj)));
pc = CHAIN_SWORD(pc, " (");
/* FI: missing argument; I use "precedence" because I've no clue;
see LZ */
pc = gen_nconc(pc, words_io_control(&pcio, precedence, leftmost, pdl));
pc = CHAIN_SWORD(pc, ") ");
/*
free_words(fmt_words);
*/
}
/* because the "IOLIST=" keyword is embedded in the list
and because the first IOLIST= has already been skipped,
only odd elements are printed */
MAPL(pp, {
pc = gen_nconc(pc, words_expression(EXPRESSION(CAR(pp)), pdl));
if (CDR(pp) != NIL) {
POP(pp);
if(pp==NIL)
pips_internal_error("missing element in IO list");
pc = CHAIN_SWORD(pc, space_p? ", " : ",");
}
}, pcio);
if(prettyprint_language_is_c_p())
pc = CHAIN_SWORD(pc, ") ");
return (pc);
}
/**
 * Prettyprint a call taking a STAT= parameter, e.g. ALLOCATE().
 * Applicable to every call to a function that takes a "STAT=" keyword
 * argument (the keyword itself is coded as a call whose next list
 * element is the STAT variable).
 */
static list words_stat_io_inst(call obj,
                               int __attribute__((unused)) precedence,
                               bool __attribute__((unused)) leftmost,
                               list pdl) {
  list pc = NIL;
  list pio_write = call_arguments(obj);
  const char* called = entity_local_name(call_function(obj));
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");

  /* Function name and opening parenthesis. */
  pc = CHAIN_SWORD(pc, called);
  pc = CHAIN_SWORD(pc, " (");

  while (pio_write != NIL) {
    expression expr = EXPRESSION(CAR(pio_write));
    syntax s = expression_syntax(expr);

    if (syntax_call_p(s)) { /* "STAT=" is coded as a call */
      call c = syntax_call(s);
      if (strcmp(entity_local_name(call_function(c)), "STAT=") == 0) {
        /* Fix: the old code strdup'ed this literal with a FIXME
           attached; CHAIN_SWORD is given plain literals everywhere
           else, so the copy was useless (and leaked). */
        pc = CHAIN_SWORD(pc, "STAT=");
        /* The next list element is the STAT variable. */
        pio_write = CDR(pio_write);
        expression arg = EXPRESSION(CAR(pio_write));
        pc = gen_nconc(pc, words_expression(arg, pdl));
      }
    } else { /* A plain argument expression */
      pc = gen_nconc(pc, words_expression(expr, pdl));
    }
    pio_write = CDR(pio_write);
    if (pio_write) {
      pc = CHAIN_SWORD(pc, space_p ? ", " : ",");
    }
  }
  pc = CHAIN_SWORD(pc, ") ");
  return pc;
}
/* Prettyprint nothing: handler for intrinsics that must not appear in
   the generated text. */
static list
null(call __attribute__ ((unused)) obj,
     int __attribute__ ((unused)) precedence,
     bool __attribute__ ((unused)) leftmost,
     list __attribute__ ((unused)) pdl)
{
  return NIL;
}
/* Prettyprint a prefix unary operator call (++x, --x, &x, !x, ~x, *x,
   unary plus, .NOT.). Internal operator names are mapped to their
   external spelling before being printed in front of the operand. */
static list
words_prefix_unary_op(call obj,
int precedence,
bool __attribute__ ((unused)) leftmost,
list pdl)
{
list pc = NIL;
expression e = EXPRESSION(CAR(call_arguments(obj)));
int prec = words_intrinsic_precedence(obj);
const char* fun = entity_local_name(call_function(obj));
if (strcmp(fun,PRE_INCREMENT_OPERATOR_NAME) == 0)
fun = "++";
else if (strcmp(fun,PRE_DECREMENT_OPERATOR_NAME) == 0)
fun = "--";
else if (strcmp(fun,ADDRESS_OF_OPERATOR_NAME) == 0)
fun = "&";
else if (strcmp(fun,C_NOT_OPERATOR_NAME) == 0)
fun = "!";
else if (strcmp(fun,BITWISE_NOT_OPERATOR_NAME) == 0)
fun = "~";
else if (strcmp(fun,DEREFERENCING_OPERATOR_NAME) == 0)
/* Since we put no spaces around an operator (to not change Fortran), the blank
before '*' is used to avoid the confusion in the case of divide operator, i.e
d1 = 1.0 / *det in function inv_j, SPEC2000 quake benchmark.
But we do not want this in a lhs and espcially with a double dereferencing. */
fun = "*";
else if(prettyprint_language_is_c_p()){
/* Fortran logical operators printed in C. Note that the second test
   below is a plain "if", not "else if": once fun has been rewritten
   to "!" it can no longer match UNARY_PLUS_OPERATOR_NAME, so the
   behavior is the same. */
if(strcasecmp(fun, NOT_OPERATOR_NAME)==0)
fun="!";
if(strcasecmp(fun, UNARY_PLUS_OPERATOR_NAME)==0) {
/* You do not want to transform +1 + +1 into +1++ 1 */
/* Maybe the precedence could be useful to avoid adding a
useless SPACE, but unary plus is rare enough to reduce
the ROI of such anoptimization to zero. */
fun=" +";
}
}
pc = CHAIN_SWORD(pc,fun);
pc = gen_nconc(pc, words_subexpression(e, prec, false, pdl));
/* Parenthesize when the context binds tighter, or systematically when
   the prettyprint option requests parentheses. */
if(prec < precedence || (!precedence_p && precedence>0)) {
pc = CONS(STRING, MAKE_SWORD("("), pc);
pc = CHAIN_SWORD(pc, ")");
}
return(pc);
}
/* Prettyprint a postfix unary operator call, i.e. x++ or x--. */
static list
words_postfix_unary_op(call obj,
                       int precedence,
                       bool __attribute__ ((unused)) leftmost,
                       list pdl)
{
  expression arg = EXPRESSION(CAR(call_arguments(obj)));
  int prec = words_intrinsic_precedence(obj);
  const char* op = entity_local_name(call_function(obj));

  /* The operand comes first. */
  list words = words_subexpression(arg, prec, false, pdl);

  /* Map the internal operator names to their C spelling. */
  if (strcmp(op, POST_INCREMENT_OPERATOR_NAME) == 0)
    op = "++";
  else if (strcmp(op, POST_DECREMENT_OPERATOR_NAME) == 0)
    op = "--";
  words = CHAIN_SWORD(words, op);

  /* Parenthesize when the context binds tighter, or systematically
     when the prettyprint option requests parentheses. */
  if (prec < precedence || (!precedence_p && precedence > 0)) {
    words = CONS(STRING, MAKE_SWORD("("), words);
    words = CHAIN_SWORD(words, ")");
  }
  return words;
}
/* Prettyprint a unary minus. The "-" is glued onto the first word of
   the operand so that line-wrapping cannot separate them. */
static list
words_unary_minus(call obj, int precedence, bool leftmost, list pdl)
{
list pc = NIL;
expression e = EXPRESSION(CAR(call_arguments(obj)));
int prec = words_intrinsic_precedence(obj);
if ( prec < precedence || !leftmost || (!precedence_p && precedence>0))
pc = CHAIN_SWORD(pc, "(");
/* make sure the minus can not be split apart from its argument */
list sub = words_subexpression(e, prec, false, pdl);
string fst = STRING(CAR(sub));
POP(sub);
string nfst ;
/* NOTE(review): the asprintf return value is not checked; on
   allocation failure nfst would be undefined — TODO confirm whether
   PIPS relies on aborting malloc wrappers here. */
asprintf(&nfst,"-%s",fst);
free(fst);
sub=CONS(STRING,nfst,sub);
pc = gen_nconc(pc, sub);
if ( prec < precedence || !leftmost || (!precedence_p && precedence>0))
pc = CHAIN_SWORD(pc, ")");
return(pc);
}
/*
The precedence of (1/x) is the same as the multiply operator
(e.g. a*1/b without parentheses). Moreover, the MAXIMAL precedence is
used for the (x) subterm (e.g. 1/(a*b) 1/(-2) ...). However, 1/x**2 may
be a correct prettyprint in Fortran (?) */
/* WARNING : the floating point division is used wether b is an int or not
! (1.0/b) -- in fact b should not be an int ! */
static list /* of string */
words_inverse_op(call obj,
                 int precedence,
                 bool __attribute__ ((unused)) leftmost,
                 list pdl)
{
  expression arg = EXPRESSION(CAR(call_arguments(obj)));
  int prec = words_intrinsic_precedence(obj);
  bool paren_p = (prec < precedence);
  list words = NIL;

  if (paren_p)
    words = CHAIN_SWORD(words, "(");
  words = CHAIN_SWORD(words, "1./");
  /* Maximal precedence: the operand is parenthesized whenever it is
     not atomic. */
  words = gen_nconc(words, words_subexpression(arg, MAXIMAL_PRECEDENCE,
                                               false, pdl));
  if (paren_p)
    words = CHAIN_SWORD(words, ")");
  return words;
}
/* Generate the words to jump to label "tlabel".

   This function is useful only for parsed codes since gotos are
   removed by the controlizer.

   The Fortran RETURN label gets a special treatment: it is rendered as
   a RETURN statement (in C, returning the module's return-value
   variable when the function is not void). In C a numeric label is
   prefixed with "l" since a C label cannot begin with a digit.

   Fix: leftover SVN merge-conflict residue (a commented-out duplicate
   of the switch plus the <<<<<<</=======/>>>>>>> markers) and a dead
   "if(false)" block were removed; the live code is unchanged. */
list /* of string */
words_goto_label(const char* tlabel)
{
  list pc = NIL;
  if (strcmp(tlabel, RETURN_LABEL_NAME) == 0) {
    switch (get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      pc = CHAIN_SWORD(pc, RETURN_FUNCTION_NAME);
      break;
    case is_language_c: {
      entity f = get_current_module_entity();
      if(void_function_p(f)) {
        // FI: this hides the parsed code structure and is wrong
        // in C because a value may have to be returned;
        pc = CHAIN_SWORD(pc, C_RETURN_FUNCTION_NAME);
        pc = CHAIN_SWORD(pc, ";");
      }
      else {
        entity rv = function_to_return_value(f);
        pc = CHAIN_SWORD(pc, C_RETURN_FUNCTION_NAME" ");
        pc = CHAIN_SWORD(pc, entity_user_name(rv));
        pc = CHAIN_SWORD(pc, ";");
      }
      break;
    }
    default:
      pips_internal_error("Language unknown !");
      break;
    }
  } else {
    switch (get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      pc = CHAIN_SWORD(pc, strdup("GOTO "));
      pc = CHAIN_SWORD(pc, tlabel);
      break;
    case is_language_c:
      /* In C, a label cannot begin with a number so "l" is added
         for this case */
      pc = CHAIN_SWORD(pc, strdup((isdigit(tlabel[0])?"goto l":"goto ")));
      pc = CHAIN_SWORD(pc, tlabel);
      pc = CHAIN_SWORD(pc, C_CONTINUE_FUNCTION_NAME);
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
    }
  }
  return pc;
}
/* Common prettyprint for the EOLE fused multiply-add/sub operators:
   fmx(a,b,c) is printed as "((a*b)+c)" when isadd holds, and as
   "((a*b)-c)" otherwise. */
static list
eole_fmx_specific_op(call obj,
                     int __attribute__ ((unused)) precedence,
                     bool __attribute__ ((unused)) leftmost,
                     bool isadd,
                     list pdl)
{
  list args = call_arguments(obj);
  expression a = EXPRESSION(CAR(args));
  expression b = EXPRESSION(CAR(CDR(args)));
  expression c = EXPRESSION(CAR(CDR(CDR(args))));
  int mult_prec = intrinsic_precedence("*");
  int add_prec = intrinsic_precedence("+");
  list words = NIL;

  /* "((a*b)" ... */
  words = CHAIN_SWORD(words, "(");
  words = CHAIN_SWORD(words, "(");
  words = gen_nconc(words, words_subexpression(a, mult_prec, true, pdl));
  words = CHAIN_SWORD(words, "*");
  words = gen_nconc(words, words_subexpression(b, mult_prec, true, pdl));
  words = CHAIN_SWORD(words, ")");

  /* ... then "+c)" or "-c)". */
  words = CHAIN_SWORD(words, isadd ? "+" : "-");
  words = gen_nconc(words, words_subexpression(c, add_prec, false, pdl));
  words = CHAIN_SWORD(words, ")");

  return words;
}
/* EOLE : The multiply-add operator is used within the optimize
transformation ( JZ - sept 98) - fma(a,b,c) -> ((a*b)+c)
*/
/* Prettyprint an EOLE fused multiply-add; delegates to the shared
   fmx routine with the additive flavor. */
list /* of string */
eole_fma_specific_op(call obj, int precedence, bool leftmost, list pdl)
{
return eole_fmx_specific_op(obj, precedence, leftmost, true, pdl);
}
/* MULTIPLY-SUB operator */
/* Prettyprint an EOLE fused multiply-sub: fms(a,b,c) -> ((a*b)-c);
   delegates to the shared fmx routine with the subtractive flavor. */
list /* of string */
eole_fms_specific_op(call obj, int precedence, bool leftmost, list pdl)
{
return eole_fmx_specific_op(obj, precedence, leftmost, false, pdl);
}
/* Check if the operator of the call is associated with a special
   prettyprint. For instance, the n-ary add and multiply operators used
   in the EOLE project print as "+" and "*" instead of their
   entity_local_name (JZ - sept 98). Falls back to the operator's local
   name when no special prettyprint is registered. */
static const char*
get_special_prettyprint_for_operator(call obj){
  static struct special_operator_prettyprint {
    const char * name;
    const char * op_prettyprint;
  } tab_operator_prettyprint[] = {
    {EOLE_SUM_OPERATOR_NAME,"+"},
    {EOLE_PROD_OPERATOR_NAME,"*"},
    {NULL,NULL}
  };
  const char* op_name = entity_local_name(call_function(obj));

  for (int i = 0; tab_operator_prettyprint[i].name != NULL; i++) {
    if (strcmp(tab_operator_prettyprint[i].name, op_name) == 0)
      return tab_operator_prettyprint[i].op_prettyprint;
  }
  return op_name;
}
/* Prettyprint a brace-expression intrinsic call by wrapping a copy of
   the call into a temporary expression, delegating to
   words_brace_expression, and freeing the temporary afterwards. */
static list words_brace_op(call obj,
int precedence __attribute__ ((unused)),
bool leftmost __attribute__ ((unused)),
list pdl)
{
/* Copy the call so that freeing the temporary expression does not
   destroy the caller's object. */
expression fake = call_to_expression(copy_call(obj));
list l = words_brace_expression(fake, pdl);
free_expression(fake);
return l;
}
/* Extension of "words_infix_binary_op" function for nary operators used
in the EOLE project - (since "nary" assumes operators with at least 2
op) - JZ (Oct. 98)*/
static list /* of string */
words_infix_nary_op(call obj, int precedence, bool leftmost, list pdl)
{
list /*of string*/ pc = NIL;
list /* of expressions */ args = call_arguments(obj);
/* get current operator precedence */
int prec = words_intrinsic_precedence(obj);
expression exp1 = EXPRESSION(CAR(args));
expression exp2;
list we1 = words_subexpression(exp1, prec,
prec>=MINIMAL_ARITHMETIC_PRECEDENCE? leftmost: true, pdl);
list we2;
/* open parenthese if necessary */
if ( prec < precedence )
pc = CHAIN_SWORD(pc, "(");
pc = gen_nconc(pc, we1);
/* reach the second arg */
args = CDR(args);
for(; args; args=CDR(args)) { /* for all args */
exp2 = EXPRESSION(CAR(args));
/*
* If the infix operator is either "-" or "/", I prefer not to delete
* the parentheses of the second expression.
* Ex: T = X - ( Y - Z ) and T = X / (Y*Z)
*
* Lei ZHOU Nov. 4 , 1991
*/
if ( strcmp(entity_local_name(call_function(obj)), "/") == 0 ) /* divide operator */
we2 = words_subexpression(exp2, MAXIMAL_PRECEDENCE, false, pdl);
else if ( strcmp(entity_local_name(call_function(obj)), "-") == 0 ) { /* minus operator */
if ( expression_call_p(exp2) &&
words_intrinsic_precedence(syntax_call(expression_syntax(exp2))) >=
intrinsic_precedence("*") )
/* precedence is greater than * or / */
we2 = words_subexpression(exp2, prec, false, pdl);
else
we2 = words_subexpression(exp2, MAXIMAL_PRECEDENCE, false, pdl);
}
else {
/* Other operators: only parenthesize the operand when precedences
   require it. */
we2 = words_subexpression(exp2, prec,
prec<MINIMAL_ARITHMETIC_PRECEDENCE, pdl);
}
/* operator prettyprint */
pc = CHAIN_SWORD(pc, get_special_prettyprint_for_operator(obj));
pc = gen_nconc(pc, we2);
}
/* close parenthese if necessary */
if ( prec < precedence )
pc = CHAIN_SWORD(pc, ")");
return(pc);
}
/*
* If the infix operator is either "-" or "/", I prefer not to delete
* the parentheses of the second expression.
* Ex: T = X - ( Y - Z ) and T = X / (Y*Z)
*
* Lei ZHOU Nov. 4 , 1991
*/
/* Pretty-print an infix binary operator call (a+b, a-b, a/b, a%b, ...).
 *
 * obj:        the intrinsic call to print
 * precedence: precedence of the enclosing operator, used to decide
 *             whether surrounding parentheses are needed
 * leftmost:   true when this expression is the leftmost operand of its
 *             enclosing expression (propagated to the first operand for
 *             arithmetic operators only)
 * pdl:        parser declaration list, passed down for C prettyprinting
 *
 * Returns a list of strings (words). The right operand is printed with a
 * precedence chosen per operator so that non-associative structure such as
 * x-(y-z) or x/(y*z) keeps its parentheses.
 */
static list
words_infix_binary_op(call obj, int precedence, bool leftmost, list pdl)
{
list pc = NIL;
list args = call_arguments(obj);
int prec = words_intrinsic_precedence(obj);
list we1 = words_subexpression(EXPRESSION(CAR(args)), prec,
prec>=MINIMAL_ARITHMETIC_PRECEDENCE? leftmost: true, pdl);
list we2;
const char* fun = entity_local_name(call_function(obj));
/* handling of internally renamed operators */
fun = renamed_op_handling (fun);
if(strcmp(fun, DIVIDE_OPERATOR_NAME) == 0) {
/* Do we want to add a space in case we2 starts with a dereferencing operator "*"?
Nga suggests to look at the quake benchmark of SPEC2000. */
/* force parentheses on the divisor: x/(y*z) must keep them */
we2 = words_subexpression(EXPRESSION(CAR(CDR(args))), MAXIMAL_PRECEDENCE, false, pdl);
}
else if (strcmp(fun, MINUS_OPERATOR_NAME) == 0 ) {
expression exp = EXPRESSION(CAR(CDR(args)));
if(expression_call_p(exp) &&
words_intrinsic_precedence(syntax_call(expression_syntax(exp))) >=
intrinsic_precedence(MULTIPLY_OPERATOR_NAME) )
/* precedence is greater than * or / */
we2 = words_subexpression(exp, prec, false, pdl);
else
/* force parentheses so x-(y-z) is not printed x-y-z */
we2 = words_subexpression(exp, MAXIMAL_PRECEDENCE, false, pdl);
}
else if(strcmp(fun, MULTIPLY_OPERATOR_NAME) == 0) {
expression exp = EXPRESSION(CAR(CDR(args)));
if(expression_call_p(exp) &&
ENTITY_DIVIDE_P(call_function(syntax_call(expression_syntax(exp))))) {
/* right operand is a division: keep its parentheses for integer
arithmetic, where a*(b/c) differs from a*b/c */
basic bexp = basic_of_expression(exp);
if(basic_int_p(bexp)) {
we2 = words_subexpression(exp, MAXIMAL_PRECEDENCE, false, pdl);
}
else
we2 = words_subexpression(exp, prec, false, pdl);
free_basic(bexp);
}
else
we2 = words_subexpression(exp, prec, false, pdl);
}
else {
/* If the operator in the second subexpression has the same
priority as the current operator, it has to be parenthesized
to respect the structure imposed by the programmer. For
instance, a+(b+c) does require parentheses whereas (a+b)+c is
the same as a+b+c. So we1 and we2 cannot be processed exactly
in the same way. */
we2 = words_subexpression(EXPRESSION(CAR(CDR(args))), prec+1,
prec<MINIMAL_ARITHMETIC_PRECEDENCE, pdl);
}
/* Use precedence to generate or not parentheses,
* unless parentheses are always required */
if(prec < precedence || (!precedence_p && precedence>0)) {
pc = CHAIN_SWORD(pc, "(");
}
if(prettyprint_language_is_fortran95_p()
&& strcmp(fun, FIELD_OPERATOR_NAME) == 0) {
/* NOTE(review): only we1 is emitted in this F95 field-access branch; we2
is built above but never printed here — looks intentional but should
be confirmed. */
pc = gen_nconc(pc, we1);
}
else if(prettyprint_language_is_c_p()) {
/* Check that C ambiguities such as "a+++b" for "a++ + b" or "a +
++b" are not generated */
if(strcmp(fun,"+")==0 || strcmp(fun, "-")==0) {
pips_assert("left and right subexpressions are defined",
!ENDP(we1) && !ENDP(we2));
/* compare the last char of the left words and the first char of the
right words against the operator to decide if a space is needed */
string l = STRING(CAR(gen_last(we1)));
string f = STRING(CAR(we2));
char lc = *(l+strlen(l)-1);
char fc = *f;
string pre = "";
string post = "";
if(*fun==lc)
pre = " ";
if(*fun==fc)
post = " ";
pc = gen_nconc(pc, we1);
pc = CHAIN_SWORD(pc, pre);
pc = CHAIN_SWORD(pc, fun);
pc = CHAIN_SWORD(pc, post);
pc = gen_nconc(pc, we2);
}
else {
pc = gen_nconc(pc, we1);
pc = CHAIN_SWORD(pc, fun);
pc = gen_nconc(pc, we2);
}
}
else {
pc = gen_nconc(pc, we1);
pc = CHAIN_SWORD(pc, fun);
pc = gen_nconc(pc, we2);
}
if(prec < precedence || (!precedence_p && precedence>0)) {
pc = CHAIN_SWORD(pc, ")");
}
return(pc);
}
/* Nga Nguyen: this case is added for comma expressions in C, but I am
not sure about its precedence => to be looked at more carefully.
Prints "e1, e2, ..., en", parenthesized when required by the context. */
static list words_comma_op(call obj,
int precedence,
bool __attribute__ ((unused)) leftmost,
list pdl)
{
  list args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");
  bool paren_p = prec < precedence || !precedence_p;
  list pc = NIL;

  if (paren_p)
    pc = CHAIN_SWORD(pc, "(");

  /* first operand, then each remaining operand preceded by a comma */
  pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(args)), prec, true, pdl));
  for (args = CDR(args); !ENDP(args); args = CDR(args)) {
    pc = CHAIN_SWORD(pc, space_p ? ", " : ",");
    pc = gen_nconc(pc, words_subexpression(EXPRESSION(CAR(args)), prec, true, pdl));
  }

  if (paren_p)
    pc = CHAIN_SWORD(pc, ")");

  return pc;
}
/* Print a C conditional expression "cond ? then : else", parenthesized
   when required by the surrounding precedence. */
static list words_conditional_op(call obj,
int precedence,
bool __attribute__ ((unused)) leftmost,
list pdl)
{
  list args = call_arguments(obj);
  int prec = words_intrinsic_precedence(obj);
  bool paren_p = prec < precedence || !precedence_p;
  expression cond_e = EXPRESSION(CAR(args));
  expression then_e = EXPRESSION(CAR(CDR(args)));
  expression else_e = EXPRESSION(CAR(CDR(CDR(args))));
  list pc = NIL;

  if (paren_p)
    pc = CHAIN_SWORD(pc, "(");
  pc = gen_nconc(pc, words_subexpression(cond_e, prec, true, pdl));
  pc = CHAIN_SWORD(pc, "?");
  pc = gen_nconc(pc, words_subexpression(then_e, prec, true, pdl));
  pc = CHAIN_SWORD(pc, ":");
  pc = gen_nconc(pc, words_subexpression(else_e, prec, true, pdl));
  if (paren_p)
    pc = CHAIN_SWORD(pc, ")");

  return pc;
}
/* precedence needed here
* According to the Precedence of Operators
* Arithmetic > Character > Relational > Logical
* Added by Lei ZHOU Nov. 4,91
*
* A precedence is an integer in [0..MAXIMAL_PRECEDENCE]
*/
/* Static table mapping each intrinsic operator/function name to its
 * prettyprint handler and precedence. It is loaded once into the
 * intrinsic_handlers hash table by init_intrinsic_handlers(); the table
 * is NULL-name terminated.
 */
static struct intrinsic_handler {
const char * name;
intrinsic_desc_t desc;
} tab_intrinsic_handler[] = {
{BRACE_INTRINSIC, { words_brace_op, 31 } },
{POWER_OPERATOR_NAME, { words_infix_binary_op, 30} },
{CONCATENATION_FUNCTION_NAME, {words_infix_binary_op, 30} },
/* The Fortran 77 standard does not allow x*-3 or x+-3, but this is dealt
* with by argument leftmost, not by priorities.
*/
{UNARY_MINUS_OPERATOR_NAME, { words_unary_minus, 25} },
/* {"--", words_unary_minus, 19}, */
{INVERSE_OPERATOR_NAME, { words_inverse_op, 21} },
{PLUS_OPERATOR_NAME, { words_infix_binary_op, 20} },
{MINUS_OPERATOR_NAME, { words_infix_binary_op, 20} },
/* Non-arithmetic operators have priorities lesser than
* MINIMAL_ARITHMETIC_PRECEDENCE; leftmost is restored to true for
* unary minus.
*/
{LESS_THAN_OPERATOR_NAME, { words_infix_binary_op, 15} },
{GREATER_THAN_OPERATOR_NAME, { words_infix_binary_op, 15} },
{LESS_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
{GREATER_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
{EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
{NON_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
{NOT_OPERATOR_NAME, { words_prefix_unary_op, 9} },
{AND_OPERATOR_NAME, { words_infix_binary_op, 8} },
{OR_OPERATOR_NAME, { words_infix_binary_op, 6} },
{EQUIV_OPERATOR_NAME, { words_infix_binary_op, 3} },
{NON_EQUIV_OPERATOR_NAME, { words_infix_binary_op, 3} },
{ASSIGN_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{ALLOCATE_FUNCTION_NAME, { words_stat_io_inst, 0} },
{DEALLOCATE_FUNCTION_NAME, { words_stat_io_inst, 0} },
{WRITE_FUNCTION_NAME, { words_io_inst, 0} },
{READ_FUNCTION_NAME, { words_io_inst, 0} },
{PRINT_FUNCTION_NAME, { words_io_inst, 0} },
{OPEN_FUNCTION_NAME, { words_io_inst, 0} },
{CLOSE_FUNCTION_NAME, { words_io_inst, 0} },
{INQUIRE_FUNCTION_NAME, { words_io_inst, 0} },
{BACKSPACE_FUNCTION_NAME, { words_io_inst, 0} },
{REWIND_FUNCTION_NAME, { words_io_inst, 0} },
{ENDFILE_FUNCTION_NAME, { words_io_inst, 0} },
{IMPLIED_DO_FUNCTION_NAME, { words_implied_do, 0} },
{RETURN_FUNCTION_NAME, { words_nullary_op,0} },
{C_RETURN_FUNCTION_NAME, { words_nullary_op,0} },
{PAUSE_FUNCTION_NAME, { words_nullary_op,0 } },
{STOP_FUNCTION_NAME, { words_nullary_op, 0} },
{CONTINUE_FUNCTION_NAME, { words_nullary_op,0} },
{END_FUNCTION_NAME, { words_nullary_op, 0} },
{FORMAT_FUNCTION_NAME, { words_prefix_unary_op, 0} },
{UNBOUNDED_DIMENSION_NAME, { words_unbounded_dimension, 0} },
{LIST_DIRECTED_FORMAT_NAME, { words_list_directed, 0} },
{SUBSTRING_FUNCTION_NAME, { words_substring_op, 0} },
{ASSIGN_SUBSTRING_FUNCTION_NAME, { words_assign_substring_op, 0} },
/* These operators are used within the optimize transformation in
order to manipulate operators such as n-ary add and multiply or
multiply-add operators ( JZ - sept 98) */
{EOLE_FMA_OPERATOR_NAME, { eole_fma_specific_op,
MINIMAL_ARITHMETIC_PRECEDENCE } },
{EOLE_FMS_OPERATOR_NAME, { eole_fms_specific_op,
MINIMAL_ARITHMETIC_PRECEDENCE } },
{EOLE_SUM_OPERATOR_NAME, { words_infix_nary_op, 20} },
{EOLE_PROD_OPERATOR_NAME, { words_infix_nary_op, 21} },
/* show IMA/IMS */
{IMA_OPERATOR_NAME, { eole_fma_specific_op,
MINIMAL_ARITHMETIC_PRECEDENCE } },
{IMS_OPERATOR_NAME, { eole_fms_specific_op,
MINIMAL_ARITHMETIC_PRECEDENCE } },
/* 05/08/2003 - Nga Nguyen - Here are C intrinsics.
The precedence is computed by using Table xx, page 49, book
"The C programming language" of Kernighan and Ritchie, and by
taking into account the precedence value of Fortran intrinsics. */
{FIELD_OPERATOR_NAME, { words_infix_binary_op, 30} },
{POINT_TO_OPERATOR_NAME, { words_infix_binary_op, 30} },
{POST_INCREMENT_OPERATOR_NAME, { words_postfix_unary_op, 30} },
{POST_DECREMENT_OPERATOR_NAME, { words_postfix_unary_op, 30} },
{PRE_INCREMENT_OPERATOR_NAME, { words_prefix_unary_op, 25} },
{PRE_DECREMENT_OPERATOR_NAME, { words_prefix_unary_op, 25} },
{ADDRESS_OF_OPERATOR_NAME, { words_prefix_unary_op,25} },
{DEREFERENCING_OPERATOR_NAME, { words_prefix_unary_op, 25} },
{UNARY_PLUS_OPERATOR_NAME, { words_prefix_unary_op, 25} },
/*{"-unary", words_prefix_unary_op, 25},*/
{BITWISE_NOT_OPERATOR_NAME, { words_prefix_unary_op, 25} },
{C_NOT_OPERATOR_NAME, { words_prefix_unary_op, 25} },
/* What is the priority for CAST? 23? */
#define CAST_OPERATOR_PRECEDENCE (23)
{C_MODULO_OPERATOR_NAME, { words_infix_binary_op, 22} },
{MULTIPLY_OPERATOR_NAME, { words_infix_binary_op, 22} },
{DIVIDE_OPERATOR_NAME, { words_infix_binary_op, 22} },
{PLUS_C_OPERATOR_NAME, { words_infix_binary_op, 20} },
{MINUS_C_OPERATOR_NAME, { words_infix_binary_op, 20} },
{LEFT_SHIFT_OPERATOR_NAME, { words_infix_binary_op, 18} },
{RIGHT_SHIFT_OPERATOR_NAME, { words_infix_binary_op, 18} },
{C_LESS_THAN_OPERATOR_NAME, { words_infix_binary_op, 15 } },
{C_GREATER_THAN_OPERATOR_NAME, { words_infix_binary_op, 15} },
{C_LESS_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
{C_GREATER_OR_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 15} },
{C_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 14} },
{C_NON_EQUAL_OPERATOR_NAME, { words_infix_binary_op, 14} },
{BITWISE_AND_OPERATOR_NAME, { words_infix_binary_op, 13} },
{BITWISE_XOR_OPERATOR_NAME, { words_infix_binary_op, 12} },
{BITWISE_OR_OPERATOR_NAME, { words_infix_binary_op, 11} },
{C_AND_OPERATOR_NAME, { words_infix_binary_op, 8} },
{C_OR_OPERATOR_NAME, { words_infix_binary_op, 6} },
{MULTIPLY_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{DIVIDE_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{MODULO_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{PLUS_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{MINUS_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{LEFT_SHIFT_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{RIGHT_SHIFT_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{BITWISE_AND_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{BITWISE_XOR_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
{BITWISE_OR_UPDATE_OPERATOR_NAME, { words_assign_op, ASSIGN_OPERATOR_PRECEDENCE} },
/* which precedence ? You are safe within an assignment. */
{CONDITIONAL_OPERATOR_NAME, { words_conditional_op, ASSIGN_OPERATOR_PRECEDENCE+1} },
/* which precedence ? You need parentheses within an assignment. */
{COMMA_OPERATOR_NAME, { words_comma_op, ASSIGN_OPERATOR_PRECEDENCE-1} },
/* OMP pragma function part */
{OMP_OMP_FUNCTION_NAME, { words_nullary_op, 0} },
{OMP_FOR_FUNCTION_NAME, { words_nullary_op, 0} },
{OMP_PARALLEL_FUNCTION_NAME, { words_nullary_op, 0} },
{OMP_REDUCTION_FUNCTION_NAME, { words_omp_red, 0} },
{NULL, { null, 0} }
};
/* Hash table mapping intrinsic names to intrinsic_desc_t*, built lazily. */
static hash_table intrinsic_handlers = hash_table_undefined;
/* Load tab_intrinsic_handler into intrinsic_handlers on first call.
 * Guarded internally, so it is idempotent and safe to call repeatedly. */
static void init_intrinsic_handlers() {
if(hash_table_undefined_p(intrinsic_handlers)) {
/* NOTE(review): sizeof(tab_intrinsic_handler) is a size in BYTES, not an
entry count; if hash_table_make expects an entry count this over-sizes
the table (harmless) — probably meant sizeof(tab)/sizeof(tab[0]).
To be confirmed against the hash_table_make API. */
intrinsic_handlers = hash_table_make(hash_string,sizeof(tab_intrinsic_handler));
for(struct intrinsic_handler *p = &tab_intrinsic_handler[0];p->name;p++) {
// no copy because the memory is static
hash_put(intrinsic_handlers,p->name,&p->desc);
}
}
}
/* After this call, name and desc are owned by intrinsic_handlers, but will
 * never be deallocated: they must point to permanent storage.
 */
void register_intrinsic_handler(const char* name,intrinsic_desc_t *desc) {
  /* init_intrinsic_handlers() is guarded internally, so calling it
     unconditionally is a no-op once the table exists */
  init_intrinsic_handlers();
  hash_put(intrinsic_handlers, name, desc);
}
/* Dispatch the prettyprinting of an intrinsic call to its registered
   handler; fall back to a regular call printout for unknown names. */
static list
words_intrinsic_call(call obj, int precedence, bool leftmost, list pdl)
{
  intrinsic_desc_t *desc;

  init_intrinsic_handlers(); /* no-op after the first call */
  desc = hash_get(intrinsic_handlers, entity_local_name(call_function(obj)));
  if (desc == HASH_UNDEFINED_VALUE)
    /* not a known intrinsic: print it as an ordinary call */
    return words_regular_call(obj, false, pdl);
  return desc->f(obj, precedence, leftmost, pdl);
}
/* Precedence of intrinsic named n, or 0 when n is not a known intrinsic. */
static int
intrinsic_precedence(const char* n)
{
  intrinsic_desc_t *desc;

  init_intrinsic_handlers(); /* no-op after the first call */
  desc = hash_get(intrinsic_handlers, n);
  return desc == HASH_UNDEFINED_VALUE ? 0 : desc->prec;
}
/* Precedence of the intrinsic called by obj (0 when unknown). */
static int
words_intrinsic_precedence(call obj)
{
  return intrinsic_precedence(entity_local_name(call_function(obj)));
}
/* Print a C "va_arg(ap, type)" construct. obj is the two-element list
   (expression, type) attached to the va_arg syntax node. */
static list words_va_arg(list obj, list pdl)
{
  expression ap = sizeofexpression_expression(SIZEOFEXPRESSION(CAR(obj)));
  type ty = sizeofexpression_type(SIZEOFEXPRESSION(CAR(CDR(obj))));
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");
  list pc;

  pc = CHAIN_SWORD(NIL, "va_arg(");
  pc = gen_nconc(pc, words_expression(ap, pdl));
  pc = CHAIN_SWORD(pc, space_p ? ", " : ",");
  pc = gen_nconc(pc, words_type(ty, pdl, false));
  pc = CHAIN_SWORD(pc, ")");
  return pc;
}
/* exported for cmfortran.c
 * Print a call: intrinsics go through their registered handler, other
 * functions through the regular call printer. */
list words_call(
call obj,
int precedence,
bool leftmost,
bool is_a_subroutine,
list pdl)
{
  entity f = call_function(obj);

  if (value_intrinsic_p(entity_initial(f))) {
    /* When parentheses are always requested (!precedence_p), force a
       maximal precedence except at the outermost level (precedence<=1). */
    int effective_precedence =
      (precedence_p || precedence <= 1) ? precedence : MAXIMAL_PRECEDENCE;
    return words_intrinsic_call(obj, effective_precedence, leftmost, pdl);
  }
  return words_genuine_regular_call(obj, is_a_subroutine, pdl);
}
/* This one is exported. Outer parentheses are never useful. pdl can
   point to an empty list, but it must be free on return. */
list /* of string */ words_expression(expression obj, list pdl)
{
  syntax s = expression_syntax(obj);
  return words_syntax(s, pdl);
}
/* exported for expression.c
 * Print a syntax node by dispatching on its tag; each case delegates to
 * the dedicated printer. */
list
words_syntax(syntax obj, list pdl)
{
  switch (syntax_tag(obj)) {
  case is_syntax_reference:
    return words_reference(syntax_reference(obj), pdl);
  case is_syntax_range:
    return words_range(syntax_range(obj), pdl);
  case is_syntax_call:
    /* top-level call: no enclosing precedence, leftmost position */
    return words_call(syntax_call(obj), 0, true, false, pdl);
  case is_syntax_cast:
    return words_cast(syntax_cast(obj), 0, pdl);
  case is_syntax_sizeofexpression: {
    /* FI->SG: I do not know if in_type_declaration is true, false
       or a formal parameter */
    bool in_type_declaration = true;
    return words_sizeofexpression(syntax_sizeofexpression(obj),
                                  in_type_declaration, pdl);
  }
  case is_syntax_subscript:
    return words_subscript(syntax_subscript(obj), pdl);
  case is_syntax_application:
    return words_application(syntax_application(obj), pdl);
  case is_syntax_va_arg:
    return words_va_arg(syntax_va_arg(obj), pdl);
  default:
    pips_internal_error("unexpected tag");
  }
  return NIL; /* not reached: pips_internal_error aborts */
}
/* exported for cmfortran.c
 * Print a subexpression, propagating the enclosing precedence and the
 * leftmost flag to calls and casts; other syntaxes ignore precedence. */
list words_subexpression(
expression obj,
int precedence,
bool leftmost,
list pdl)
{
  if (expression_call_p(obj))
    return words_call(syntax_call(expression_syntax(obj)),
                      precedence, leftmost, false, pdl);
  if (expression_cast_p(obj))
    return words_cast(expression_cast(obj), precedence, pdl);
  return words_syntax(expression_syntax(obj), pdl);
}
/**************************************************************** SENTENCE */
/* Build the sentence that closes the body of module e:
 * Fortran: "END"; C: "}"; Fortran 95: "END <construct kind> <name>",
 * where the construct kind is derived from the module's functional type.
 */
static sentence
sentence_tail(entity e)
{
sentence result = sentence_undefined;
switch(get_prettyprint_language_tag()) {
case is_language_fortran:
result = MAKE_ONE_WORD_SENTENCE(0, "END");
break;
case is_language_c:
result = MAKE_ONE_WORD_SENTENCE(0, "}");
break;
case is_language_fortran95: {
/* In fortran 95, we want the end to be followed by the type of construct
* and its name.
*/
list pc = NIL;
type te = entity_type(e);
functional fe;
type tr;
pc = CHAIN_SWORD(pc,"END ");
pips_assert("is functionnal", type_functional_p(te));
if (static_module_p(e))
pc = CHAIN_SWORD(pc,"static ");
fe = type_functional(te);
tr = functional_result(fe);
/* pick the construct keyword from the result type of the module */
switch(type_tag(tr)) {
case is_type_void:
if (entity_main_module_p(e))
pc = CHAIN_SWORD(pc,"PROGRAM ");
else {
if (entity_blockdata_p(e))
pc = CHAIN_SWORD(pc, "BLOCKDATA ");
else if (entity_f95module_p(e))
pc = CHAIN_SWORD(pc, "MODULE ");
else
pc = CHAIN_SWORD(pc,"SUBROUTINE ");
}
break;
case is_type_variable: {
pc = CHAIN_SWORD(pc,"FUNCTION ");
break;
}
case is_type_unknown:
/*
* For C functions with no return type.
* It can be treated as of type int, but we keep it unknown
* for the moment, to make the differences and to regenerate initial code
*/
break;
default:
pips_internal_error("unexpected type for result");
}
pc = CHAIN_SWORD(pc, entity_user_name(e));
result = make_sentence(is_sentence_unformatted, make_unformatted(NULL,
0,
0,
pc));
break;
}
default:
pips_internal_error("Language unknown !");
break;
}
return result;
}
/* exported for unstructured.c
 * Build an unformatted sentence "GOTO tlabel", carrying the statement
 * label (duplicated, may be NULL), ordering n and indentation margin. */
sentence
sentence_goto_label(
entity __attribute__ ((unused)) module,
const char* label,
int margin,
const char* tlabel,
int n)
{
  list words = words_goto_label(tlabel);
  unformatted u =
    make_unformatted(label ? strdup(label) : NULL, n, margin, words);
  return make_sentence(is_sentence_unformatted, u);
}
/* Build a "GOTO" sentence targeting the label of statement obj. */
static sentence sentence_goto(entity module,
const char* label,
int margin,
statement obj,
int n) {
  /* strip the internal LABEL_PREFIX from the target label name */
  const char* lname = entity_local_name(statement_label(obj));
  const char* tlabel = lname + sizeof(LABEL_PREFIX) - 1;

  pips_assert("Legal label required", strlen(tlabel) != 0);
  return sentence_goto_label(module, label, margin, tlabel, n);
}
/* Build the text of a code block (a list of statements)
@module is the module entity the code to display belong to
@label is the label associated to the block
@param margin is the indentation level
@param objs is the list of statements in the sequence to display
@param n is the statement number of the sequence
@pdl is the parser declaration list to track type declaration display
in C
@return the text of the block
*/
static text
text_block(entity module,
const char* label,
int margin,
list objs,
int n,
list pdl)
{
text r = make_text(NIL);
/* empty blocks are skipped unless a property requests them */
if (ENDP(objs)
&& ! (get_bool_property("PRETTYPRINT_EMPTY_BLOCKS")
|| get_bool_property("PRETTYPRINT_ALL_C_BLOCKS")))
return(r);
if(!empty_string_p(label)) {
pips_user_warning("Illegal label \"%s\". "
"Blocks cannot carry a label\n",
label);
}
/* "Unformatted" to be added at the beginning and at the end of a block: */
unformatted bm_beg = NULL;
unformatted bm_end = NULL;
// Test if block markers are required and set them:
bool flg_marker = mark_block(&bm_beg, &bm_end, n, margin);
// Print the begin block marker(s) if needed:
if (flg_marker == true)
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, bm_beg));
else if ((get_bool_property("PRETTYPRINT_ALL_EFFECTS")
|| get_bool_property("PRETTYPRINT_BLOCKS"))
&& get_bool_property("PRETTYPRINT_FOR_FORESYS"))
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
strdup("C$BB\n")));
if (get_bool_property("PRETTYPRINT_ALL_C_BLOCKS")) {
/* Since we generate new { }, we increment the margin for the nested
statements: (the "{{" marker itself is printed one level out, then
the margin is restored) */
margin -= INDENTATION;
if (margin < 0)
margin = 0;
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "{{"));
margin += INDENTATION;
}
// Append local variables if any:
r = insert_locals (r);
/* Now begin block markers and declarations have been printed, so print
the block instructions: */
for (; objs != NIL; objs = CDR(objs)) {
statement s = STATEMENT(CAR(objs));
text t = text_statement_enclosed(module, margin, s, false, true, pdl);
/* splice t's sentences into r and free the emptied shell */
text_sentences(r) = gen_nconc(text_sentences(r), text_sentences(t));
text_sentences(t) = NIL;
free_text(t);
}
if (get_bool_property("PRETTYPRINT_ALL_C_BLOCKS")) {
/* Get back to previous indentation: */
margin -= INDENTATION;
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "}}"));
margin += INDENTATION;
}
// Print the end block marker(s) if needed:
if (flg_marker == true)
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, bm_end));
return r;
}
/* @return a list of string with the variable that need to be private in the
* current context. The context takes care of the kind of output. For example
* in the case of open mp the variables would be encapsulated into
* the private() clause like this: private (a,b).
* @param obj the loop to look at.
* @param pdl parser declaration list, forwarded to words_declaration.
*/
static list /* of string */
loop_private_variables(loop obj, list pdl)
{
bool all_private = get_bool_property("PRETTYPRINT_ALL_PRIVATE_VARIABLES"),
hpf_private = pp_hpf_style_p(), omp_private = pp_omp_style_p(),
some_before = false;
list l = NIL;
// list of local entities
// In case of openmp the variable declared in the loop body should
// not be made private, so ask for removing them from the list of locals.
// If all_private is false -> remove loop indice from the list of locals.
list locals = loop_private_variables_as_entites(obj,
omp_private,
!all_private);
/* NOTE(review): l is still NIL here, so the first count printed is always
0; the intent was probably gen_length(locals). Debug output only. */
pips_debug(5, "#printed %zd/%zd\n", gen_length(l),
gen_length(loop_locals(obj)));
/* stuff around if not empty
*/
if (locals) {
/* prefix of the privatization clause, language/style dependent */
string private = string_undefined;
if (hpf_private) {
private = "NEW(";
} else if (omp_private) {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
private = "PRIVATE(";
break;
case is_language_c:
private = "private(";
break;
case is_language_fortran95:
pips_internal_error("Need to update F95 case");
break;
default:
pips_internal_error("Language unknown !");
break;
}
} else if(get_prettyprint_language_tag()==is_language_fortran) {
/* This is debugging way to print out code. I do not know which
Fortran parser takes this language extension. */
private = "PRIVATE ";
}
else {
/* In C case, it might be a good idea to re-declare the private
variables in the loop body, exceot for outer loop indices,
but this is not easy here. PIPS data structures should be
updated because loop_private is somehow redundant with
statement declarations. */
pips_user_warning("Privatized variables are ignored with the "
"current prettyprinter options.\n");
}
if(!string_undefined_p(private)) {
/* comma-separated list of private variables.
* built in reverse order to avoid adding at the end...
*/
FOREACH (ENTITY, p, locals) {
if (some_before)
l = CHAIN_SWORD(l, ",");
else
some_before = true; /* from now on commas, triggered... */
l = gen_nconc(l, words_declaration(p, true, pdl));
}
gen_free_list(locals);
l = CONS(STRING, MAKE_SWORD(private), l);
if (hpf_private || omp_private)
/* NOTE(review): return value discarded; this appears safe only
because l is non-empty here, so the word is appended in place —
to be confirmed against CHAIN_SWORD's definition. */
CHAIN_SWORD(l, ")");
}
}
return l;
}
/* Returns a formatted text for the HPF INDEPENDENT and NEW directives.
* There is no support for continuation lines, but the directives cannot
* use the unformatted domain anyway, because the directive prolog would
* not be handled properly there.
*/
/* Return a freshly malloc'ed copy of prefix; in Fortran the copy is
   additionally padded with `margin` trailing blanks (indentation must
   follow the directive sentinel there). Caller frees the result. */
static string
marged(
string prefix,
int margin)
{
  size_t len = strlen(prefix);
  string result = (string) malloc(len + margin + 1);

  strcpy(result, prefix);
  if (prettyprint_language_is_fortran_p()) {
    size_t i = len;
    while (margin-- > 0)
      result[i++] = ' ';
    result[i] = '\0';
  }
  return result;
}
/* Build the directive text (HPF or OpenMP) for loop obj at the given
 * margin: the directive prefix, an optional "parallel" keyword, and the
 * privatized-variable clause when applicable.
 * basic_directive/basic_continuation are the sentinel strings to emit at
 * the start of the line and on continuation lines respectively.
 */
static text text_directive(loop obj, /* the loop we're interested in */
int margin,
string basic_directive,
string basic_continuation,
string parallel,
list pdl) {
string dir = marged(basic_directive, margin), cont =
marged(basic_continuation, margin);
text t = make_text(NIL);
/* NOTE(review): fixed 100-byte line buffer, as flagged by the original
"???" — long private lists rely on add_to_current_line wrapping; to be
confirmed. */
char buffer[100]; /* ??? */
list /* of string */l = NIL;
bool is_hpf = pp_hpf_style_p(), is_omp = pp_omp_style_p();
bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");
/* start buffer */
buffer[0] = '\0';
if (execution_parallel_p(loop_execution(obj))) {
/* parallel loop: directive + parallel keyword + private clause */
add_to_current_line(buffer, dir, cont, t);
add_to_current_line(buffer, parallel, cont, t);
l = loop_private_variables(obj, pdl);
if (l && is_hpf)
add_to_current_line(buffer, space_p ? ", " : ",", cont, t);
} else if (get_bool_property("PRETTYPRINT_ALL_PRIVATE_VARIABLES")) {
/* sequential loop: only emit a directive if there is a private clause */
l = loop_private_variables(obj, pdl);
if (l) {
add_to_current_line(buffer, dir, cont, t);
if (is_omp) {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
add_to_current_line(buffer, "DO ", cont, t);
break;
case is_language_c:
add_to_current_line(buffer, "for ", cont, t);
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
}
}
if (strlen(buffer) > 0)
MAP(STRING, s, add_to_current_line(buffer, s, cont, t), l);
/* what about reductions? should be associated to the ri somewhere.
*/
close_current_line(buffer, t, cont);
free(dir);
free(cont);
return t;
}
#define HPF_SENTINEL "!HPF$"
#define HPF_DIRECTIVE HPF_SENTINEL " "
#define HPF_CONTINUATION HPF_SENTINEL "x"
#define HPF_INDEPENDENT "INDEPENDENT"
/* Emit the HPF INDEPENDENT (and NEW clause) directive for loop l at
   margin m. The pdl argument of text_directive is useless in Fortran,
   so NIL is passed. */
static text text_hpf_directive(loop l, int m)
{
  return text_directive(l, m, "\n" HPF_DIRECTIVE, HPF_CONTINUATION,
                        HPF_INDEPENDENT, NIL);
}
#define OMP_SENTINEL "!$OMP"
#define OMP_DIRECTIVE OMP_SENTINEL " "
#define OMP_CONTINUATION OMP_SENTINEL "x"
#define OMP_PARALLELDO "PARALLEL DO "
#define OMP_C_SENTINEL "#pragma omp"
#define OMP_C_DIRECTIVE OMP_C_SENTINEL " "
#define OMP_C_CONTINUATION OMP_C_SENTINEL "x"
#define OMP_C_PARALLELDO "parallel for "
/* Emit the OpenMP parallel-loop directive for loop l at margin m,
   in the sentinel style of the current prettyprint language.
   text_directive takes care of private variables; more should be done
   for shared variables, reductions and clauses such as lastprivate or
   copyin. The pdl argument is useless in Fortran, so NIL is passed. */
text
text_omp_directive(loop l, int m)
{
  text t = text_undefined;

  switch (get_prettyprint_language_tag()) {
  case is_language_fortran:
  case is_language_fortran95:
    t = text_directive(l, m, "\n" OMP_DIRECTIVE, OMP_CONTINUATION,
                       OMP_PARALLELDO, NIL);
    break;
  case is_language_c:
    t = text_directive(l, m, OMP_C_DIRECTIVE, OMP_C_CONTINUATION,
                       OMP_C_PARALLELDO, NIL);
    break;
  default:
    pips_internal_error("Language unknown !");
    break;
  }
  return t;
}
/* exported for fortran90.c
 * Default prettyprint of a loop: optional HPF/OpenMP directives, the
 * DO/DOALL (Fortran) or for/forall (C) prologue, the optional PRIVATE
 * declaration, the body, and the ENDDO/"}" postlogue.
 */
text text_loop_default(entity module,
const char* label,
int margin,
loop obj,
int n,
list pdl) {
list pc = NIL;
sentence first_sentence = sentence_undefined;
unformatted u;
text r = make_text(NIL);
statement body = loop_body( obj );
entity the_label = loop_label(obj);
/* loop label name without the internal LABEL_PREFIX */
const char* do_label = entity_local_name(the_label) + sizeof(LABEL_PREFIX) -1;
bool structured_do = entity_empty_label_p(the_label);
bool doall_loop_p = false;
bool hpf_prettyprint = pp_hpf_style_p();
bool do_enddo_p = get_bool_property("PRETTYPRINT_DO_LABEL_AS_COMMENT");
bool all_private = get_bool_property("PRETTYPRINT_ALL_PRIVATE_VARIABLES");
bool braces_p = !one_liner_p(body) || prettyprint_all_c_braces_p;
if (execution_sequential_p(loop_execution(obj))) {
doall_loop_p = false;
} else {
doall_loop_p = pp_doall_style_p();
}
/* HPF directives before the loop if required (INDEPENDENT and NEW) */
if (hpf_prettyprint)
MERGE_TEXTS(r, text_hpf_directive(obj, margin));
/* idem if Open MP directives are required */
if (pp_omp_style_p())
MERGE_TEXTS(r, text_omp_directive(obj, margin));
/* LOOP prologue.
*/
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(NIL, (doall_loop_p) ? "DOALL " : "DO " );
if (!structured_do && !doall_loop_p && !do_enddo_p) {
pc = CHAIN_SWORD(pc, concatenate(do_label, " ", NULL));
}
break;
case is_language_c:
pc = CHAIN_SWORD(NIL, (doall_loop_p) ? "forall(" : "for(" );
break;
default:
pips_internal_error("Language unknown !");
break;
}
//pc = CHAIN_SWORD(pc, entity_local_name(loop_index(obj)));
pc = CHAIN_SWORD(pc, entity_user_name(loop_index(obj)));
pc = CHAIN_SWORD(pc, " = ");
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = gen_nconc(pc, words_loop_range(loop_range(obj), pdl));
u = make_unformatted(strdup(label), n, margin, pc);
ADD_SENTENCE_TO_TEXT(r, first_sentence =
make_sentence(is_sentence_unformatted, u));
break;
case is_language_c:
pc = gen_nconc(pc, C_loop_range(loop_range(obj), loop_index(obj), pdl));
if (braces_p)
pc = CHAIN_SWORD(pc," {");
/* a non-empty C label is printed on its own line before the for() */
if ((label != NULL) && (label[0] != '\0')) {
pips_debug(9, "the label %s need to be print for a for C loop", label);
u = make_unformatted(strdup(label), 0, 0, NULL);
ADD_SENTENCE_TO_TEXT(r, first_sentence =
make_sentence(is_sentence_unformatted, u));
}
/* NOTE(review): in C with an empty label, first_sentence keeps the value
sentence_undefined when attach_loop_to_sentence_up_to_end_of_text is
called below — presumably handled by the callee; to be confirmed. */
u = make_unformatted(NULL, n, margin, pc);
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u));
break;
default:
pips_internal_error("Language unknown !");
break;
}
/* builds the PRIVATE scalar declaration if required
*/
if (!ENDP(loop_locals(obj)) && (doall_loop_p || all_private)
&& !hpf_prettyprint) {
list /* of string */lp = loop_private_variables(obj, pdl);
// initialize the local variable text if needed
if ((local_flg == false) && (lp)) {
local_flg = true;
local_var = make_text(NIL);
}
if (lp)
/* local_var is a global variable which is exploited
later... */
/* FI: I do not understand why the local declarations were
not added right away. I hope my change (simplification)
does not break something else that is not tested by our
non-regression suite. */
if (!pp_omp_style_p()) {
ADD_SENTENCE_TO_TEXT
// ( local_var,
( r,
make_sentence(is_sentence_unformatted,
make_unformatted(NULL, 0, margin+INDENTATION, lp)));
}
}
/* loop BODY
*/
MERGE_TEXTS(r, text_statement_enclosed(module,
margin+INDENTATION,
body,
!one_liner_p(body),
!one_liner_p(body),
pdl));
/* LOOP postlogue
*/
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
if (structured_do || doall_loop_p || do_enddo_p || pp_cray_style_p()
|| pp_craft_style_p() || pp_cmf_style_p()) {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ENDDO"));
}
break;
case is_language_c:
if (braces_p)
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
break;
default:
pips_internal_error("Language unknown !");
break;
}
attach_loop_to_sentence_up_to_end_of_text(first_sentence, r, obj);
return r;
}
/* exported for conversion/look_for_nested_loops.c
 * Prettyprint a loop, dispatching on its execution tag and on the
 * selected prettyprint style (CMF, CRAFT, Fortran 90 array syntax, or
 * the default printer).
 */
text text_loop(
entity module,
const char* label,
int margin,
loop obj,
int n,
list pdl)
{
text r = make_text(NIL);
statement body = loop_body( obj ) ;
entity the_label = loop_label(obj);
const char* do_label = entity_local_name(the_label)+sizeof(LABEL_PREFIX) -1;
bool structured_do = entity_empty_label_p(the_label);
bool do_enddo_p = get_bool_property("PRETTYPRINT_DO_LABEL_AS_COMMENT");
/* small hack to show the initial label of the loop to name it...
*/
if(!structured_do && do_enddo_p)
{
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
strdup(concatenate("! INITIALLY: DO ", do_label, "\n", NULL))));
}
/* quite ugly management of other prettyprints...
*/
switch(execution_tag(loop_execution(obj)) ) {
case is_execution_sequential:
MERGE_TEXTS(r, text_loop_default(module, label, margin, obj, n, pdl));
break ;
case is_execution_parallel:
if (pp_cmf_style_p()) {
text aux_r;
if((aux_r = text_loop_cmf(module, label, margin, obj, n, NIL, NIL))
!= text_undefined) {
MERGE_TEXTS(r, aux_r);
}
}
else if (pp_craft_style_p()) {
text aux_r;
if((aux_r = text_loop_craft(module, label, margin, obj, n, NIL, NIL))
!= text_undefined) {
MERGE_TEXTS(r, aux_r);
}
}
else if (pp_f90_style_p()) {
/* F90 array syntax only applies when the body is a single
assignment, possibly wrapped in a one-statement sequence;
otherwise fall back to the default printer. */
instruction bi = statement_instruction(body); // body instruction
bool success_p = false;
if(instruction_assign_p(bi) ) {
MERGE_TEXTS(r, text_loop_90(module, label, margin, obj, n));
success_p = true;
}
else if(instruction_sequence_p(bi)) {
list sl = sequence_statements(instruction_sequence(bi));
if(gen_length(sl)==1) {
statement ibs = STATEMENT(CAR(sl));
instruction ibi = statement_instruction(ibs);
if(instruction_assign_p(ibi) ) {
MERGE_TEXTS(r, text_loop_90(module, label, margin, obj, n));
success_p = true;
}
}
}
if(!success_p) {
MERGE_TEXTS(r, text_loop_default(module, label, margin, obj, n, pdl));
}
}
else {
MERGE_TEXTS(r, text_loop_default(module, label, margin, obj, n, pdl));
}
break ;
default:
pips_internal_error("Unknown tag") ;
}
return r;
}
/* Prettyprint a while loop (pre-tested "while" or C post-tested
 * "do ... while") as a text object.
 *
 * module: enclosing module entity, forwarded to statement prettyprinting
 * label:  statement label to emit on the loop header line
 * margin: current indentation
 * obj:    the whileloop to print
 * n:      statement number forwarded to make_unformatted()
 * pdl:    previous declaration list, forwarded to words_expression()
 */
static text text_whileloop(entity module,
const char* label,
int margin,
whileloop obj,
int n,
list pdl) {
list pc = NIL;
sentence first_sentence;
unformatted u;
text r = make_text(NIL);
statement body = whileloop_body( obj );
entity the_label = whileloop_label(obj);
/* Skip the label prefix to get the user-visible numeric label */
const char* do_label = entity_local_name(the_label) + sizeof(LABEL_PREFIX) -1;
bool structured_do = entity_empty_label_p(the_label);
bool do_enddo_p = get_bool_property("PRETTYPRINT_DO_LABEL_AS_COMMENT");
evaluation eval = whileloop_evaluation(obj);
/* Show the initial label of the loop to name it...
* FI: I believe this is useless for while loops since they cannot
* be parallelized.
*/
if(!structured_do && do_enddo_p) {
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
strdup(concatenate("! INITIALLY: DO ", do_label, "\n", NULL))));
}
/* Pre-tested loop: Fortran DO WHILE or C while() */
if(evaluation_before_p(eval)) {
switch(get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
/* LOOP prologue.
*/
pc = CHAIN_SWORD(NIL, "DO " );
if(!structured_do && !do_enddo_p) {
pc = CHAIN_SWORD(pc, concatenate(do_label, " ", NULL));
}
pc = CHAIN_SWORD(pc, "WHILE (");
pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl));
pc = CHAIN_SWORD(pc, ")");
u = make_unformatted(strdup(label), n, margin, pc);
ADD_SENTENCE_TO_TEXT(r, first_sentence =
make_sentence(is_sentence_unformatted, u));
/* loop BODY
*/
MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl));
/* LOOP postlogue
*/
if(structured_do) {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ENDDO"));
}
break;
case is_language_c:
{
/* Braces are needed for multi-statement bodies or when the user
   requested braces everywhere */
bool braces_p = !one_liner_p(body) || prettyprint_all_c_braces_p;
if(!braces_p) {
pc = CHAIN_SWORD(NIL,"while (");
pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl));
pc = CHAIN_SWORD(pc,")");
u = make_unformatted(strdup(label), n, margin, pc);
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u));
MERGE_TEXTS(r, text_statement_enclosed(module,
margin+INDENTATION,
body,
!one_liner_p(body),
!one_liner_p(body),
pdl));
//if (structured_do)
//ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
} else {
pc = CHAIN_SWORD(NIL,"while (");
pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl));
pc = CHAIN_SWORD(pc,") {");
u = make_unformatted(strdup(label), n, margin, pc);
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u));
MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl));
if(structured_do)
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
}
}
break;
default:
pips_internal_error("Language unknown !");
break;
}
} else {
/* Post-tested loop: only C "do { } while ();" is supported */
pips_assert ("Only C language is managed here",
prettyprint_language_is_c_p());
/* C do { s; } while (cond); loop*/
pc = CHAIN_SWORD(NIL,"do {");
u = make_unformatted(strdup(label), n, margin, pc);
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u));
MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl));
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
pc = CHAIN_SWORD(NIL,"while (");
pc = gen_nconc(pc, words_expression(whileloop_condition(obj), pdl));
pc = CHAIN_SWORD(pc, ");");
/* NOTE(review): the label is emitted on the "do {" line; none here */
u = make_unformatted(NULL, n, margin, pc);
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u));
}
/* attach_loop_to_sentence_up_to_end_of_text(first_sentence, r, obj); */
return r;
}
/* exported for unstructured.c
*/
/* Build the initial text of a statement: optional decoration produced
 * by the text_statement_hook, plus an optional comment line showing the
 * statement ordering when the corresponding properties are set.
 *
 * module: enclosing module entity
 * margin: current indentation
 * obj:    the statement being prettyprinted
 *
 * Returns a (possibly empty) text; blocks and unstructured instructions
 * are skipped unless the matching PRETTYPRINT_* properties request them.
 */
text
init_text_statement(
entity module,
int margin,
statement obj)
{
instruction i = statement_instruction(obj);
text r;
if (get_bool_property("PRETTYPRINT_ALL_EFFECTS")
|| !((instruction_block_p(i) &&
!get_bool_property("PRETTYPRINT_BLOCKS")) ||
(instruction_unstructured_p(i) &&
!get_bool_property("PRETTYPRINT_UNSTRUCTURED")))) {
/* FI: before calling the hook,
* statement_ordering(obj) should be checked */
r = (*text_statement_hook)( module, margin, obj );
/* Only decorate when the hook is a real one, not the default */
if (text_statement_hook != empty_text)
attach_decoration_to_text(r);
}
else
r = make_text( NIL ) ;
if (get_bool_property("PRETTYPRINT_ALL_EFFECTS") ||
get_bool_property("PRETTYPRINT_STATEMENT_ORDERING")) {
char *buffer;
int so = statement_ordering(obj) ;
if (!(instruction_block_p(statement_instruction(obj)) &&
(! get_bool_property("PRETTYPRINT_BLOCKS")))) {
/* Emit "(unstructured number, statement number)" as a comment;
   buffer ownership is transferred to the formatted sentence */
if (so != STATEMENT_ORDERING_UNDEFINED)
asprintf(&buffer, "%s (%d,%d)\n", get_comment_sentinel(),
ORDERING_NUMBER(so), ORDERING_STATEMENT(so));
else
asprintf(&buffer, "%s (statement ordering unavailable)\n",
get_comment_sentinel());
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
buffer));
}
}
return( r ) ;
}
/* Prettyprint a test with an empty false branch as a one-line
 * conditional: Fortran logical IF "IF (cond) call" or C "if (cond)"
 * followed by the true statement.
 *
 * NOTE(review): the module parameter is tagged unused but it is
 * actually passed to text_statement() in the C case below.
 *
 * In Fortran the true branch must be a call instruction (see the
 * guard in text_test()); in C any one-liner statement is allowed.
 */
static text text_logical_if(entity __attribute__ ((unused)) module,
const char* label,
int margin,
test obj,
int n,
list pdl) {
text r = make_text(NIL);
list pc = NIL;
statement tb = test_true(obj);
switch(get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(pc, strdup("IF ("));
break;
case is_language_c:
pc = CHAIN_SWORD(pc, strdup("if ("));
break;
default:
pips_internal_error("Language unknown !");
break;
}
pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
instruction ti = instruction_undefined;
call c = call_undefined;
text t = text_undefined;
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(pc, ") ");
/* The true branch is printed inline as a call on the same line */
ti = statement_instruction(tb);
c = instruction_call(ti);
pc = gen_nconc(pc, words_call(c, 0, true, true, pdl));
ADD_SENTENCE_TO_TEXT(r,
make_sentence(is_sentence_unformatted,
make_unformatted(strdup(label), n,
margin, pc)));
break;
case is_language_c:
pc = CHAIN_SWORD(pc, ")"); // Do not add a useless SPACE
/* The true branch is printed indented on the following line(s);
   its sentences are then stolen from t before t is freed */
t = text_statement(module, margin + INDENTATION, tb, pdl);
ADD_SENTENCE_TO_TEXT(r,
make_sentence(is_sentence_unformatted,
make_unformatted(strdup(label), n,
margin, pc)));
text_sentences(r) = gen_nconc(text_sentences(r), text_sentences(t));
text_sentences(t) = NIL;
free_text(t);
break;
default:
pips_internal_error("Language unknown !");
break;
}
ifdebug(8) {
fprintf(stderr, "logical_if=================================\n");
print_text(stderr, r);
fprintf(stderr, "==============================\n");
}
return (r);
}
/* Helper for dangling-else detection.
 *
 * NOTE(review): despite its name, this predicate returns FALSE when
 * the false branch is empty (i.e. when there really is no else
 * clause). Its only caller, test_with_dangling_else_p(), negates the
 * result, so the double inversion yields the intended behavior —
 * confirm before renaming or "fixing" either side.
 *
 * When the false branch is itself a test, the question is asked
 * recursively on the innermost test of the else-if chain.
 */
static bool test_with_no_else_clause_p(test t)
{
bool no_else_p = true;
if(empty_statement_p(test_false(t)))
no_else_p = false;
else {
statement fs = test_false(t);
if(statement_test_p(fs)) // Go down recursively
no_else_p = test_with_no_else_clause_p(statement_test(fs));
else
no_else_p = true;
}
return no_else_p;
}
/* Some code shared by text_block_if and text_block_ifthen */
/* Detect a dangling-else situation: the true branch is itself a test
 * and the outer test carries a non-empty else clause, so braces are
 * required around the true branch to keep the else attached to the
 * right if. Shared by text_block_if and text_block_ifthen.
 *
 * NOTE(review): inner_else_p relies on the inverted truth value of
 * test_with_no_else_clause_p() (see that function's comment).
 */
static bool test_with_dangling_else_p(test t)
{
statement fb = test_false(t);
bool outer_else_p = !nop_statement_p(fb); // obj contains a non-empty else clause
/* Do we have a test as a true branch, a test with no else clause? */
statement ts = effective_test_true(t);
bool inner_test_p = statement_test_p(ts);
bool inner_else_p = inner_test_p?
test_with_no_else_clause_p(statement_test(ts)) : false;
bool dangling_else_p = inner_test_p && outer_else_p && !inner_else_p;
return dangling_else_p;
}
/* Prettyprint the condition, the true and, possibly, the false branch.
*
* Manage redundant braces in C according to either the standard, or
* gcc guidelines or a request to print them all.
*
* Brace management is a bit complex because the clausing brace of the
* true block may be printed with the else branch or as a final brace
* when the else branch is empty.
*/
/* Prettyprint a full test: condition, true branch and, when useful,
 * the false branch (Fortran IF/ELSE/ENDIF or C if/else with brace
 * management).
 *
 * module: enclosing module entity
 * label:  statement label for the header line
 * margin: current indentation
 * obj:    the test to print
 * n:      statement number
 * pdl:    previous declaration list for words_expression()
 *
 * C brace management: true_braces_p decides whether the true branch
 * opens a brace on the "if (...)" line; final_braces_p tracks whether
 * a closing "}" is still owed at the end, since the true block's
 * closing brace may instead be printed just before "else".
 */
static text text_block_if(entity module,
const char* label,
int margin,
test obj,
int n,
list pdl) {
text r = make_text(NIL);
list pc = NIL;
statement test_false_obj;
bool one_liner_true_statement_p = one_liner_p(test_true(obj));
bool one_liner_false_statement_p = one_liner_p(test_false(obj));
//bool else_branch_p = false; /* Is the else branch empty? */
bool dangling_else_p = test_with_dangling_else_p(obj);
bool true_braces_p = !one_liner_true_statement_p
|| dangling_else_p
|| gcc_if_block_braces_required_p(obj)
|| prettyprint_all_c_braces_p;
bool final_braces_p = true_braces_p;
/* Prettyprint the condition and the true branch */
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(pc, "IF (");
pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
pc = CHAIN_SWORD(pc, ") THEN");
break;
case is_language_c:
pc = CHAIN_SWORD(pc, "if (");
pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
if(true_braces_p)
pc = CHAIN_SWORD(pc, ") {");
else
pc = CHAIN_SWORD(pc, ")");
break;
default:
pips_internal_error("Language unknown !");
break;
}
ADD_SENTENCE_TO_TEXT(r,
make_sentence(is_sentence_unformatted,
make_unformatted(strdup(label), n,
margin, pc)));
MERGE_TEXTS(r, text_statement_enclosed(module,
margin+INDENTATION,
test_true(obj),
!one_liner_true_statement_p,
!one_liner_true_statement_p,
pdl));
/* Prettyprint the false branch if it is useful */
test_false_obj = test_false(obj);
if(statement_undefined_p(test_false_obj)) {
pips_internal_error("undefined statement");
}
/* The else branch is printed when it carries a comment, real code,
   or when the relevant PRETTYPRINT properties force empty blocks or
   labels to be shown */
if(!statement_with_empty_comment_p(test_false_obj)
|| (!empty_statement_p(test_false_obj)
&& !continue_statement_p(test_false_obj))
|| (empty_statement_p(test_false_obj)
&& (get_bool_property("PRETTYPRINT_EMPTY_BLOCKS")))
|| (continue_statement_p(test_false_obj)
&& (get_bool_property("PRETTYPRINT_ALL_LABELS")))) {
//else_branch_p = true;
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ELSE"));
break;
case is_language_c:
/* Close the true block before starting the else clause */
if(true_braces_p) {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
final_braces_p = false;
}
/* FI: I am not sure this test is safe and that no dangling else
can occur */
if(one_liner_false_statement_p
&& !prettyprint_all_c_braces_p) {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"else"));
final_braces_p = false;
} else {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"else {"));
final_braces_p = true;
}
break;
default:
pips_internal_error("Language unknown !");
break;
}
MERGE_TEXTS(r, text_statement(module, margin+INDENTATION,
test_false_obj, pdl));
}
/* Prettyprint the closing of the test */
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,strdup("ENDIF")));
break;
case is_language_c:
if(final_braces_p)
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,strdup("}")));
break;
default:
pips_internal_error("Language unknown !");
break;
}
ifdebug(8) {
fprintf(stderr, "text_block_if=================================\n");
print_text(stderr, r);
fprintf(stderr, "==============================\n");
}
return (r);
}
/* Prettyprint an IO-check test (a test whose condition references an
 * IO entity) by replacing the structured if with a goto over the true
 * branch to a fresh CONTINUE label, which matches the control flow of
 * Fortran IO error branches.
 *
 * NOTE(review): strglab skips the first character of the new label
 * local name — presumably the label prefix; confirm against
 * new_label_local_name().
 */
static text text_io_block_if(entity module,
const char* label,
int margin,
test obj,
int n,
list pdl) {
text r = make_text(NIL);
list pc = NIL;
if (!empty_statement_p(test_true(obj))) {
char* label_local_name = new_label_local_name(module);
char* strglab= label_local_name + 1;
/* Jump over the true branch when the condition does NOT hold */
r = make_text(CONS(SENTENCE,
sentence_goto_label(module, label, margin,
strglab, n),
NIL));
ADD_SENTENCE_TO_TEXT(r,
make_sentence(is_sentence_unformatted,
make_unformatted(strdup(label), n,
margin, pc)));
MERGE_TEXTS(r, text_statement(module, margin,
test_true(obj), pdl));
/* Emit the landing CONTINUE statement carrying the fresh label */
string str = string_undefined;
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
str = strdup(CONTINUE_FUNCTION_NAME);
break;
case is_language_c:
str = strdup(C_CONTINUE_FUNCTION_NAME);
break;
case is_language_fortran95:
pips_internal_error("Need to update F95 case");
break;
default:
pips_internal_error("Language unknown !");
break;
}
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted,
make_unformatted(strdup(strglab), n, margin,
CONS(STRING, str, NIL))));
free(label_local_name);
}
if (!empty_statement_p(test_false(obj)))
MERGE_TEXTS(r, text_statement(module, margin,
test_false(obj), pdl));
return (r);
}
/* Prettyprint a test when its false branch is empty */
/* Prettyprint the "if/then" part of a test only: condition and true
 * branch, without else clause and without the Fortran ENDIF (the
 * caller, text_test(), appends the else-if chain and the closing).
 *
 * C braces are forced when the true branch holds several statements,
 * when the user asked for braces everywhere, or when a dangling else
 * would otherwise bind to the wrong if.
 */
static text text_block_ifthen(entity module,
const char* label,
int margin,
test obj,
int n,
list pdl) {
text r = make_text(NIL);
list pc = NIL;
statement tb = test_true(obj);
bool dangling_else_p = test_with_dangling_else_p(obj);
bool braces_p =
!one_liner_p(tb) // several statement in the true branch
|| prettyprint_all_c_braces_p // use request for braces
|| dangling_else_p; // else clause would be associated to the wrong if
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(pc, "IF (");
pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
pc = CHAIN_SWORD(pc, ") THEN");
break;
case is_language_c:
pc = CHAIN_SWORD(pc, "if (");
pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
pc = CHAIN_SWORD(pc, (!braces_p?")":") {"));
break;
default:
pips_internal_error("Language unknown !");
break;
}
ADD_SENTENCE_TO_TEXT(r,
make_sentence(is_sentence_unformatted,
make_unformatted(strdup(label), n,
margin, pc)));
MERGE_TEXTS(r, text_statement_enclosed(module,
margin+INDENTATION,
tb,
braces_p,
braces_p,
pdl));
/* Close the brace opened on the if line; Fortran ENDIF is emitted
   by the caller */
if (prettyprint_language_is_c_p()
&& braces_p)
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
return (r);
}
/* Prettyprint an else clause for statement stmt when it is worth
 * printing: it carries a comment, real code, or the PRETTYPRINT
 * properties force empty blocks / labels to be shown. Returns an
 * empty text otherwise.
 */
static text text_block_else(entity module,
const char * label __attribute__ ((unused)),
int margin,
statement stmt,
int n __attribute__ ((unused)),
list pdl) {
text r = make_text(NIL);
if (!statement_with_empty_comment_p(stmt) || (!empty_statement_p(stmt)
&& !continue_statement_p(stmt)) || (empty_statement_p(stmt)
&& (get_bool_property("PRETTYPRINT_EMPTY_BLOCKS")))
|| (continue_statement_p(stmt)
&& (get_bool_property("PRETTYPRINT_ALL_LABELS")))) {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "ELSE"));
MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, stmt, pdl));
break;
case is_language_c:
/* Braceless "else" only for a one-liner clause, unless braces
   were requested everywhere */
if (one_liner_p(stmt) && !prettyprint_all_c_braces_p) {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"else"));
MERGE_TEXTS(r, text_statement_enclosed(module,
margin+INDENTATION,
stmt,
false,
false,
pdl));
} else {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "else {"));
MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, stmt, pdl));
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, "}"));
}
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
return r;
}
/* Prettyprint an ELSEIF / "else if" link of a test chain: condition,
 * true branch, then recurse on the false branch — either another
 * else-if when it is a bare test, or a final else clause.
 */
static text text_block_elseif(entity module,
const char* label,
int margin,
test obj,
int n,
list pdl) {
text r = make_text(NIL);
list pc = NIL;
statement tb = test_true(obj);
statement fb = test_false(obj);
bool braces_p = !one_liner_p(tb) || prettyprint_all_c_braces_p;
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
pc = CHAIN_SWORD(pc, strdup("ELSEIF ("));
pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
pc = CHAIN_SWORD(pc, strdup(") THEN"));
break;
case is_language_c:
pc = CHAIN_SWORD(pc, strdup("else if ("));
pc = gen_nconc(pc, words_expression(test_condition(obj), pdl));
pc = CHAIN_SWORD(pc, (!braces_p?")":") {"));
break;
default:
pips_internal_error("Language unknown !");
break;
}
ADD_SENTENCE_TO_TEXT(r,
make_sentence(is_sentence_unformatted,
make_unformatted(strdup(label), n,
margin, pc)));
MERGE_TEXTS(r, text_statement_enclosed(module,
margin+INDENTATION,
tb,
braces_p,
braces_p,
pdl));
if (prettyprint_language_is_c_p()
&& braces_p) {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin, strdup("}")));
}
/* Chain on the false branch: another else-if only when it is an
   unlabelled, uncommented test */
if (statement_test_p(fb) && empty_comments_p(statement_comments(fb))
&& entity_empty_label_p(statement_label(fb))) {
MERGE_TEXTS(r, text_block_elseif(module,
label_local_name(statement_label(fb)),
margin,
statement_test(fb), n, pdl));
} else {
MERGE_TEXTS(r, text_block_else(module, label, margin, fb, n, pdl));
}
ifdebug(8) {
fprintf(stderr, "elseif=================================\n");
print_text(stderr, r);
fprintf(stderr, "==============================\n");
}
return (r);
}
/* Prettyprint a test, dispatching to the best-suited layout:
 * 1. one call in the true branch, empty false branch  -> one-line
 *    logical IF / braceless C if (text_logical_if);
 * 2. test in the false branch -> IF ... ELSEIF ... chain
 *    (text_block_ifthen + text_block_elseif);
 * 3. otherwise -> full block if (text_block_if), or the special
 *    goto-based IO form when the condition references an IO entity.
 */
static text text_test(entity module,
const char* label,
int margin,
test obj,
int n,
list pdl) {
text r = text_undefined;
statement tb = test_true(obj);
statement fb = test_false(obj);
/* 1st case: one statement in the true branch => Fortran logical IF
or no braces in C */
if (nop_statement_p(fb) && statement_call_p(tb)
&& entity_empty_label_p(statement_label(tb))
&& empty_comments_p(statement_comments(tb)) && !continue_statement_p(tb)
&& !get_bool_property("PRETTYPRINT_BLOCK_IF_ONLY")
&& !(call_contains_alternate_returns_p(statement_call(tb))
&& get_bool_property("PRETTYPRINT_REGENERATE_ALTERNATE_RETURNS"))
&& !(prettyprint_all_c_braces_p
&& (get_prettyprint_language_tag()==is_language_c))) {
r = text_logical_if(module, label, margin, obj, n, pdl);
}
/* 2nd case: one test in the false branch => "ELSEIF" Fortran block
or "else if" C construct */
else if (statement_test_p(fb) && empty_comments_p(statement_comments(fb))
&& entity_empty_label_p(statement_label(fb))
&& !get_bool_property("PRETTYPRINT_BLOCK_IF_ONLY")) {
r = text_block_ifthen(module, label, margin, obj, n, pdl);
MERGE_TEXTS(r, text_block_elseif
(module,
label_local_name(statement_label(fb)),
margin, statement_test(fb), n, pdl));
/* Close the whole chain: only Fortran needs an explicit ENDIF */
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"ENDIF"));
break;
case is_language_c:
//nothing to do in C
break;
default:
pips_internal_error("Language unknown !");
break;
}
} else {
/* 3rd case: general test, possibly the special IO-check layout */
syntax c = expression_syntax(test_condition(obj));
if (syntax_reference_p(c)
&& io_entity_p(reference_variable(syntax_reference(c)))
&& !get_bool_property("PRETTYPRINT_CHECK_IO_STATEMENTS"))
r = text_io_block_if(module, label, margin, obj, n, pdl);
else
r = text_block_if(module, label, margin, obj, n, pdl);
}
ifdebug(8) {
fprintf(stderr, "text_test=================================\n");
print_text(stderr, r);
fprintf(stderr, "==============================\n");
}
return r;
}
/* hook for adding something in the head. used by hpfc.
* done so to avoid hpfc->prettyprint dependence in the libs.
* FC. 29/12/95.
*/
/* Hook called to add extra material at the head of the prettyprinted
 * module; NULL when no hook is installed. */
static string (*head_hook)(entity) = NULL;

/* Install f as the head hook. */
void set_prettyprinter_head_hook(string (*f)(entity))
{
  head_hook = f;
}

/* Uninstall the head hook. */
void reset_prettyprinter_head_hook()
{
  head_hook = NULL;
}
/* Prettyprint one instruction by dispatching on its tag to the
 * specialized text_* builders (block, test, loops, goto, call,
 * unstructured, expression).
 *
 * module: enclosing module entity
 * label:  statement label to print
 * margin: current indentation
 * obj:    the instruction to print
 * n:      statement number
 * pdl:    previous declaration list
 */
static text text_instruction(entity module,
const char* label,
int margin,
instruction obj,
int n,
list pdl) {
text r = text_undefined;
switch(instruction_tag(obj)) {
case is_instruction_block: {
r = text_block(module, label, margin, instruction_block(obj), n, pdl);
break;
}
case is_instruction_test: {
r = text_test(module, label, margin, instruction_test(obj), n, pdl);
break;
}
case is_instruction_loop: {
r = text_loop(module, label, margin, instruction_loop(obj), n, pdl);
break;
}
case is_instruction_whileloop: {
r = text_whileloop(module,
label,
margin,
instruction_whileloop(obj),
n,
pdl);
break;
}
case is_instruction_goto: {
r = make_text(CONS(SENTENCE,
sentence_goto(module, label, margin,
instruction_goto(obj), n), NIL));
break;
}
case is_instruction_call: {
unformatted u;
sentence s;
/* FI: in C at least, this has already been decided by the
caller, text_statement_enclosed(); but apparently not in
Fortran. Also, the source code may be in Fortran, but the
user wants it prettyprinted as C. */
if (prettyprint_language_is_fortran_p()
&& instruction_continue_p(obj) && empty_string_p(label)
&& !get_bool_property("PRETTYPRINT_ALL_LABELS")) {
pips_debug(5, "useless Fortran CONTINUE not printed\n");
r = make_text(NIL);
} else {
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
u = make_unformatted(strdup(label),
n,
margin,
words_call(instruction_call(obj),
0,
true,
true,
pdl));
break;
case is_language_c:
/* A C call is terminated by ";" */
u = make_unformatted(strdup(label),
n,
margin,
CHAIN_SWORD(words_call(instruction_call(obj),
0, true, true, pdl),
C_STATEMENT_END_STRING));
break;
default:
pips_internal_error("Language unknown !");
break;
}
s = make_sentence(is_sentence_unformatted, u);
r = make_text(CONS(SENTENCE, s, NIL));
}
break;
}
case is_instruction_unstructured: {
// append local variables if there is some.
// local variable need to be inserted before diging the
// unstructured graph.
r = insert_locals(r);
text tmp = text_undefined;
tmp = text_unstructured(module,
label,
margin,
instruction_unstructured(obj),
n);
// append the unstructured to the current text if it exists
if ((r != text_undefined) && (r != NULL)) {
MERGE_TEXTS (r, tmp);
} else {
r = tmp;
}
break;
}
case is_instruction_forloop: {
r = text_forloop(module, label, margin, instruction_forloop(obj), n, pdl);
break;
}
case is_instruction_expression: {
/* An expression statement is printed as the expression followed
   by the C statement terminator */
list pc = words_expression(instruction_expression(obj), pdl);
unformatted u;
pc = CHAIN_SWORD(pc,C_CONTINUE_FUNCTION_NAME);
u = make_unformatted(strdup(label), n, margin, pc);
r = make_text(CONS(SENTENCE,
make_sentence(is_sentence_unformatted, u),
NIL));
break;
}
default: {
pips_internal_error("unexpected tag");
}
}
return (r);
}
/* In case the input code is not C code, non-standard comments have to
be detected */
/* Check whether string c consists only of well-formed C comments
 * ("slash-star ... star-slash" blocks and "//" lines) separated by
 * blanks, tabs and newlines.
 *
 * Implemented as an explicit finite-state scanner (equivalent to the
 * original goto-based automaton):
 *   - an unterminated block comment is rejected;
 *   - a "//" comment may be ended by '\n' or by the end of the string;
 *   - the empty string is accepted.
 */
bool C_comment_p(string c){
  char *scan = c;
  enum { BETWEEN, AFTER_SLASH, IN_BLOCK, IN_BLOCK_AFTER_STAR, IN_LINE }
    state = BETWEEN;

  for (;;) {
    char ch = *scan++;
    switch (state) {
    case BETWEEN:
      /* Outside any comment: only blanks or the start of a comment
         are acceptable */
      if (ch == ' ' || ch == '\t' || ch == '\n')
        break;
      if (ch == '/') {
        state = AFTER_SLASH;
        break;
      }
      return ch == '\0';
    case AFTER_SLASH:
      /* A lone '/' must open either comment kind */
      if (ch == '*') {
        state = IN_BLOCK;
        break;
      }
      if (ch == '/') {
        state = IN_LINE;
        break;
      }
      return false;
    case IN_BLOCK:
      /* Inside a block comment: only a final '\0' is fatal */
      if (ch == '\0')
        return false;
      if (ch == '*')
        state = IN_BLOCK_AFTER_STAR;
      break;
    case IN_BLOCK_AFTER_STAR:
      if (ch == '/') {
        state = BETWEEN;   /* block comment properly closed */
        break;
      }
      if (ch == '\0')
        return false;      /* unterminated block comment */
      if (ch != '*')
        state = IN_BLOCK;  /* consecutive '*' keeps this state */
      break;
    case IN_LINE:
      /* A "//" comment runs to the end of line... */
      if (ch == '\n') {
        state = BETWEEN;
        break;
      }
      if (ch == '\0')      /* ...or to the end of the string */
        return true;
      break;
    }
  }
}
/* In case comments are not formatted according to C rules, e.g. when
prettyprinting Fortran code as C code, add // at beginning of lines */
/* Turn an arbitrary comment string into a text, one sentence per
 * line. When the comment is not a well-formed C comment (e.g. when
 * prettyprinting Fortran code as C), each non-empty line is prefixed
 * with "//".
 *
 * r_margin: requested indentation; forced to 0 for a lone "\n" and
 *           for empty non-C lines.
 */
text C_any_comment_to_text(int r_margin, string c)
{
string lb = c; /* line beginning */
string le = c; /* line end */
string cp = c; /* current position, pointer in comments */
text ct = make_text(NIL);
bool is_C_comment = C_comment_p(c);
int e_margin = r_margin;
/* We do not need spaces before a line feed */
if(strcmp(c, "\n")==0)
e_margin = 0;
if(strlen(c)>0) {
for(;*cp!='\0';cp++) {
if(*cp=='\n') {
/* NOTE(review): the "|| true" makes this condition vacuous; it
   looks like a disabled attempt to skip a leading \n */
if(cp!=c || true){ // Do not skip \n
string cl = gen_strndup0(lb, le-lb);
sentence s = sentence_undefined;
if(is_C_comment)
s = MAKE_ONE_WORD_SENTENCE(e_margin, cl);
else if(strlen(cl)>0){
list pc = CHAIN_SWORD(NIL, cl); // cl is uselessly duplicated
pc = CONS(STRING, MAKE_SWORD("//"), pc);
s= make_sentence(is_sentence_unformatted,
make_unformatted((char *) NULL, 0, e_margin, pc));
}
else {
s = MAKE_ONE_WORD_SENTENCE(0, cl);
}
ADD_SENTENCE_TO_TEXT(ct, s);
free(cl);
}
/* Start scanning the next line */
lb = cp+1;
le = cp+1;
}
else
le++;
}
// Final \n has been removed in the parser presumably by Ronan
// But this is also useful when non-standard comments are added,
// for instance by phase "comment_prepend"
if(lb<cp){
sentence s = sentence_undefined;
string sl = gen_strndup0(lb,le-lb);
if(is_C_comment) {
s = MAKE_ONE_WORD_SENTENCE(e_margin,sl);
}
else {
list pc = CHAIN_SWORD(NIL, sl); // sl is uselessly duplicated
pc = CONS(STRING, MAKE_SWORD("//"), pc);
s = make_sentence(is_sentence_unformatted,
make_unformatted((char *) NULL, 0, e_margin, pc));
}
ADD_SENTENCE_TO_TEXT(ct,s);
free(sl);
} else{
//ADD_SENTENCE_TO_TEXT(ct,MAKE_ONE_WORD_SENTENCE(0,""));
;
}
}
else{// Final \n has been removed by Ronan
//ADD_SENTENCE_TO_TEXT(ct,MAKE_ONE_WORD_SENTENCE(0,""));
;
}
return ct;
}
// Ronan's improved version is bugged. It returns many lines for a
// unique \n because le is not updated before looping. Has this code
// been validated?
/* Split a standard C comment into one sentence per line, dropping the
 * '\n' separators; empty lines are emitted with no indentation.
 *
 * NOTE(review): flagged as bugged by the comment above and currently
 * disabled in C_comment_to_text(); a comment ending in '\n' appears
 * to produce one extra empty trailing line — confirm before
 * re-enabling.
 */
text C_standard_comment_to_text(int margin, string comment)
{
string line;
string le = comment; /* position of a line end */
text ct = make_text(NIL);
do {
/* Find the first end of line: */
le = strchr(comment, '\n');
if (le == NULL)
/* No end-of-line, so use all the rest of the comment: */
line = strdup(comment);
else {
/* Skip the '\n' at the end since the line concept is the notion of
sentence */
line = gen_strndup0(comment, le - comment);
/* Analyze the next line: */
comment = le + 1;
}
/* Do not indent if the line is empty */
ADD_SENTENCE_TO_TEXT(ct,
MAKE_ONE_WORD_SENTENCE(line[0] == '\0' ? 0 : margin,
line));
} while (le != NULL);
return ct;
}
/* Special handling for C comments with each line indented according to
the context.
I do not see the interest if the user code is already indented... RK
OK, since the blanks outside the comments are removed by the parser.
*/
/* Turn a comment into a text at the given margin.
 *
 * The dedicated path for standard C comments,
 * C_standard_comment_to_text(), is disabled because it is bugged (see
 * the note above it), so both kinds of comments are routed through
 * C_any_comment_to_text(). The C_comment_p() test is kept so the
 * special case can easily be restored.
 */
text C_comment_to_text(int margin, string comment)
{
  if (C_comment_p(comment)) {
    /* was: return C_standard_comment_to_text(margin, comment); */
    return C_any_comment_to_text(margin, comment);
  }
  return C_any_comment_to_text(margin, comment);
}
/* Split string s on delimiter delim into a list of freshly allocated
 * STRING words, in order.
 *
 * Each word is strndup'ed; ownership goes to the caller. Note that a
 * trailing delimiter does not produce a final empty word ("a,"
 * yields only "a"), and an empty input yields NIL.
 */
static list cstrsplit(const char * s, char delim) {
list out = NIL;
const char *b=s,*e=s; /* b = word start, e = scan position */
while(*e) {
while(*e && *e!=delim) ++e;
char * word = strndup(b,e-b);
out=CONS(STRING,word,out);
if(*e) {
++e;
b=e;
}
}
/* Words were consed in reverse; restore input order */
return gen_nreverse(out);
}
/* Return a formatted comment that takes care of adding the relevant "//" or "C"
 * prefix depending on the output language */
/* Return a freshly allocated copy of i_comments where, when property
 * PRETTYPRINT_CHECK_COMMENTS is set, every line that does not already
 * start (after blanks) with a comment marker of the output language l
 * is prefixed with one ("//" for C, "! " for Fortran 95, "C " for
 * Fortran). Once a C multi-line comment opener is seen, the remaining
 * lines are copied as-is. When the property is unset, the input is
 * simply duplicated.
 */
static string ensure_comment_consistency(const char * i_comments, language l) {
string comments;
/* Special handling of comments linked to declarations and to the
poor job of the lexical analyzer as regards C comments:
failure. */
if(empty_comments_p(i_comments)) {
comments = strdup("");
}
else {
if(get_bool_property("PRETTYPRINT_CHECK_COMMENTS")) {
/* patterns: accepted comment openers for language l; prefix: the
   marker prepended to lines that have none */
char * patterns [] = { NULL, NULL, NULL, NULL, NULL, NULL };
char prefix[3]= { 0,0,0 };
if(language_c_p(l)) {
patterns[0] = "//";
patterns[1] = "/*";
strcat(prefix,"//");
}
else if(language_fortran95_p(l) || language_fortran_p(l)) {
patterns[0]= "C";
patterns[1]= "!";
patterns[2]= "*";
patterns[3]= "c";
patterns[4]= "#"; // a single test case in PIPS validation forces me to do this (Syntax/sharpcomment)
if(language_fortran95_p(l))
strcat(prefix,"! ");
else
strcat(prefix,"C ");//to keep consistency with old fashioned code
}
// be multi-line comments compliant
list lines = cstrsplit(i_comments,'\n');
list lcomments = NIL;
for(list liter=lines;!ENDP(liter);POP(liter)){
string line = STRING(CAR(liter));
bool comment_ok =false;
char *iter =line;
/* Skip leading blanks before looking for a comment marker */
while(*iter && isspace(*iter)) iter++;
if(*iter) {
for(char **piter=&patterns[0];*piter;piter++) {
if((comment_ok=(strncmp(iter,*piter,strlen(*piter))==0)))
break;
}
if(!comment_ok)
asprintf(&comments,"%s%s",prefix,line);
else
comments=strdup(line);
}
else /*blank line */
comments=strdup(line);
if(language_c_p(l) && strncmp(iter,"/*",2)==0 ){ // multi-line comment started, assume it's ok now
lcomments=gen_nconc(lcomments,gen_copy_string_list(liter));
break; // so bad if we close the multi-line comment and keep commenting afterwards ...
}
else
lcomments=gen_nconc(lcomments,CONS(STRING,comments,NIL));
}
/* Reassemble the adjusted lines; intermediate lists are freed */
comments=words_join(lcomments,"\n");
gen_free_string_list(lcomments);
gen_free_string_list(lines);
}
else
return strdup(i_comments);
#if 0
if(declaration_statement_p(stmt)) {
/* LF interspersed within C struct or union or initialization
declarations may damage the user comment. However, there is no
way no know if the LF are valid because thay are located
between two statements or invalid because they are located
within one statement. The information is lost by the lexer and
the parser. */
//comments = string_strip_final_linefeeds(strdup(i_comments));
//comments = string_fuse_final_linefeeds(strdup(i_comments));
comments = strdup(i_comments);
}
else {
comments = strdup(i_comments);
}
#endif
}
return comments;
}
/* Build the text of a statement
@param module: the module containing the statement
@param imargin: current tabulation
@param stmt: the statement to print
@param braces_p: the statement is within a block; this has an impact of
the print-out of continue statements in C, ";"
@param drop_continue_p: another condition to control the print-out of
";" or not;
@param pdl: previous declaration list; list of entities that have
already been declared and should not be redeclared; this is required
for struct and union which may be declared independently or in a nested
way. See C_syntax/struct03, 04, 05, etc...
@return the text of the statement
Notes:
- in simple tests, the statement ";" may be mandatory or not.
- continue may be used to preserve comments and then the ";" may be
dropped
- source fidelity would be easier if a new NOP statement that is
never printed out were used.
*/
text text_statement_enclosed(entity module,
int imargin,
statement stmt,
bool braces_p,
bool drop_continue_p,
list pdl)
{
instruction i = statement_instruction(stmt);
//synchronization sync = statement_synchronization(stmt);
text r= make_text(NIL);
text temp;
string i_comments = statement_comments(stmt);
string comments = string_undefined;
bool braces_added = false;
int nmargin = imargin;
// To ease breakpoint setting
//pips_assert("Blocks have no comments", !instruction_block_p(i)||empty_comments_p(comments));
if(instruction_block_p(i) && !empty_comments_p(i_comments)) {
pips_internal_error("Blocks should have no comments");
}
comments = ensure_comment_consistency(i_comments,get_prettyprint_language());
if(prettyprint_language_is_c_p() &&
statement_block_p(stmt) &&
!empty_extensions_p(statement_extensions(stmt)))
{
string ext = extensions_to_string(statement_extensions (stmt), true);
if (ext != string_undefined) {
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, ext));
braces_added = true;
ADD_SENTENCE_TO_TEXT(r,
MAKE_ONE_WORD_SENTENCE(imargin, "{"));
nmargin += INDENTATION;
}
}
/* Generate text for local declarations
*
* 31/07/2003 Nga Nguyen : This code is added for C, because a
* statement can have its own declarations
*/
list dl = statement_declarations(stmt);
/* FI: consistency check - incompatible with unfolding.. and with
the C parser... */
ifdebug(1) {
/* The real check is that dl and idl are equal, that is
ENDP(gen_list_and_not(dl,idl)) && ENDP(gen_list_and_not(idl,dl)),
except for the side effects of gen_list_and_not(), so dl and idl
should be copied first. */
if(statement_block_p(stmt)) {
list idl = statement_to_direct_declarations(stmt);
if(ENDP(dl) && !ENDP(idl)) {
/* This may occur when declaration statements are added using
subsequences by somebody forgetfull of scope issues */
// Do not forget: the error is detected within the prettyprinter...
//print_statement(stmt);
print_entities(idl);
pips_internal_error("A block statement with no declarations"
" contains declarations\n");
}
else if(gen_length(dl)!=gen_length(idl)) {
print_entities(dl);
fprintf(stderr, "\n"); // FI, OK a fputc might do as well
print_entities(idl);
fprintf(stderr, "\n");
pips_internal_error("A block statement with %d declarations"
" contains %d declarations in its statements\n",
gen_length(dl), gen_length(idl));
}
else
gen_free_list(idl);
}
if(statement_block_p(stmt) && !ENDP(dl)) {
/* See for instance
Transformations/Simplify_control.sub/sequence01 */
list sl = statement_block(stmt);
if(ENDP(sl)) {
pips_internal_error("A block statement with declarations"
" contains no declaration statements\n");
}
}
}
if (!ENDP(dl) && prettyprint_language_is_c_p()) {
if(statement_block_p(stmt)) {
if(!braces_p && !braces_added) {
braces_added = true;
ADD_SENTENCE_TO_TEXT(r,
MAKE_ONE_WORD_SENTENCE(imargin, "{"));
nmargin += INDENTATION;
}
}
else {
pips_assert("declarations are carried by continue statements",
continue_statement_p(stmt));
}
// initialize the local variable text if needed
if (local_flg == false) {
local_flg = true;
local_var = make_text(NIL);
}
if(declaration_statement_p(stmt)) {
int sn = statement_number(stmt);
MERGE_TEXTS(local_var,
c_text_related_entities(module,dl,nmargin,sn,dl));
}
else {
//MERGE_TEXTS(local_var, c_text_entities(module,l,nmargin));
// Do nothing and rely on CONTINUE statements...
;
}
}
pips_debug(2, "Begin for statement %s with braces_p=%d\n",
statement_identification(stmt),braces_p);
pips_debug(9, "statement_comments: --%s--\n",
string_undefined_p(comments)? "<undef>": comments);
if(statement_number(stmt)!=STATEMENT_NUMBER_UNDEFINED &&
statement_ordering(stmt)==STATEMENT_ORDERING_UNDEFINED) {
/* we are in trouble with some kind of dead (?) code...
but we might as well be dealing with some parsed_code */
pips_debug(1, "I unexpectedly bumped into dead code?\n");
}
const char* label;
bool pragma_before_label_in_C = prettyprint_language_is_c_p()
&& statement_with_pragma_p(stmt) && !unlabelled_statement_p(stmt);
if (pragma_before_label_in_C)
/* We are in trouble because a pragma in C should appear after the label but
the Fortran-oriented prettyprinter is to prettyprint a label and an
instruction in block. So we print the instruction without the label
that is to be added in another way afterwards */
label = "";
else
label = label_local_name(statement_label(stmt));
if (entity_return_label_p(statement_label(stmt))) {
pips_assert("Statement with return label must be a return statement",
return_statement_p(stmt));
/* do not add a redundant RETURN before an END, unless
requested or unless needed because a value must be returned
in C */
bool last_statement_p(statement);
if(get_bool_property("PRETTYPRINT_FINAL_RETURN")
|| !last_statement_p(stmt)
|| (!void_function_p(module) && c_module_p(module)))
{
/*<<<<<<< .working
sentence s = MAKE_ONE_WORD_SENTENCE(nmargin, prettyprint_language_is_c_p()?C_RETURN_FUNCTION_NAME";":RETURN_FUNCTION_NAME);
=======*/
sentence s = sentence_undefined;
if(entity_undefined_p(module)
|| void_function_p(module)
|| fortran_module_p(module)) {
s = MAKE_ONE_WORD_SENTENCE(nmargin,
prettyprint_language_is_c_p()?
C_RETURN_FUNCTION_NAME";"
:RETURN_FUNCTION_NAME);
}
else {
// Must be a non void C function
entity rv = function_to_return_value(module);
list pc = NIL;
pc = CHAIN_SWORD(pc, C_RETURN_FUNCTION_NAME);
pc = CHAIN_SWORD(pc, " ");
pc = CHAIN_SWORD(pc, entity_user_name(rv));
pc = CHAIN_SWORD(pc, C_CONTINUE_FUNCTION_NAME);
unformatted u = make_unformatted((char *) NULL, 0, nmargin, pc);
s = make_sentence_unformatted(u);
}
//>>>>>>> .merge-right.r18859
temp = make_text(CONS(SENTENCE, s, NIL));
}
else {
temp = make_text(NIL);
}
}
else
{
entity m = entity_undefined_p(module)?
get_current_module_entity()
: module;
if(true || !compilation_unit_p(entity_name(m))) {
/* Do we need to print this CONTINUE statement in C? */
string cs = statement_comments(stmt);
if (prettyprint_language_is_c_p()
&& (braces_p || drop_continue_p)
&& unlabelled_statement_p(stmt)
&& instruction_continue_p(i)) {
if(!ENDP(statement_declarations(stmt))) {
/* The declarations will be printed, no need for anything else */
temp = make_text(NIL);
}
else if(string_undefined_p(cs) || cs == NULL || strcmp(cs, "")==0) {
sentence s = MAKE_ONE_WORD_SENTENCE(0, "");
temp = make_text(CONS(SENTENCE, s, NIL));
//temp = make_text(NIL);
}
else if(strcmp(cs, "\n")==0) {
// MAKE_ONE_WORD_SENTENCE already implies a '\n'
sentence s = MAKE_ONE_WORD_SENTENCE(0, "");
temp = make_text(CONS(SENTENCE, s, NIL));
}
else
temp = text_instruction(module, label, nmargin, i,
statement_number(stmt), pdl);
}
else
temp = text_instruction(module, label, nmargin, i,
statement_number(stmt), pdl);
}
else
temp = make_text(NIL);
}
/* Take care of comments and of analysis results printed as comments
*
* Note about comments: they are duplicated here, but I'm pretty
* sure that the free is NEVER performed as it should. FC.
*/
if(!ENDP(text_sentences(temp))) {
/* There is something to output for the instruction... */
MERGE_TEXTS(r, init_text_statement(module, nmargin, stmt));
if (! empty_comments_p(comments)) {
text ct = text_undefined;
switch(get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
strdup(comments)));
break;
case is_language_c:
ct = C_comment_to_text(nmargin, comments);
MERGE_TEXTS(r, ct);
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
}
else {
/* There is nothing to output for the instruction itself.
Preserve comments and empty C instruction */
if (! empty_comments_p(comments)) {
text ct = text_undefined;
switch (get_prettyprint_language_tag()) {
case is_language_fortran:
case is_language_fortran95:
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
strdup(comments)));
break;
case is_language_c:
ct = C_comment_to_text(nmargin, comments);
MERGE_TEXTS(r, ct);
MERGE_TEXTS(r, init_text_statement(module, nmargin, stmt));
break;
default:
pips_internal_error("Language unknown !");
break;
}
}
else if(prettyprint_language_is_c_p() &&
!braces_p && !braces_added &&ENDP(dl)) {
// Because C braces can be eliminated and hence semi-colon
// may be mandatory in a test branch or in a loop body.
// A. Mensi
sentence s = MAKE_ONE_WORD_SENTENCE(nmargin,
strdup(C_CONTINUE_FUNCTION_NAME));
ADD_SENTENCE_TO_TEXT(r, s);
}
else if(!ENDP(dl)) {
MERGE_TEXTS(r, init_text_statement(module, nmargin, stmt));
}
}
/* Add the label if not already done, in the case we want it before a
extension/pragma: */
if (pragma_before_label_in_C)
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted,
make_unformatted(strdup(label_local_name(statement_label(stmt))),
STATEMENT_NUMBER_UNDEFINED,
0,
NULL)));
if(!(prettyprint_language_is_c_p() && statement_block_p(stmt))) {
/* Append the extensions after comments: */
string ext = extensions_to_string(statement_extensions (stmt), true);
if (ext != string_undefined) {
ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted, ext));
}
}
/* Then add any instruction text: */
MERGE_TEXTS(r, temp);
/* append local variables that might have not been inserted
previously
FI: this seems to be quite late and might explain the problem
with local variables of Fortran do loops. Might, because I've
never managed to figure out exactly what happens...
*/
r = insert_locals (r);
if (braces_added) {
ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(imargin, "}"));
}
attach_statement_information_to_text(r, stmt);
// the last thing to do is to close the extension
string close = close_extensions (statement_extensions (stmt), true);
if (close != string_undefined) {
ADD_SENTENCE_TO_TEXT(r,make_sentence(is_sentence_formatted, close));
}
ifdebug(1) {
if (instruction_sequence_p(i)) {
if(!(statement_with_empty_comment_p(stmt)
&& statement_number(stmt) == STATEMENT_NUMBER_UNDEFINED
&& unlabelled_statement_p(stmt))) {
user_log("Block statement %s\n"
"Block number=%d, Block label=\"%s\", block comment=\"%s\"\n",
statement_identification(stmt),
statement_number(stmt), label_local_name(statement_label(stmt)),
statement_comments(stmt));
pips_internal_error("This block statement should be labelless,"
" numberless and commentless.\n");
}
}
}
ifdebug(8){
fprintf(stderr,"text_statement_enclosed=================================\n");
print_text(stderr,r);
fprintf(stderr,"==============================\n");
}
free(comments);
pips_debug(2, "End for statement %s\n", statement_identification(stmt));
return(r);
}
/* Handles all statements but tests that are nodes of an unstructured.
Those are handled by text_control.
@param module: the module containing the statement
@param margin: current tabulation
@param stmt: the statement to print
@param pdl: previous declaration list; list of entities that have already
been declared and should not be redeclared; this is required for
struct and union which may be declared independently or in a nested
way. See C_syntax/struct03, 04, 05, etc...
@return the text of the statement
*/
/* Prettyprint a statement that is not a node of an unstructured
 * (unstructured nodes go through text_control).  Simple wrapper around
 * text_statement_enclosed() with braces assumed present and CONTINUE
 * statements droppable.
 */
text text_statement(entity module, int margin, statement stmt, list pdl)
{
  return text_statement_enclosed(module, margin, stmt, true, true, pdl);
}
/* Keep track of the last statement to decide if a final return can be
* omitted or not. If no last statement can be found for sure, for
* instance because it depends on the prettyprinter, last_statement_found is
* set to statement_undefined which is safe.
*
* FI: for purposes unrelated to prettyprint, see
* last_statement(). This function is part of the prettyprinter and
* probably only useful for Fortran code.
*/
static statement last_statement_found = statement_undefined;
/* Find the unique statement executed last in s, or statement_undefined
 * when it cannot be determined.  Used by the prettyprinter to decide
 * whether a final RETURN can be omitted.
 */
statement find_last_statement(statement s)
{
  statement last = statement_undefined;

  pips_assert("statement is defined", !statement_undefined_p(s));

  if(statement_sequence_p(s)) {
    /* The last statement of a block is the last element of its list,
       undefined for an empty block. */
    list ls = instruction_block(statement_instruction(s));

    last = (ENDP(ls)? statement_undefined : STATEMENT(CAR(gen_last(ls))));
  }
  else if(statement_unstructured_p(s)) {
    /* The first element of the control trail is the exit node. */
    unstructured u = statement_unstructured(s);
    list trail = unstructured_to_trail(u);

    last = control_statement(CONTROL(CAR(trail)));

    gen_free_list(trail);
  }
  else if(statement_call_p(s)) {
    /* Hopefully it is a return statement.
     * Since the semantics of STOP is ignored by the parser, a
     * final STOp should be followed by a RETURN.
     */
    last = s;
  }
  else {
    /* loop or test cannot be last statements of a module */
    last = statement_undefined;
  }

  /* recursive call: dig into nested blocks and unstructureds */
  if(!statement_undefined_p(last)
     && (statement_sequence_p(last) || statement_unstructured_p(last))) {
    last = find_last_statement(last);
  }

  /* Too many program transformations and syntheses violate the
     following assert, so give up with a warning instead of aborting. */
  if(!(statement_undefined_p(last)
       || !statement_sequence_p(s)
       || return_statement_p(last))) {
    switch(get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        pips_user_warning("Last statement is not a RETURN!\n");
        break;
      case is_language_c:
        /* No warning needed for C, is it right for C ?*/
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
    }
    last = statement_undefined;
  }

  /* I had a lot of trouble writing the condition for this assert... */
  pips_assert("Last statement is either undefined or a call to return",
              statement_undefined_p(last) /* let's give up: it's always safe */
              || !statement_sequence_p(s) /* not a block: any kind of statement... */
              || return_statement_p(last)); /* if a block, then a return */

  return last;
}
/* Record the last statement of s in the static last_statement_found,
 * which must currently be undefined (set/reset discipline). */
void set_last_statement(statement s)
{
  pips_assert("last statement is undefined",
              statement_undefined_p(last_statement_found));
  last_statement_found = find_last_statement(s);
}
/* Forget the recorded last statement; pairs with set_last_statement(). */
void reset_last_statement()
{
  last_statement_found = statement_undefined;
}
/* True when s is the statement recorded by set_last_statement().
 * Fix: removed the stray trailing "\n" from the pips_assert message,
 * for consistency with every other pips_assert message in this file. */
bool last_statement_p(statement s) {
  pips_assert("statement is defined", !statement_undefined_p(s));
  return s == last_statement_found;
}
/* Build the text of a module.
The original text of the declarations is used if possible in
Fortran. Otherwise, the function text_declaration is called.
*/
/* Build the text of module "module", prettyprinted under the name
 * "name".  Handles both Fortran and C; sets up the prettyprint
 * language, the last-statement tracking and the alternate-return
 * machinery around the statement prettyprinting itself. */
text text_named_module(
  entity name, /**< the name of the module */
  entity module,
  statement stat)
{
  text r = make_text(NIL);
  code c = entity_code(module);
  string s = code_decls_text(c);           /* original declaration text, if kept */
  text ral = text_undefined;               /* alternate return targets */

  debug_on("PRETTYPRINT_DEBUG_LEVEL");

  /* Set the prettyprint language */
  set_prettyprint_language_from_property(language_tag(code_language(c)));

  /* This guard is correct but could be removed if find_last_statement()
   * were robust and/or if the internal representations were always "correct".
   * See also the guard for reset_last_statement()
   */
  if(!get_bool_property("PRETTYPRINT_FINAL_RETURN"))
    set_last_statement(stat);

  /* Cache the prettyprint options in file-scope flags. */
  precedence_p = !get_bool_property("PRETTYPRINT_ALL_PARENTHESES");
  prettyprint_all_c_braces_p = get_bool_property("PRETTYPRINT_ALL_C_BRACES");
  prettyprint_gcc_c_braces_p = get_bool_property("PRETTYPRINT_GCC_C_BRACES");

  list l = NIL;
  switch(get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      /* Either regenerate the declarations, or reuse the original text. */
      if(strcmp(s, "") == 0
         || get_bool_property("PRETTYPRINT_ALL_DECLARATIONS")) {
        if(get_bool_property("PRETTYPRINT_HEADER_COMMENTS"))
          /* Add the original header comments if any: */
          ADD_SENTENCE_TO_TEXT(r, get_header_comments(module));

        ADD_SENTENCE_TO_TEXT(r,
                             attach_head_to_sentence(sentence_head(name, NIL), module));
        if(head_hook)
          ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_formatted,
                                                head_hook(module)));

        if(get_bool_property("PRETTYPRINT_HEADER_COMMENTS"))
          /* Add the original header comments if any: */
          ADD_SENTENCE_TO_TEXT(r, get_declaration_comments(module));

        MERGE_TEXTS(r, text_declaration(module));
        MERGE_TEXTS(r, text_initializations(module));
      } else {
        /* Reuse the original declaration text verbatim. */
        ADD_SENTENCE_TO_TEXT(r,
                             attach_head_to_sentence(make_sentence(is_sentence_formatted,
                                                                   strdup(s)),
                                                     module));
      }
      break;
    case is_language_c:
      /* C prettyprinter */
      pips_debug(3,"Prettyprint function %s\n",entity_name(name));
      if(!compilation_unit_p(entity_name(name))) {
        //entity cu = module_entity_to_compilation_unit_entity(module);
        //list pdl = code_declarations(value_code(entity_initial(cu))));
        /* Print function header if the current module is not a compilation unit*/
        ADD_SENTENCE_TO_TEXT(r,attach_head_to_sentence(sentence_head(name, NIL), module));
        ADD_SENTENCE_TO_TEXT(r,MAKE_ONE_WORD_SENTENCE(0,"{"));
        /* get the declarations for Fortran codes prettyrinted as C,
           as the declarations are not located in the module
           statement. A.Mensi */
        if(ENDP(statement_declarations(stat)) && fortran_module_p(module)) {
          l = code_declarations(value_code(entity_initial(module)));
          MERGE_TEXTS(r,c_text_entities(module, l, INDENTATION, NIL));
        }
      }
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
  }

  set_alternate_return_set();
  reset_label_counter();

  if (stat != statement_undefined) {
    /* FI: This function should not be used here because it is part of
       the preprocessor library... */
    //entity cu = module_entity_to_compilation_unit_entity(module);
    switch(get_prettyprint_language_tag()) {
      case is_language_fortran:
      case is_language_fortran95:
        MERGE_TEXTS(r, text_statement(module,
                                      get_prettyprint_indentation(),
                                      stat,
                                      NIL));
        break;
      case is_language_c:
        /* A compilation unit body starts at margin 0, a function body
           is indented by one level. */
        MERGE_TEXTS(r,
                    text_statement(module,
                                   (compilation_unit_p(entity_name(name)))?0:INDENTATION,
                                   stat, NIL));
        break;
      default:
        pips_internal_error("Language unknown !");
        break;
    }
  }

  /* Emit the labels synthesized for Fortran alternate returns. */
  ral = generate_alternate_return_targets();
  reset_alternate_return_set();
  MERGE_TEXTS(r, ral);

  if(!compilation_unit_p(entity_name(name))
     || prettyprint_language_is_fortran_p()) {
    /* No need to print TAIL (}) if the current module is a C compilation unit*/
    ADD_SENTENCE_TO_TEXT(r, sentence_tail(module));
  }

  if(!get_bool_property("PRETTYPRINT_FINAL_RETURN"))
    reset_last_statement();

  debug_off();
  return(r);
}
/* Build the text of a module prettyprinted under its own name. */
text text_module(entity module, statement stat)
{
  return text_named_module(module, module, stat);
}
text text_graph(), text_control() ;
string control_slabel() ;
/* The node itentifiers are generated from the ordering, more stable than
the control node address: */
/* Emit a stable identifier "c_<number>_<statement>" for control node c,
 * derived from the statement ordering, which is more stable than the
 * control node address. */
void add_control_node_identifier_to_text(text r, control c) {
  _int ordering = statement_ordering(control_statement(c));
  add_one_unformated_printf_to_text(r, "c_%d_%d",
                                    ORDERING_NUMBER(ordering),
                                    ORDERING_STATEMENT(ordering));
}
/* Append to r one graph-view item for control node c: its marker line
 * and identifier, its statement text, then the identifiers of its
 * successors. */
void output_a_graph_view_of_the_unstructured_successors(text r,
                                                        entity module,
                                                        int margin,
                                                        control c)
{
  list pdl = NIL; // FI: I have no idea how to initialize it in this context...

  /* Node header: marker followed by the node identifier. */
  add_one_unformated_printf_to_text(r, "%s ",
                                    PRETTYPRINT_UNSTRUCTURED_ITEM_MARKER);
  add_control_node_identifier_to_text(r, c);
  add_one_unformated_printf_to_text(r, "\n");

  if (get_bool_property("PRETTYPRINT_UNSTRUCTURED_AS_A_GRAPH_VERBOSE")) {
    /* Debug aid: raw node addresses of c and its successors. */
    add_one_unformated_printf_to_text(r, "C Unstructured node %p ->", c);
    MAP(CONTROL, a_successor,
        add_one_unformated_printf_to_text(r, " %p", a_successor),
        control_successors(c));
    add_one_unformated_printf_to_text(r,"\n");
  }

  /* The statement carried by the node. */
  MERGE_TEXTS(r, text_statement(module,
                                margin,
                                control_statement(c),
                                pdl));

  /* Successor list, by identifier. */
  add_one_unformated_printf_to_text(r,
                                    PRETTYPRINT_UNSTRUCTURED_SUCC_MARKER);
  MAP(CONTROL, a_successor,
      {
        add_one_unformated_printf_to_text(r, " ");
        add_control_node_identifier_to_text(r, a_successor);
      },
      control_successors(c));
  add_one_unformated_printf_to_text(r,"\n");
}
/* Walk all control nodes reachable from begin_control and append their
 * graph-view text to r.  Returns true when exit_control was among the
 * visited nodes (i.e. the exit is connected to the entry). */
bool output_a_graph_view_of_the_unstructured_from_a_control(text r,
                                                            entity module,
                                                            int margin,
                                                            control begin_control,
                                                            control exit_control)
{
  bool exit_node_has_been_displayed = false;
  list blocs = NIL;  /* visited nodes, filled by CONTROL_MAP */

  CONTROL_MAP(c,
              {
                /* Display the statements of each node followed by
                   the list of its successors if any: */
                output_a_graph_view_of_the_unstructured_successors(r,
                                                                   module,
                                                                   margin,
                                                                   c);
                if (c == exit_control)
                  exit_node_has_been_displayed = true;
              },
              begin_control,
              blocs);
  gen_free_list(blocs);

  return exit_node_has_been_displayed;
}
/* Append to r a complete graph view of unstructured u: begin/end
 * markers, every reachable node with its successors, and — when the
 * exit node is unreachable from the entry — the exit node itself plus
 * an explicit unreachable-exit marker. */
void output_a_graph_view_of_the_unstructured(text r,
                                             entity module,
                                             const char * label __attribute__ ((unused)),
                                             int margin,
                                             unstructured u,
                                             int __attribute__ ((unused)) num)
{
  bool exit_node_has_been_displayed = false;
  control begin_control = unstructured_control(u);
  control end_control = unstructured_exit(u);

  /* Header: begin marker with the entry and exit node identifiers. */
  add_one_unformated_printf_to_text(r, "%s ",
                                    PRETTYPRINT_UNSTRUCTURED_BEGIN_MARKER);
  add_control_node_identifier_to_text(r, begin_control);
  add_one_unformated_printf_to_text(r, " end: ");
  add_control_node_identifier_to_text(r, end_control);
  add_one_unformated_printf_to_text(r, "\n");

  exit_node_has_been_displayed =
    output_a_graph_view_of_the_unstructured_from_a_control(r,
                                                           module,
                                                           margin,
                                                           begin_control,
                                                           end_control);

  /* If we have not displayed the exit node, that mean that it is not
     connex with the entry node and so the code is
     unreachable. Anyway, it has to be displayed as for the classical
     Sequential View: */
  if (! exit_node_has_been_displayed) {
    /* Note that since the controlizer adds a dummy successor to the
       exit node, use
       output_a_graph_view_of_the_unstructured_from_a_control()
       instead of
       output_a_graph_view_of_the_unstructured_successors(): */
    output_a_graph_view_of_the_unstructured_from_a_control(r,
                                                           module,
                                                           margin,
                                                           end_control,
                                                           end_control);
    /* Even if the code is unreachable, add the fact that the
       control above is semantically related to the entry node. Add
       a dash arrow from the entry node to the exit node in daVinci,
       for example: */
    add_one_unformated_printf_to_text(r, "%s ",
                                      PRETTYPRINT_UNREACHABLE_EXIT_MARKER);
    add_control_node_identifier_to_text(r, begin_control);
    add_one_unformated_printf_to_text(r, " -> ");
    add_control_node_identifier_to_text(r, end_control);
    add_one_unformated_printf_to_text(r, "\n");
    if (get_bool_property("PRETTYPRINT_UNSTRUCTURED_AS_A_GRAPH_VERBOSE"))
      add_one_unformated_printf_to_text(r, "C Unreachable exit node (%p -> %p)\n",
                                        begin_control,
                                        end_control);
  }

  /* Trailer: end marker, mirroring the header. */
  add_one_unformated_printf_to_text(r, "%s ",
                                    PRETTYPRINT_UNSTRUCTURED_END_MARKER);
  add_control_node_identifier_to_text(r, begin_control);
  add_one_unformated_printf_to_text(r, " end: ");
  add_control_node_identifier_to_text(r, end_control);
  add_one_unformated_printf_to_text(r, "\n");
}
/* ================C prettyprinter functions================= */
/* Prettyprint a C cast "(type) exp" as a word list.  The whole
 * expression is wrapped in parentheses when the caller's precedence
 * requires it. */
static list words_cast(cast obj, int precedence, list pdl)
{
  list pc = NIL;
  type t = cast_type(obj);
  expression exp = cast_expression(obj);
  bool space_p = get_bool_property("PRETTYPRINT_LISTS_WITH_SPACES");

  pc = CHAIN_SWORD(pc,"(");
  pc = gen_nconc(pc, c_words_entity(t, NIL, pdl));
  pc = CHAIN_SWORD(pc, space_p? ") " : ")");
  pc = gen_nconc(pc, words_subexpression(exp, CAST_OPERATOR_PRECEDENCE, true, pdl));
  /* NOTE(review): 25 is presumably CAST_OPERATOR_PRECEDENCE spelled as a
     magic number — confirm and replace with the named constant. */
  if(get_bool_property("PRETTYPRINT_ALL_PARENTHESES") || precedence >= 25) {
    pc = CONS(STRING, strdup("("),
              gen_nconc(pc,CONS(STRING, strdup(")"), NIL)));
  }
  return pc;
}
/* Prettyprint a sizeof expression as a word list: "sizeof(...)" in C,
 * "c_sizeof(...)" (ISO_C_BINDING) in Fortran.  The operand is either a
 * type or an expression.
 *
 * Fix: removed the duplicated word "the the" in the user-visible
 * Fortran warning message.
 */
static list words_sizeofexpression(sizeofexpression obj,
                                   bool in_type_declaration,
                                   list pdl)
{
  list pc = NIL;

  switch(get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      pips_user_warning("generating FORTRAN 2008 function call defined in the module ISO_C_BINDING\n");
      pc = CHAIN_SWORD(pc,"c_sizeof(");
      break;
    case is_language_c:
      pc = CHAIN_SWORD(pc,"sizeof(");
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
  }

  if (sizeofexpression_type_p(obj)) {
    type t = sizeofexpression_type(obj);
    /* FI: the test used below is probably too strict I believe, because
       dimensions are not allowed, but I may be wrong*/
    if(derived_type_p(t)) {
      entity te = basic_derived(variable_basic(type_variable(t)));
      if(!gen_in_list_p((void *) te, pdl)) {
        /* The derived type has already been declared: use its name. */
        list pca = words_type(sizeofexpression_type(obj), pdl, false);
        pc = gen_nconc(pc, pca);
      }
      else {
        /* The type must be fully declared: see struct15.c */
        list pct = c_words_simplified_entity(t, NIL, true, in_type_declaration, pdl);
        pc = gen_nconc(pc, pct);
      }
    }
    else {
      list pca = words_type(sizeofexpression_type(obj), pdl, false);
      pc = gen_nconc(pc, pca);
    }
  }
  else
    /* sizeof applied to an expression rather than a type. */
    pc = gen_nconc(pc, words_expression(sizeofexpression_expression(obj), pdl));

  pc = CHAIN_SWORD(pc,")");
  return pc;
}
/* Prettyprint a subscript expression as a word list.  In C the array
 * expression is parenthesized and indices use "[i][j]"; in Fortran 95
 * only allocatable accesses are supported and indices use "(i,j)".
 * Plain Fortran subscripts are not supported. */
static list words_subscript(subscript s, list pdl)
{
  list pc = NIL;
  expression a = subscript_array(s);
  list lexp = subscript_indices(s);
  bool first = true;

  /* Parentheses must be added for array expression
   * like __ctype+1 in (__ctype+1)[*np]
   */
  /* Here we differentiate the indices parenthesis syntax */
  switch(get_prettyprint_language_tag()) {
    case is_language_fortran:
      pips_internal_error("We don't know how to prettyprint a subscript in "
                          "Fortran, aborting");
    case is_language_fortran95: {
      bool allocatable_p = expression_allocatable_data_access_p(a);
      pips_assert("We don't know how to prettyprint a subscript in Fortran95 "
                  "and it's not an allocatable",
                  allocatable_p );
      pc = gen_nconc(pc, words_expression(a, pdl));
      if(!ENDP(lexp)) {
        pc = CHAIN_SWORD(pc,"(");
      }
      break;
    }
    case is_language_c:
      /* Protect the array expression, open the first bracket. */
      pc = CHAIN_SWORD(pc,"(");
      pc = gen_nconc(pc, words_expression(a, pdl));
      pc = CHAIN_SWORD(pc,")");
      if(!ENDP(lexp)) {
        pc = CHAIN_SWORD(pc,"[");
      }
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
  }

  /* Print now the indices list */
  FOREACH(expression,exp,lexp) {
    if(!first) {
      /* Separator between two consecutive indices. */
      switch(get_prettyprint_language_tag()) {
        case is_language_fortran:
        case is_language_fortran95:
          pc = CHAIN_SWORD(pc, ",");
          break;
        case is_language_c:
          pc = CHAIN_SWORD(pc,"][");
          break;
        default:
          pips_internal_error("Language unknown !");
          break;
      }
    }
    pc = gen_nconc(pc, words_expression(exp, pdl));
    first = false;
  }

  /* Here we differentiate the indices syntax */
  switch(get_prettyprint_language_tag()) {
    case is_language_fortran:
    case is_language_fortran95:
      if(!ENDP(lexp)) {
        pc = CHAIN_SWORD(pc,")");
      }
      break;
    case is_language_c:
      if(!ENDP(lexp)) {
        pc = CHAIN_SWORD(pc,"]");
      }
      break;
    default:
      pips_internal_error("Language unknown !");
      break;
  }
  return pc;
}
/* Prettyprint a call through a function expression (e.g. a function
 * pointer) as "(f)(arg1,arg2,...)". */
static list words_application(application a, list pdl)
{
  list pc = NIL;
  expression f = application_function(a);
  bool comma_needed = false;

  /* Parentheses must be added for function expression */
  pc = CHAIN_SWORD(pc,"(");
  pc = gen_nconc(pc, words_expression(f, pdl));
  pc = CHAIN_SWORD(pc,")(");
  FOREACH(expression, exp, application_arguments(a)) {
    if (comma_needed)
      pc = CHAIN_SWORD(pc,",");
    pc = gen_nconc(pc, words_expression(exp, pdl));
    comma_needed = true;
  }
  pc = CHAIN_SWORD(pc,")");
  return pc;
}
/* Prettyprint a C for-loop: build the "for (init; cond; incr)" header,
 * then the body, with braces only when the body is not a one-liner or
 * when PRETTYPRINT_ALL_C_BRACES is set. */
static text text_forloop(entity module,
                         const char* label,
                         int margin,
                         forloop obj,
                         int n,
                         list pdl)
{
  list pc = NIL;
  unformatted u;
  text r = make_text(NIL);
  statement body = forloop_body(obj) ;
  //instruction i = statement_instruction(body);
  bool braces_p = !one_liner_p(body) || prettyprint_all_c_braces_p;

  pc = CHAIN_SWORD(pc,"for (");
  /* Each of the three clauses may be missing, yielding e.g. "for(;;)". */
  if (!expression_undefined_p(forloop_initialization(obj)))
    pc = gen_nconc(pc, words_expression(forloop_initialization(obj), pdl));
  pc = CHAIN_SWORD(pc,C_STATEMENT_END_STRING);
  if (!expression_undefined_p(forloop_condition(obj))) {
    /* To restitute for(;;) */
    expression cond = forloop_condition(obj);
    /* A constant-one condition is left implicit. */
    if(!expression_one_p(cond))
      pc = gen_nconc(pc, words_expression(forloop_condition(obj), pdl));
  }
  pc = CHAIN_SWORD(pc,C_STATEMENT_END_STRING);
  if (!expression_undefined_p(forloop_increment(obj)))
    pc = gen_nconc(pc, words_expression(forloop_increment(obj), pdl));
  pc = CHAIN_SWORD(pc,!braces_p?")":") {");
  u = make_unformatted(strdup(label), n, margin, pc) ;
  ADD_SENTENCE_TO_TEXT(r, make_sentence(is_sentence_unformatted, u));

  if(!braces_p) {
    /* One-liner body, printed without braces. */
    MERGE_TEXTS(r, text_statement_enclosed(module,
                                           margin+INDENTATION,
                                           body,
                                           !one_liner_p(body),
                                           !one_liner_p(body),
                                           pdl));
  }
  else {
    /* The opening brace was emitted with the loop header above. */
    // ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"{"));
    MERGE_TEXTS(r, text_statement(module, margin+INDENTATION, body, pdl));
    ADD_SENTENCE_TO_TEXT(r, MAKE_ONE_WORD_SENTENCE(margin,"}"));
  }
  return r;
}
|
transpose.h | // This code is part of the Problem Based Benchmark Suite (PBBS)
// Copyright (c) 2011-2016 Guy Blelloch and the PBBS team
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#pragma once
#include "get_time.h"
#include "utilities.h"
namespace pbbs {
constexpr const size_t TRANS_THRESHHOLD = PAR_GRANULARITY / 4;
// Split point for the cache-oblivious transpose recursion: plain halving.
// (A previous variant, kept below for reference, biased the split away
// from exact powers of two.)
inline size_t split(size_t n) {
  // return ((((size_t) 1) << log2_up(n) != n) ? n/2 : (7*(n+1))/16);
  return n / 2;
}
// Cache-oblivious out-of-place matrix transpose: B[j][i] = A[i][j].
// Recursively splits the larger dimension until the tile is below
// TRANS_THRESHHOLD elements, then transposes sequentially; the two
// sub-problems run in parallel via par_do.
template <class E>
struct transpose {
  E *A, *B;  // A: input (row-major rCount x cCount), B: output (transposed)
  transpose(E *AA, E *BB) : A(AA), B(BB) {}

  // Transpose the sub-block of rCount rows from rStart and cCount columns
  // from cStart; rLength/cLength are the full row lengths of A and B.
  void transR(size_t rStart, size_t rCount, size_t rLength, size_t cStart,
              size_t cCount, size_t cLength) {
    if (cCount * rCount < TRANS_THRESHHOLD) {
      // Base case: tile small enough, transpose sequentially.
      for (size_t i = rStart; i < rStart + rCount; i++)
        for (size_t j = cStart; j < cStart + cCount; j++)
          B[j * cLength + i] = A[i * rLength + j];
    } else if (cCount > rCount) {
      // Split the column range.
      size_t l1 = split(cCount);
      size_t l2 = cCount - l1;
      auto left = [&]() {
        transR(rStart, rCount, rLength, cStart, l1, cLength);
      };
      auto right = [&]() {
        transR(rStart, rCount, rLength, cStart + l1, l2, cLength);
      };
      par_do(left, right);
    } else {
      // Split the row range.  Fix: split rCount, not cCount (matching
      // upstream PBBS).  The old split(cCount) still covered every row
      // (cCount/2 + (rCount - cCount/2) == rCount) but made the
      // recursion badly unbalanced when rCount >> cCount.
      size_t l1 = split(rCount);
      size_t l2 = rCount - l1;
      auto left = [&]() {
        transR(rStart, l1, rLength, cStart, cCount, cLength);
      };
      auto right = [&]() {
        transR(rStart + l1, l2, rLength, cStart, cCount, cLength);
      };
      par_do(left, right);
    }
  }

  // Top-level entry: A is rCount x cCount, B becomes cCount x rCount.
  // Under OpenMP, run the recursion inside a single parallel region.
  void trans(size_t rCount, size_t cCount) {
#if defined(OPENMP)
#pragma omp parallel
#pragma omp single
#endif
    transR(0, rCount, cCount, 0, cCount, rCount);
  }
};
// Cache-oblivious transpose of variable-length cells: the elements of
// cell (i,j) of A (starting at offset OA[i*rLength+j]) are moved to cell
// (j,i) of B (starting at OB[j*cLength+i]).  Same recursion as struct
// transpose above, with a parallel_for over rows in the base case.
template <class E, class int_t>
struct blockTrans {
  E *A, *B;        // source / destination element arrays
  int_t *OA, *OB;  // per-cell start offsets into A and B
  blockTrans(E *AA, E *BB, int_t *OOA, int_t *OOB)
      : A(AA), B(BB), OA(OOA), OB(OOB) {}

  // Move the cells of the sub-block [rStart, rStart+rCount) x
  // [cStart, cStart+cCount); rLength/cLength are full row lengths.
  void transR(size_t rStart, size_t rCount, size_t rLength, size_t cStart,
              size_t cCount, size_t cLength) {
    if (cCount * rCount < TRANS_THRESHHOLD * 16) {
      // Base case: one parallel task per row.  A cell's length is the
      // difference of consecutive OA offsets, so OA must have one extra
      // trailing entry per row range used here.
      parallel_for(rStart, rStart + rCount, [&](size_t i) {
        for (size_t j = cStart; j < cStart + cCount; j++) {
          size_t sa = OA[i * rLength + j];
          size_t sb = OB[j * cLength + i];
          size_t l = OA[i * rLength + j + 1] - sa;
          for (size_t k = 0; k < l; k++)
            move_uninitialized(B[k + sb], A[k + sa]);
        }
      });
    } else if (cCount > rCount) {
      // Split the column range.
      size_t l1 = split(cCount);
      size_t l2 = cCount - l1;
      auto left = [&]() {
        transR(rStart, rCount, rLength, cStart, l1, cLength);
      };
      auto right = [&]() {
        transR(rStart, rCount, rLength, cStart + l1, l2, cLength);
      };
      par_do(left, right);
    } else {
      // Split the row range.  Fix: split rCount, not cCount (matching
      // upstream PBBS); the old code still covered all rows but was
      // badly unbalanced when rCount >> cCount.
      size_t l1 = split(rCount);
      size_t l2 = rCount - l1;
      auto left = [&]() {
        transR(rStart, l1, rLength, cStart, cCount, cLength);
      };
      auto right = [&]() {
        transR(rStart + l1, l2, rLength, cStart, cCount, cLength);
      };
      par_do(left, right);
    }
  }

  // Top-level entry: offsets describe an rCount x cCount grid of cells;
  // B receives the cCount x rCount transpose.  Under OpenMP the
  // recursion runs inside a single parallel region.
  void trans(size_t rCount, size_t cCount) {
#if defined(OPENMP)
#pragma omp parallel
#pragma omp single
#endif
    transR(0, rCount, cCount, 0, cCount, rCount);
  }
};
// Moves values from blocks to buckets
// From is sorted by key within each block, in block major
// counts is the # of keys in each bucket for each block, in block major
// From and To are of lenght n
// counts is of length num_blocks * num_buckets
// Data is memcpy'd into To avoiding initializers and overloaded =
// Moves values from blocks to buckets.
// From is sorted by key within each block, in block major order;
// counts is the # of keys in each bucket for each block, in block major;
// From and To are of length n; counts is of length num_blocks * num_buckets.
// Data is memcpy'd into To avoiding initializers and overloaded =.
// Returns a freshly allocated array of num_buckets+1 bucket offsets into To.
template <typename E, typename s_size_t>
size_t *transpose_buckets(E *From, E *To, s_size_t *counts, size_t n,
                          size_t block_size, size_t num_blocks,
                          size_t num_buckets) {
  timer t("transpose", false);
  size_t m = num_buckets * num_blocks;
  sequence<s_size_t> dest_offsets; //(m);
  auto add = addm<s_size_t>();
  // cout << "ss 8" << endl;
  // for smaller input do non-cache oblivious version
  if (n < (1 << 22) || num_buckets <= 512 || num_blocks <= 512) {
    size_t block_bits = log2_up(num_blocks);
    size_t block_mask = num_blocks - 1;
    if ((size_t)1 << block_bits != num_blocks) {
      cout << "in transpose_buckets: num_blocks must be a power or 2" << endl;
      abort();
    }
    // determine the destination offsets
    // get(i) reads counts transposed: bucket-major instead of block-major.
    auto get = [&](size_t i) {
      return counts[(i >> block_bits) + num_buckets * (i & block_mask)];
    };
    // slow down?
    dest_offsets = sequence<s_size_t>(m, get);
    size_t sum = scan_inplace(dest_offsets.slice(), add);
    if (sum != n) abort();
    t.next("seq and scan");
    // send each key to correct location within its bucket
    auto f = [&](size_t i) {
      size_t s_offset = i * block_size;
      for (size_t j = 0; j < num_buckets; j++) {
        size_t d_offset = dest_offsets[i + num_blocks * j];
        size_t len = counts[i * num_buckets + j];
        for (size_t k = 0; k < len; k++)
          move_uninitialized(To[d_offset++], From[s_offset++]);
      }
    };
    parallel_for(0, num_blocks, f, 1);
    t.next("trans");
    free_array(counts);
  } else { // for larger input do cache efficient transpose
    sequence<s_size_t> source_offsets(counts, m);
    dest_offsets = sequence<s_size_t>(m);
    size_t total;
    transpose<s_size_t>(counts, dest_offsets.begin())
        .trans(num_blocks, num_buckets);
    t.next("trans 1");
    // cout << "ss 9" << endl;
    // do both scans inplace
    total = scan_inplace(dest_offsets.slice(), add);
    if (total != n) abort();
    total = scan_inplace(source_offsets.slice(), add);
    if (total != n) abort();
    // NOTE(review): index m on a sequence constructed with length m —
    // looks one past the end unless counts/the sequence carries m+1
    // slots; verify against the sequence constructor used here.
    source_offsets[m] = n;
    t.next("scans");
    blockTrans<E, s_size_t>(From, To, source_offsets.begin(),
                            dest_offsets.begin())
        .trans(num_blocks, num_buckets);
    t.next("trans 2");
    // cout << "ss 10" << endl;
  }
  size_t *bucket_offsets = new_array_no_init<size_t>(num_buckets + 1);
  // NOTE(review): loop index is s_size_t while num_buckets is size_t —
  // narrower/signed index would misbehave for very large bucket counts.
  for (s_size_t i = 0; i < num_buckets; i++)
    bucket_offsets[i] = dest_offsets[i * num_blocks];
  // last element is the total size n
  bucket_offsets[num_buckets] = n;
  return bucket_offsets;
}
}
|
gamma_index_ivfpq.h | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This faiss source code is licensed under the MIT license.
* https://github.com/facebookresearch/faiss/blob/master/LICENSE
*
*
* The works below are modified based on faiss:
* 1. Replace the static batch indexing with real time indexing
* 2. Add the fine-grained sort after PQ coarse sort
* 3. Add the numeric field and bitmap filters in the process of searching
*
* Modified works copyright 2019 The Gamma Authors.
*
* The modified codes are licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*
*/
#ifndef GAMMA_INDEX_IVFPQ_H_
#define GAMMA_INDEX_IVFPQ_H_
#include <unistd.h>
#include <atomic>
#include "faiss/IndexIVF.h"
#include "faiss/IndexIVFPQ.h"
#include "faiss/VectorTransform.h"
#include "faiss/IndexHNSW.h"
#include "faiss/InvertedLists.h"
#include "faiss/impl/FaissAssert.h"
#include "faiss/impl/io.h"
#include "faiss/index_io.h"
#include "faiss/utils/Heap.h"
#include "faiss/utils/distances.h"
#include "faiss/utils/hamming.h"
#include "faiss/utils/utils.h"
#include "table/field_range_index.h"
#include "common/gamma_common_data.h"
#include "gamma_index_flat.h"
#include "gamma_scanner.h"
#include "util/log.h"
#include "vector/memory_raw_vector.h"
#include "vector/raw_vector.h"
#include "realtime/realtime_invert_index.h"
#include "index/retrieval_model.h"
#include "util/utils.h"
namespace tig_gamma {
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
struct IndexIVFPQStats {
  size_t nrefine;         // nb of refines (IVFPQR)
  size_t n_hamming_pass;
  // nb of passed Hamming distance tests (for polysemous)
  // timings measured with the CPU RTC
  // on all threads
  size_t search_cycles;
  size_t refine_cycles;   // only for IVFPQR

  IndexIVFPQStats() { reset(); }

  // Zero all counters.  Fix: the previous implementation was an empty
  // body, so the constructor left every field uninitialized and the
  // accumulated statistics were garbage.
  void reset() {
    nrefine = 0;
    n_hamming_pass = 0;
    search_cycles = 0;
    refine_cycles = 0;
  }
};
// global var that collects them all
extern IndexIVFPQStats indexIVFPQ_stats;
// namespace {
using idx_t = faiss::Index::idx_t;
// Read the CPU timestamp counter via RDTSC (x86-64 only); used by the
// TIC/TOC macros below for coarse cycle accounting in the scanners.
// Returns 0 on non-x86 builds, so all timing stats degrade to zero there.
static uint64_t get_cycles() {
#ifdef __x86_64__
  uint32_t high, low;
  asm volatile("rdtsc \n\t" : "=a"(low), "=d"(high));
  return ((uint64_t)high << 32) | (low);
#else
  return 0;
#endif
}
#define TIC t0 = get_cycles()
#define TOC get_cycles() - t0
/** QueryTables manages the various ways of searching an
* IndexIVFPQ. The code contains a lot of branches, depending on:
* - metric_type: are we computing L2 or Inner product similarity?
* - by_residual: do we encode raw vectors or residuals?
* - use_precomputed_table: are x_R|x_C tables precomputed?
* - polysemous_ht: are we filtering with polysemous codes?
*/
struct QueryTables {
  /*****************************************************
   * General data from the IVFPQ
   *****************************************************/
  const faiss::IndexIVFPQ &ivfpq;
  const faiss::IVFSearchParameters *params;
  // copied from IndexIVFPQ for easier access
  int d;
  const faiss::ProductQuantizer &pq;
  faiss::MetricType metric_type;
  bool by_residual;
  int use_precomputed_table;
  int polysemous_ht;
  // pre-allocated data buffers; all four point into `mem`
  float *sim_table, *sim_table_2;
  float *residual_vec, *decoded_vec;
  // single data buffer backing the pointers above
  std::vector<float> mem;
  // for table pointers
  std::vector<const float *> sim_table_ptrs;
  explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
                       const faiss::IVFSearchParameters *params,
                       faiss::MetricType metric_type)
      : ivfpq(ivfpq),
        // BUGFIX: `params` was missing from the init list, so the member was
        // left with an indeterminate value (the ctor argument shadowed it).
        params(params),
        d(ivfpq.d),
        pq(ivfpq.pq),
        metric_type(metric_type),
        by_residual(ivfpq.by_residual),
        use_precomputed_table(ivfpq.use_precomputed_table) {
    // layout of `mem`:
    // [sim_table ksub*M][sim_table_2 ksub*M][residual_vec d][decoded_vec d]
    mem.resize(pq.ksub * pq.M * 2 + d * 2);
    sim_table = mem.data();
    sim_table_2 = sim_table + pq.ksub * pq.M;
    residual_vec = sim_table_2 + pq.ksub * pq.M;
    decoded_vec = residual_vec + d;
    // for polysemous: per-search parameters override the index-level value
    polysemous_ht = ivfpq.polysemous_ht;
    if (auto ivfpq_params =
            dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
      polysemous_ht = ivfpq_params->polysemous_ht;
    }
    if (polysemous_ht != 0) {
      q_code.resize(pq.code_size);
    }
    init_list_cycles = 0;
    sim_table_ptrs.resize(pq.M);
  }
  /*****************************************************
   * What we do when query is known
   *****************************************************/
  // field specific to query (not owned)
  const float *qi;
  // query-specific initialization
  void init_query(const float *qi) {
    this->qi = qi;
    if (metric_type == faiss::METRIC_INNER_PRODUCT)
      init_query_IP();
    else
      init_query_L2();
    // without residual encoding the polysemous code of the query is fixed
    if (!by_residual && polysemous_ht != 0) pq.compute_code(qi, q_code.data());
  }
  void init_query_IP() {
    // precompute some tables specific to the query qi
    pq.compute_inner_prod_table(qi, sim_table);
  }
  void init_query_L2() {
    if (!by_residual) {
      pq.compute_distance_table(qi, sim_table);
    } else if (use_precomputed_table) {
      pq.compute_inner_prod_table(qi, sim_table_2);
    }
  }
  /*****************************************************
   * When inverted list is known: prepare computations
   *****************************************************/
  // fields specific to list
  long key;          // id of the current inverted list (centroid)
  float coarse_dis;  // query-to-centroid distance for the current list
  std::vector<uint8_t> q_code;
  uint64_t init_list_cycles;  // accumulated RDTSC cycles spent below
  /// once we know the query and the centroid, we can prepare the
  /// sim_table that will be used for accumulation
  /// and dis0, the initial value
  float precompute_list_tables() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        dis0 = precompute_list_tables_IP();
      else
        dis0 = precompute_list_tables_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }
  float precompute_list_table_pointers() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        FAISS_THROW_MSG("not implemented");
      else
        dis0 = precompute_list_table_pointers_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }
  /*****************************************************
   * compute tables for inner prod
   *****************************************************/
  float precompute_list_tables_IP() {
    // prepare the sim_table that will be used for accumulation
    // and dis0, the initial value
    ivfpq.quantizer->reconstruct(key, decoded_vec);
    // decoded_vec = centroid
    float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);
    if (polysemous_ht) {
      // polysemous filtering codes the residual w.r.t. the centroid
      for (int i = 0; i < d; i++) {
        residual_vec[i] = qi[i] - decoded_vec[i];
      }
      pq.compute_code(residual_vec, q_code.data());
    }
    return dis0;
  }
  /*****************************************************
   * compute tables for L2 distance
   *****************************************************/
  float precompute_list_tables_L2() {
    float dis0 = 0;
    if (use_precomputed_table == 0 || use_precomputed_table == -1) {
      // no precomputation: full distance table on the residual
      ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      pq.compute_distance_table(residual_vec, sim_table);
      if (polysemous_ht != 0) {
        pq.compute_code(residual_vec, q_code.data());
      }
    } else if (use_precomputed_table == 1) {
      // sim_table = precomputed_table[key] - 2 * sim_table_2
      dis0 = coarse_dis;
      faiss::fvec_madd(pq.M * pq.ksub,
                       &ivfpq.precomputed_table[key * pq.ksub * pq.M], -2.0,
                       sim_table_2, sim_table);
      if (polysemous_ht != 0) {
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
        pq.compute_code(residual_vec, q_code.data());
      }
    } else if (use_precomputed_table == 2) {
      // MultiIndexQuantizer: assemble the table block-by-block, one block
      // of Mf sub-quantizers per coarse sub-quantizer
      dis0 = coarse_dis;
      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;
      const float *qtab = sim_table_2;  // query-specific table
      float *ltab = sim_table;          // (output) list-specific table
      long k = key;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        // compute PQ index
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;
        // get corresponding table
        const float *pc =
            &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
        if (polysemous_ht == 0) {
          // sum up with query-specific table
          faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
          ltab += Mf * pq.ksub;
          qtab += Mf * pq.ksub;
        } else {
          // polysemous case: also record the argmin code per sub-quantizer
          for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
            q_code[m] =
                faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
            pc += pq.ksub;
            ltab += pq.ksub;
            qtab += pq.ksub;
          }
        }
      }
    }
    return dis0;
  }
  float precompute_list_table_pointers_L2() {
    float dis0 = 0;
    if (use_precomputed_table == 1) {
      // point each sub-quantizer at its row of the precomputed table
      dis0 = coarse_dis;
      const float *s = &ivfpq.precomputed_table[key * pq.ksub * pq.M];
      for (size_t m = 0; m < pq.M; m++) {
        sim_table_ptrs[m] = s;
        s += pq.ksub;
      }
    } else if (use_precomputed_table == 2) {
      dis0 = coarse_dis;
      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;
      long k = key;
      int m0 = 0;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;
        const float *pc =
            &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
        for (int m = m0; m < m0 + Mf; m++) {
          sim_table_ptrs[m] = pc;
          pc += pq.ksub;
        }
        m0 += Mf;
      }
    } else {
      FAISS_THROW_MSG("need precomputed tables");
    }
    if (polysemous_ht) {
      FAISS_THROW_MSG("not implemented");
      // Not clear that it makes sense to implement this,
      // because it costs M * ksub, which is what we wanted to
      // avoid with the tables pointers.
    }
    return dis0;
  }
};
template <class C>
struct KnnSearchResults {
  idx_t key;        // current list id; used to synthesize ids in store_pairs mode
  const idx_t *ids; // nullptr => report (key << 32 | offset) instead
  // heap params
  size_t k;
  float *heap_sim;
  idx_t *heap_ids;
  size_t nup;       // number of heap updates performed
  // Offer candidate j with distance `dis` to the k-best heap; keeps the
  // heap at size k by popping the current worst before pushing.
  inline void add(idx_t j, float dis) {
    if (!C::cmp(heap_sim[0], dis)) return;
    faiss::heap_pop<C>(k, heap_sim, heap_ids);
    const idx_t id = ids ? ids[j] : (key << 32 | j);
    faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
    ++nup;
  }
};
/*****************************************************
* Scaning the codes.
* The scanning functions call their favorite precompute_*
* function to precompute the tables they need.
*****************************************************/
// Scanner over the PQ codes of one inverted list. QueryTables supplies the
// per-query / per-list precomputed distance tables; the scan_* variants
// differ only in how much was precomputed.
template <typename IDType, faiss::MetricType METRIC_TYPE>
struct IVFPQScannerT : QueryTables {
  const uint8_t *list_codes;  // not owned
  const IDType *list_ids;
  size_t list_size;
  explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq,
                         const faiss::IVFSearchParameters *params)
      : QueryTables(ivfpq, params, METRIC_TYPE) {
    // only 8-bit PQ codes are supported (one byte per sub-quantizer)
    FAISS_THROW_IF_NOT(pq.nbits == 8);
  }
  // distance offset shared by all codes of the current list
  float dis0;
  // mode 2: precompute full tables; mode 1: table pointers only;
  // any other mode: no per-list precomputation (dis0 left untouched)
  void init_list(idx_t list_no, float coarse_dis, int mode) {
    this->key = list_no;
    this->coarse_dis = coarse_dis;
    if (mode == 2) {
      dis0 = precompute_list_tables();
    } else if (mode == 1) {
      dis0 = precompute_list_table_pointers();
    }
  }
  /// tables are not precomputed, but pointers are provided to the
  /// relevant X_c|x_r tables
  template <class SearchResultType>
  void scan_list_with_pointer(size_t ncode, const uint8_t *codes,
                              SearchResultType &res) const {
    for (size_t j = 0; j < ncode; j++) {
      float dis = dis0;
      const float *tab = sim_table_2;
      for (size_t m = 0; m < pq.M; m++) {
        int ci = *codes++;
        // combine the list-specific pointer table with the query table
        dis += sim_table_ptrs[m][ci] - 2 * tab[ci];
        tab += pq.ksub;
      }
      res.add(j, dis);
    }
  }
  /// nothing is precomputed: access residuals on-the-fly
  template <class SearchResultType>
  void scan_on_the_fly_dist(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    const float *dvec;
    float dis0 = 0;  // NOTE: local, shadows the member of the same name
    if (by_residual) {
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        ivfpq.quantizer->reconstruct(key, residual_vec);
        dis0 = faiss::fvec_inner_product(residual_vec, qi, d);
      } else {
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      }
      dvec = residual_vec;
    } else {
      dvec = qi;
      dis0 = 0;
    }
    for (size_t j = 0; j < ncode; j++) {
      // decode each PQ code back to a d-dim vector and compare directly
      pq.decode(codes, decoded_vec);
      codes += pq.code_size;
      float dis;
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d);
      } else {
        dis = faiss::fvec_L2sqr(decoded_vec, dvec, d);
      }
      res.add(j, dis);
    }
  }
  /*****************************************************
   * Scanning codes with polysemous filtering
   *****************************************************/
  template <class HammingComputer, class SearchResultType>
  void scan_list_polysemous_hc(size_t ncode, const uint8_t *codes,
                               SearchResultType &res) const {
    int ht = ivfpq.polysemous_ht;
    size_t n_hamming_pass = 0;
    int code_size = pq.code_size;
    HammingComputer hc(q_code.data(), code_size);
    for (size_t j = 0; j < ncode; j++) {
      const uint8_t *b_code = codes;
      // cheap Hamming test first; full table lookup only when it passes
      int hd = hc.hamming(b_code);
      if (hd < ht) {
        n_hamming_pass++;
        float dis = dis0;
        const float *tab = sim_table;
        for (size_t m = 0; m < pq.M; m++) {
          dis += tab[*b_code++];
          tab += pq.ksub;
        }
        res.add(j, dis);
      }
      codes += code_size;
    }
    // counters are shared across OMP threads; serialize the update
#pragma omp critical
    { indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; }
  }
  // Dispatch to a HammingComputer specialized for the code size.
  template <class SearchResultType>
  void scan_list_polysemous(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    switch (pq.code_size) {
#define HANDLE_CODE_SIZE(cs)                                               \
  case cs:                                                                 \
    scan_list_polysemous_hc<faiss::HammingComputer##cs, SearchResultType>( \
        ncode, codes, res);                                                \
    break
      HANDLE_CODE_SIZE(4);
      HANDLE_CODE_SIZE(8);
      HANDLE_CODE_SIZE(16);
      HANDLE_CODE_SIZE(20);
      HANDLE_CODE_SIZE(32);
      HANDLE_CODE_SIZE(64);
#undef HANDLE_CODE_SIZE
      default:
        if (pq.code_size % 8 == 0)
          scan_list_polysemous_hc<faiss::HammingComputerM8, SearchResultType>(
              ncode, codes, res);
        else
          scan_list_polysemous_hc<faiss::HammingComputerM4, SearchResultType>(
              ncode, codes, res);
        break;
    }
  }
};
/* struct GammaInvertedListScanner : faiss::InvertedListScanner { */
/* GammaInvertedListScanner() { retrieval_context_ = nullptr; } */
/* virtual size_t scan_codes_pointer(size_t ncode, const uint8_t **codes, */
/* const idx_t *ids, float *heap_sim, */
/* idx_t *heap_ids, size_t k) = 0; */
/* void set_search_context(RetrievalContext *retrieval_context) { */
/* this->retrieval_context_ = retrieval_context; */
/* } */
/* RetrievalContext *retrieval_context_; */
/* }; */
// Inverted-list scanner for the IVF-flat path: vectors are fetched from the
// gamma raw-vector store instead of being stored inline as codes.
template <faiss::MetricType metric, class C>
struct GammaIVFFlatScanner : GammaInvertedListScanner {
  size_t d;  // vector dimension
  GammaIVFFlatScanner(size_t d) : d(d) {}
  // current query vector (not owned)
  const float *xi;
  void set_query(const float *query) override { this->xi = query; }
  idx_t list_no;  // inverted list currently being scanned
  void set_list(idx_t list_no, float /* coarse_dis */) override {
    this->list_no = list_no;
  }
  // Distance between the query and one stored flat vector (code bytes are
  // reinterpreted as floats).
  float distance_to_code(const uint8_t *code) const override {
    const float *yj = (float *)code;
    float dis = metric == faiss::METRIC_INNER_PRODUCT
                    ? faiss::fvec_inner_product(xi, yj, d)
                    : faiss::fvec_L2sqr(xi, yj, d);
    return dis;
  }
  // Scan one inverted list and maintain a k-best heap in simi/idxi.
  // NOTE(review): `codes` is reinterpreted as a RawVector* handle rather
  // than a code array -- the caller must pass the raw-vector store through
  // this argument; confirm against the call sites.
  inline size_t scan_codes(size_t list_size, const uint8_t *codes,
                           const idx_t *ids, float *simi, idx_t *idxi,
                           size_t k) const override {
    RawVector *raw_vec = (RawVector *)codes;
    size_t nup = 0;
    for (size_t j = 0; j < list_size; j++) {
      // skip logically deleted entries and recover the real vector id
      if (ids[j] & realtime::kDelIdxMask) continue;
      idx_t vid = ids[j] & realtime::kRecoverIdxMask;
      if (vid < 0) continue;
      // honor the request's filters (numeric-field / bitmap filtering)
      if (retrieval_context_->IsValid(vid) == false) continue;
      ScopeVector svec;
      raw_vec->GetVector(vid, svec);
      const float *yj = (const float *)svec.Get();
      float dis = metric == faiss::METRIC_INNER_PRODUCT
                      ? faiss::fvec_inner_product(xi, yj, d)
                      : faiss::fvec_L2sqr(xi, yj, d);
      // accept only scores the context allows, then update the heap
      if (retrieval_context_->IsSimilarScoreValid(dis) && C::cmp(simi[0], dis)) {
        faiss::heap_pop<C>(k, simi, idxi);
        faiss::heap_push<C>(k, simi, idxi, dis, vid);
        nup++;
      }
    }
    return nup;
  }
  // Pointer-based scanning is not supported for the flat scanner.
  size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                            const idx_t *ids, float *heap_sim, idx_t *heap_ids,
                            size_t k) {
    return 0;
  }
};
// Search-time knobs for the IVFPQ retrieval model.
class IVFPQRetrievalParameters : public RetrievalParameters {
 public:
  // Default configuration: parallelize across queries, recall 100
  // candidates, probe 80 inverted lists, use the PQ (non-flat) path.
  IVFPQRetrievalParameters() : RetrievalParameters() {
    ivf_flat_ = false;
    nprobe_ = 80;
    recall_num_ = 100;
    parallel_on_queries_ = true;
  }
  // Fully explicit configuration.
  IVFPQRetrievalParameters(bool parallel_on_queries, int recall_num, int nprobe,
                           enum DistanceComputeType type, bool ivf_flat)
      : RetrievalParameters() {
    distance_compute_type_ = type;
    ivf_flat_ = ivf_flat;
    nprobe_ = nprobe;
    recall_num_ = recall_num;
    parallel_on_queries_ = parallel_on_queries;
  }
  // Defaults plus an explicit distance-compute type.
  IVFPQRetrievalParameters(enum DistanceComputeType type)
      : RetrievalParameters() {
    distance_compute_type_ = type;
    ivf_flat_ = false;
    nprobe_ = 80;
    recall_num_ = 100;
    parallel_on_queries_ = true;
  }
  virtual ~IVFPQRetrievalParameters() {}
  // Plain accessors.
  int RecallNum() { return recall_num_; }
  void SetRecallNum(int recall_num) { recall_num_ = recall_num; }
  int Nprobe() { return nprobe_; }
  void SetNprobe(int nprobe) { nprobe_ = nprobe; }
  bool ParallelOnQueries() { return parallel_on_queries_; }
  void SetParallelOnQueries(bool parallel_on_queries) {
    parallel_on_queries_ = parallel_on_queries;
  }
  bool IvfFlat() { return ivf_flat_; }
  void SetIvfFlat(bool ivf_flat) { ivf_flat_ = ivf_flat; }

 protected:
  // parallelize over queries (true) or over ivf lists (false)
  bool parallel_on_queries_;
  int recall_num_;  // number of candidates recalled before fine ranking
  int nprobe_;      // number of inverted lists probed per query
  bool ivf_flat_;   // take the flat (uncompressed) scan path
};
struct IVFPQModelParams;
// Gamma's real-time IVFPQ index: faiss IndexIVFPQ extended with a realtime
// inverted index, deletes/updates, and filtered search.
struct GammaIVFPQIndex : GammaFLATIndex, faiss::IndexIVFPQ {
  GammaIVFPQIndex();
  virtual ~GammaIVFPQIndex();
  // Scanner factories; the Gamma variants carry a RetrievalContext so
  // numeric-field / bitmap filters can be applied while scanning.
  faiss::InvertedListScanner *get_InvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);
  GammaInvertedListScanner *GetGammaIVFFlatScanner(
      size_t d, faiss::MetricType metric_type);
  GammaInvertedListScanner *GetGammaInvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);
  int Init(const std::string &model_parameters, int indexing_size) override;
  RetrievalParameters *Parse(const std::string &parameters) override;
  int Indexing() override;
  bool Add(int n, const uint8_t *vec);
  int Update(const std::vector<int64_t> &ids,
             const std::vector<const uint8_t *> &vecs);
  // assign the vectors, then call search_preassign
  int Search(RetrievalContext *retrieval_context, int n, const uint8_t *x,
             int k, float *distances, idx_t *labels);
  void search_preassigned(RetrievalContext *retrieval_context, int n,
                          const float *x, const float *applied_x, int k, const idx_t *keys,
                          const float *coarse_dis, float *distances,
                          idx_t *labels, int nprobe, bool store_pairs,
                          const faiss::IVFSearchParameters *params = nullptr);
  void search_ivf_flat(RetrievalContext *retrieval_context, int n,
                       const float *x, int k, const idx_t *keys,
                       const float *coarse_dis, float *distances, idx_t *labels,
                       int nprobe, bool store_pairs,
                       const faiss::IVFSearchParameters *params = nullptr);
  // Memory used by the realtime inverted index (0 before it exists).
  long GetTotalMemBytes() override {
    if (!rt_invert_index_ptr_) {
      return 0;
    }
    return rt_invert_index_ptr_->GetTotalMemBytes();
  }
  int Dump(const std::string &dir) override;
  int Load(const std::string &index_dir) override;
  virtual void copy_subset_to(faiss::IndexIVF &other, int subset_type, idx_t a1,
                              idx_t a2) const;
  int Delete(const std::vector<int64_t> &ids);
  int indexed_vec_count_;  // number of vectors indexed so far
  realtime::RTInvertIndex *rt_invert_index_ptr_;
  bool compaction_;         // compaction bookkeeping
  size_t compact_bucket_no_;
  uint64_t compacted_num_;
  uint64_t updated_num_;
  int d_;  // vector dimension
  DistanceComputeType metric_type_;
  // presumably an OPQ-style transform applied before PQ encoding -- confirm
  faiss::VectorTransform *opq_;
  // 0 is FlatL2, 1 is HNSWFlat
  int quantizer_type_;
#ifdef PERFORMANCE_TESTING
  std::atomic<uint64_t> search_count_;
  int add_count_;
#endif
  IVFPQModelParams *model_param_;
};
// Gamma's IVFPQ scanner: IVFPQScannerT distance machinery plus the gamma
// deletion/filter checks applied per entry.
template <faiss::MetricType METRIC_TYPE, class C, int precompute_mode>
struct GammaIVFPQScanner : IVFPQScannerT<idx_t, METRIC_TYPE>,
                           GammaInvertedListScanner {
  const GammaIVFPQIndex &gamma_ivfpq_;
  // if true, report (list_no << 32 | offset) instead of stored ids
  bool store_pairs_;
  GammaIVFPQScanner(const GammaIVFPQIndex &gamma_ivfpq, bool store_pairs)
      : IVFPQScannerT<idx_t, METRIC_TYPE>(gamma_ivfpq, nullptr),
        gamma_ivfpq_(gamma_ivfpq) {
    store_pairs_ = store_pairs;
  }
  // Table-based scan with gamma's delete/filter checks. A skipped entry
  // advances `codes` by pq.M bytes, which equals pq.code_size because the
  // base class enforces nbits == 8 (one byte per sub-quantizer).
  template <class SearchResultType>
  void scan_list_with_table(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    size_t j = 0;
    for (; j < ncode; j++) {
      // skip logically deleted entries
      if (res.ids[j] & realtime::kDelIdxMask) {
        codes += this->pq.M;
        continue;
      }
      // skip entries rejected by the request's filters
      if (!retrieval_context_->IsValid(res.ids[j] &
                                       realtime::kRecoverIdxMask)) {
        codes += this->pq.M;
        continue;
      }
      float dis = this->dis0;
      const float *tab = this->sim_table;
      for (size_t m = 0; m < this->pq.M; m++) {
        dis += tab[*codes++];
        tab += this->pq.ksub;
      }
      res.add(j, dis);
    }
    assert(j == ncode);
  }
  inline void set_query(const float *query) override {
    this->init_query(query);
  }
  inline void set_list(idx_t list_no, float coarse_dis) override {
    this->init_list(list_no, coarse_dis, precompute_mode);
  }
  // Plain table lookup; only valid when tables were precomputed (mode 2).
  inline float distance_to_code(const uint8_t *code) const override {
    assert(precompute_mode == 2);
    float dis = this->dis0;
    const float *tab = this->sim_table;
    for (size_t m = 0; m < this->pq.M; m++) {
      dis += tab[*code++];
      tab += this->pq.ksub;
    }
    return dis;
  }
  // NOTE(review): both scan entry points always return 0, never res.nup --
  // callers must not rely on the returned update count; confirm intended.
  inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids,
                           float *heap_sim, idx_t *heap_ids,
                           size_t k) const override {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};
    if (this->polysemous_ht > 0) {
      assert(precompute_mode == 2);
      this->scan_list_polysemous(ncode, codes, res);
    } else if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else if (precompute_mode == 1) {
      this->scan_list_with_pointer(ncode, codes, res);
    } else if (precompute_mode == 0) {
      this->scan_on_the_fly_dist(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }
  // NOTE(review): this passes `codes` (const uint8_t **) to
  // scan_list_with_table, which takes const uint8_t * -- this member cannot
  // compile if it is ever instantiated; verify whether it is actually used.
  inline size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                                   const idx_t *ids, float *heap_sim,
                                   idx_t *heap_ids, size_t k) {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};
    if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }
};
} // namespace tig_gamma
#endif
|
morn_image_resize.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "morn_image.h"
// Cached state for mBinaryImageResize, stored in the source image's MHandle.
// lx/ly map each destination column/row to its nearest source column/row.
struct HandleBinaryImageResize
{
    int height;  // geometry/capacity the cached ly table was built for
    int width;   // geometry/capacity the cached lx table was built for
    int type;    // resize type the tables were built for
    int *lx;     // per-destination-column nearest source column
    int *ly;     // per-destination-row nearest source row
};
#define HASH_BinaryImageResize 0x3b7f3813
// Handle destructor for mBinaryImageResize: releases the cached coordinate
// lookup tables. Pointers are reset to NULL after freeing so a stale handle
// can never dangle or be double-freed.
void endBinaryImageResize(void *info)
{
    struct HandleBinaryImageResize *handle = (struct HandleBinaryImageResize *)info;
    if(handle->lx != NULL) {mFree(handle->lx); handle->lx = NULL;}
    if(handle->ly != NULL) {mFree(handle->ly); handle->ly = NULL;}
}
// Nearest-neighbor image resize (used for the MORN_NEAREST interpolation
// types). height/width <= 0 are derived from dst or from the source aspect
// ratio. The source-coordinate lookup tables are cached in a per-image
// handle and rebuilt only when the output geometry or resize type changes.
void mBinaryImageResize(MImage *src,MImage *dst,int height,int width,int type)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    // NOTE(review): bitwise-or with 0xFC folds type into the 0xFC..0xFF
    // range -- presumably normalizing it to the MORN_RESIZE_* codes;
    // confirm against the MORN_RESIZE_* definitions in morn_image.h.
    type = type|0xFC;
    MImage *p=dst;
    if(INVALID_POINTER(dst)||(dst==src))
    {
        // no usable dst: derive missing dimensions, allocate a temp image
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        dst = mImageCreate(src->channel,height,width,NULL);
    }
    else
    {
        // dst given: fall back to its geometry, then fill remaining gaps
        if(height <= 0) height = dst->height;
        if(width <= 0) width = dst->width;
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        mImageRedefine(dst,src->channel,height,width,dst->data);
    }
    MHandle *hdl=mHandle(src,BinaryImageResize);
    struct HandleBinaryImageResize *handle = (struct HandleBinaryImageResize *)hdl->handle;
    // rebuild the lookup tables when the geometry or type changed
    if((hdl->valid == 0)||(handle->height != height)||(handle->width != width)||(handle->type != type))
    {
        float kx = ((float)(src->width ))/((float)width );
        float ky = ((float)(src->height))/((float)height);
        if(type == MORN_RESIZE_MINUNIFORM) {kx = MIN(kx,ky); ky = kx;}
        else if(type == MORN_RESIZE_MAXUNIFORM) {kx = MAX(kx,ky); ky = kx;}
        // scale about the image centers
        float scx = ((float)(src->width))/2.0f; float scy = ((float)(src->height))/2.0f;
        float dcx = ((float) width )/2.0f; float dcy = ((float) height )/2.0f;
        handle->type = type;
        // grow the cached tables only when they are too small
        if(handle->width <width)
        {
            if(handle->lx != NULL) mFree(handle->lx);
            handle->lx = (int *)mMalloc(width * sizeof(int));
        }
        handle->width = width;
        if(handle->height < height)
        {
            if(handle->ly != NULL) mFree(handle->ly);
            handle->ly = (int *)mMalloc(height * sizeof(int));
        }
        handle->height = height;
        // nearest source coordinate (round-to-nearest) per destination coord
        for(int i=0;i<width;i++)
        {
            float x = ((float)i-dcx)*kx+scx;
            handle->lx[i] = (int)(x+0.5);
        }
        for(int j=0;j<height;j++)
        {
            float y = ((float)j-dcy)*ky+scy;
            handle->ly[j] = (int)(y+0.5);
        }
    }
    int *lx = handle->lx;
    int *ly = handle->ly;
    int j;
    // NOTE(review): unlike m_ImageResize there is no bounds check on lx/ly
    // here, so resize types that map outside the source (e.g. MAXUNIFORM)
    // would read out of bounds -- verify against the callers.
    #pragma omp parallel for
    for(j=0;j<height;j++)
    for(int i=0;i<width;i++)
    for(int cn=0;cn<src->channel;cn++)
        dst->data[cn][j][i] =src->data[cn][ly[j]][lx[i]];
    // memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    if(p!=dst)
    {
        // result was built in a temp image: swap it into src and drop temp
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
// Cached state for m_ImageResize (bilinear), stored in the image's MHandle.
// lx/ly hold the integer source coordinate per destination coordinate;
// wx/wy hold the matching 7-bit fixed-point interpolation weights (0..128).
struct HandleImageResize
{
    int height;  // geometry/capacity the cached ly/wy tables were built for
    int width;   // geometry/capacity the cached lx/wx tables were built for
    int type;    // resize type the tables were built for
    int *lx;     // floor of the source x per destination column
    unsigned char *wx;  // weight of the left sample, 128 - frac(x)*128
    int *ly;     // floor of the source y per destination row
    unsigned char *wy;  // weight of the top sample, 128 - frac(y)*128
};
#define HASH_ImageResize 0x56db84c
// Handle destructor for m_ImageResize: releases the cached coordinate and
// weight lookup tables. Pointers are reset to NULL after freeing so a stale
// handle can never dangle or be double-freed.
void endImageResize(void *info)
{
    struct HandleImageResize *handle = (struct HandleImageResize *)info;
    if(handle->lx != NULL) {mFree(handle->lx); handle->lx = NULL;}
    if(handle->wx != NULL) {mFree(handle->wx); handle->wx = NULL;}
    if(handle->ly != NULL) {mFree(handle->ly); handle->ly = NULL;}
    if(handle->wy != NULL) {mFree(handle->wy); handle->wy = NULL;}
}
// Bilinear image resize using 7-bit fixed-point weights (wx/wy in 0..128,
// so the per-pixel normalization divisor is 128*128 = 16384).
// height/width <= 0 are derived from dst or from the source aspect ratio.
// Nearest-neighbor type codes are delegated to mBinaryImageResize. The
// coordinate/weight lookup tables are cached per source image and rebuilt
// only when the output geometry or resize type changes.
void m_ImageResize(MImage *src,MImage *dst,int height,int width,int type)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    // NOTE(review): this bitwise-or test presumably detects the MORN_NEAREST
    // group of type codes -- confirm the encoding in morn_image.h.
    if((type|MORN_NEAREST)==MORN_NEAREST) {mBinaryImageResize(src,dst,height,width,type);return;}
    MImage *p=dst;
    if(INVALID_POINTER(dst)||(dst==src))
    {
        // no usable dst: derive missing dimensions, allocate a temp image
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        dst = mImageCreate(src->channel,height,width,NULL);
    }
    else
    {
        // dst given: fall back to its geometry, then fill remaining gaps
        if(height <= 0) height = dst->height;
        if(width <= 0) width = dst->width;
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        mImageRedefine(dst,src->channel,height,width,dst->data);
    }
    MHandle *hdl=mHandle(src,ImageResize);
    struct HandleImageResize *handle = (struct HandleImageResize *)(hdl->handle);
    // rebuild the lookup tables when the geometry or type changed
    if((hdl->valid == 0)||(handle->height != height)||(handle->width != width)||(handle->type != type))
    {
        float kx = ((float)(src->width ))/((float)width );
        float ky = ((float)(src->height))/((float)height);
        if(type == MORN_RESIZE_MINUNIFORM) {kx = MIN(kx,ky); ky = kx;}
        else if(type == MORN_RESIZE_MAXUNIFORM) {kx = MAX(kx,ky); ky = kx;}
        // scale about the image centers
        float scx = ((float)(src->width))/2.0f; float scy = ((float)(src->height))/2.0f;
        float dcx = ((float) width )/2.0f; float dcy = ((float) height )/2.0f;
        handle->type = type;
        // grow the cached tables only when they are too small
        if(handle->width <width)
        {
            if(handle->lx != NULL) mFree(handle->lx);
            if(handle->wx != NULL) mFree(handle->wx);
            handle->lx = ( int *)mMalloc(width * sizeof(int));
            handle->wx = (unsigned char *)mMalloc(width * sizeof(unsigned char));
        }
        handle->width = width;
        if(handle->height < height)
        {
            if(handle->ly != NULL) mFree(handle->ly);
            if(handle->wy != NULL) mFree(handle->wy);
            handle->ly = ( int *)mMalloc(height * sizeof(int));
            handle->wy = (unsigned char *)mMalloc(height * sizeof(unsigned char));
        }
        handle->height = height;
        // integer source coordinate + 7-bit weight per destination coord
        for(int i=0;i<width;i++)
        {
            float x = ((float)i-dcx)*kx+scx;
            handle->lx[i] = floor(x);
            handle->wx[i] = 128 - (unsigned char)((x-(float)(handle->lx[i]))*128.0f);
        }
        for(int j=0;j<height;j++)
        {
            float y = ((float)j-dcy)*ky+scy;
            handle->ly[j] = floor(y);
            handle->wy[j] = 128 - (unsigned char)((y-(float)(handle->ly[j]))*128.0f);
        }
    }
    int *lx = handle->lx; unsigned char *wx = handle->wx;
    int *ly = handle->ly; unsigned char *wy = handle->wy;
    int j;
    #pragma omp parallel for
    for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        if((lx[i]<0)||(lx[i]>src->width-1)||(ly[j]<0)||(ly[j]>src->height-1))
        {
            // destination pixel maps outside the source: fill with zero
            for(int cn=0;cn<src->channel;cn++)
                dst->data[cn][j][i] = 0.0f;
        }
        else
        {
            // BUGFIX: x2/y2 were previously lx[i]+1 / ly[j]+1 unclamped, so
            // border pixels with lx[i]==src->width-1 (or ly[j]==src->height-1)
            // read one column/row past the source buffer. Clamp to the last
            // valid index; the interior path is unchanged.
            int x1 = lx[i];int x2 = (x1+1<src->width )?(x1+1):(src->width -1);
            int y1 = ly[j];int y2 = (y1+1<src->height)?(y1+1):(src->height-1);
            unsigned char wx1 = wx[i];unsigned char wx2 = 128-wx1;
            unsigned char wy1 = wy[j];unsigned char wy2 = 128-wy1;
            for(int cn=0;cn<src->channel;cn++)
            {
                // ((p11*wx1+p12*wx2)*wy1 + (p21*wx1+p22*wx2)*wy2) / 128 / 128
                dst->data[cn][j][i] =((src->data[cn][y1][x1]*wx1+src->data[cn][y1][x2]*wx2)*wy1
                                     +(src->data[cn][y2][x1]*wx1+src->data[cn][y2][x2]*wx2)*wy2)/16384;
            }
        }
    }
    // memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    if(p!=dst)
    {
        // result was built in a temp image: swap it into src and drop temp
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
|
ImageGraphUtils.h | #ifndef CAPTURE3_IMAGE_GRAPH_UTILS_H
#define CAPTURE3_IMAGE_GRAPH_UTILS_H
#include <cmath>
#include <vector>
#include <omp.h>
#include "BoundingBoxUtils.h"
#include "../engine/objects/image/ImageChannel.h"
#include "../engine/objects/image/ImageSize.h"
namespace Capture3
{
// Builds the point clouds for the 3D color-space graphs (RGB, HSB, XYZ,
// XYV and LAB) from one image's per-color-space channel data. At most
// 250000 pixels are sampled, evenly strided across the image; the sampled
// RGB triplets double as the per-point display colors. A 24-line bounding
// box is appended after the sampled points.
static void generateGraph(
    const ImageSize &imageSize,
    const ImageChannel &imageRGB,
    const ImageChannel &imageHSB,
    const ImageChannel &imageXYZ,
    const ImageChannel &imageXYV,
    const ImageChannel &imageLAB,
    std::vector<float> &positionsRGB,
    std::vector<float> &positionsHSB,
    std::vector<float> &positionsXYZ,
    std::vector<float> &positionsXYV,
    std::vector<float> &positionsLAB,
    std::vector<float> &colors,
    unsigned int &countPoints,
    unsigned int &countLines
)
{
    // Evenly sample the image, capped at 250000 points
    const unsigned int sampleCap = 250000;
    const unsigned int pixelCount = imageSize.getArea();
    const unsigned int sampleCount = std::min(pixelCount, sampleCap);
    const double sampleStep = pixelCount / (double) sampleCount;
    // Room for the sampled points plus the 24 bounding-box lines, xyz each
    const unsigned int floatCount = (sampleCount + 24) * 3;
    positionsRGB.resize(floatCount);
    positionsHSB.resize(floatCount);
    positionsXYZ.resize(floatCount);
    positionsXYV.resize(floatCount);
    positionsLAB.resize(floatCount);
    colors.resize(floatCount);
    // Raw channel data, 3 doubles per pixel
    const double *srcRGB = imageRGB.getData();
    const double *srcHSB = imageHSB.getData();
    const double *srcXYZ = imageXYZ.getData();
    const double *srcXYV = imageXYV.getData();
    const double *srcLAB = imageLAB.getData();
#pragma omp parallel for schedule(static)
    for (unsigned int point = 0; point < sampleCount; point++) {
        // Source pixel for this sample, then triplet offsets in/out
        const auto pixel = (unsigned int) lround(point * sampleStep);
        const unsigned int in = pixel * 3;
        const unsigned int out = point * 3;
        // Display color = sampled RGB triplet
        colors[out + 0] = (float) srcRGB[in + 0];
        colors[out + 1] = (float) srcRGB[in + 1];
        colors[out + 2] = (float) srcRGB[in + 2];
        // RGB cube, centered on the origin (axes taken as G, B, R)
        positionsRGB[out + 0] = (float) (srcRGB[in + 1] - 0.5);
        positionsRGB[out + 1] = (float) (srcRGB[in + 2] - 0.5);
        positionsRGB[out + 2] = (float) (srcRGB[in + 0] - 0.5);
        // HSB cone: hue -> angle, saturation * brightness -> radius
        const double hueAngle = srcHSB[in + 0] * M_PI * 2.0;
        const double coneRadius = (srcHSB[in + 1] * srcHSB[in + 2]) / 2.0;
        positionsHSB[out + 0] = (float) (coneRadius * std::cos(hueAngle));
        positionsHSB[out + 1] = (float) (coneRadius * std::sin(hueAngle));
        positionsHSB[out + 2] = (float) (srcHSB[in + 2] - 0.5);
        // XYZ, centered (axes taken as Z, X, Y)
        positionsXYZ[out + 0] = (float) (srcXYZ[in + 2] - 0.5);
        positionsXYZ[out + 1] = (float) (srcXYZ[in + 0] - 0.5);
        positionsXYZ[out + 2] = (float) (srcXYZ[in + 1] - 0.5);
        // XYV, centered (axes taken as Y, X, V)
        positionsXYV[out + 0] = (float) (srcXYV[in + 1] - 0.5);
        positionsXYV[out + 1] = (float) (srcXYV[in + 0] - 0.5);
        positionsXYV[out + 2] = (float) (srcXYV[in + 2] - 0.5);
        // LAB, centered (axes taken as A, B, L)
        positionsLAB[out + 0] = (float) (srcLAB[in + 1] - 0.5);
        positionsLAB[out + 1] = (float) (srcLAB[in + 2] - 0.5);
        positionsLAB[out + 2] = (float) (srcLAB[in + 0] - 0.5);
    }
    // Append the bounding-box geometry after the sampled points
    boundingBox(
        sampleCount * 3,
        positionsRGB,
        positionsHSB,
        positionsXYZ,
        positionsXYV,
        positionsLAB,
        colors
    );
    countPoints = sampleCount;
    countLines = 24;
}
}
#endif // CAPTURE3_IMAGE_GRAPH_UTILS_H
|
Compiler.c |
// this is autogenerated file, do not edit it.
#include "ficus/ficus.h"
struct _fx_Nt6option1N10Ast__exp_t_data_t;
static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst);
struct _fx_Nt6option1N10Ast__typ_t_data_t;
static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst);
struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t;
static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst);
struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t;
static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(
struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst);
struct _fx_N10Ast__typ_t_data_t;
static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst);
struct _fx_N13Ast__binary_t_data_t;
static void _fx_free_N13Ast__binary_t(struct _fx_N13Ast__binary_t_data_t** dst);
struct _fx_N10Ast__exp_t_data_t;
static void _fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst);
struct _fx_N10Ast__pat_t_data_t;
static void _fx_free_N10Ast__pat_t(struct _fx_N10Ast__pat_t_data_t** dst);
struct _fx_N16Ast__env_entry_t_data_t;
static void _fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst);
struct _fx_N16Ast__defmodule_t_data_t;
static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst);
struct _fx_N14K_form__ktyp_t_data_t;
static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst);
struct _fx_N14K_form__kexp_t_data_t;
static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst);
struct _fx_N14C_form__ctyp_t_data_t;
static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst);
struct _fx_N14C_form__cexp_t_data_t;
static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst);
struct _fx_N15C_form__cstmt_t_data_t;
static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst);
/* Basic runtime/value types and the compiler-options record.
 * Common field conventions visible throughout this file:
 *   - `rc`      : apparently a reference count on heap-allocated nodes —
 *                 TODO confirm against the runtime's retain/release helpers;
 *   - `tl`/`hd` : singly-linked list node (tail pointer + head payload);
 *   - `tag`+`u` : tagged union encoding of a variant type. */
/* Linked list of strings. */
typedef struct _fx_LS_data_t {
   int_ rc;
   struct _fx_LS_data_t* tl;
   fx_str_t hd;
} _fx_LS_data_t, *_fx_LS;
/* Opaque file handle wrapper. */
typedef struct _fx_R7File__t {
   fx_cptr_t handle;
} _fx_R7File__t;
/* Closure: function pointer + captured free variables (`fcv`). */
typedef struct _fx_FPS1B {
   int (*fp)(bool, fx_str_t*, void*);
   fx_fcv_t* fcv;
} _fx_FPS1B;
/* Option value variant: bool | int | string. */
typedef struct _fx_N17Options__optval_t {
   int tag;
   union {
      bool OptBool;
      int_ OptInt;
      fx_str_t OptString;
   } u;
} _fx_N17Options__optval_t;
/* (name, value) pair for a named option. */
typedef struct _fx_T2SN17Options__optval_t {
   fx_str_t t0;
   struct _fx_N17Options__optval_t t1;
} _fx_T2SN17Options__optval_t;
typedef struct _fx_LT2SN17Options__optval_t_data_t {
   int_ rc;
   struct _fx_LT2SN17Options__optval_t_data_t* tl;
   struct _fx_T2SN17Options__optval_t hd;
} _fx_LT2SN17Options__optval_t_data_t, *_fx_LT2SN17Options__optval_t;
/* Flat record of all command-line / build options. */
typedef struct _fx_R18Options__options_t {
   struct _fx_LS_data_t* app_args;
   fx_str_t app_filename;
   bool arch64;
   bool force_rebuild;
   fx_str_t build_dir;
   fx_str_t build_rootdir;
   fx_str_t cflags;
   fx_str_t clibs;
   bool compile_by_cpp;
   fx_str_t filename;
   bool gen_c;
   struct _fx_LS_data_t* include_path;
   bool debug;
   struct _fx_LT2SN17Options__optval_t_data_t* defines;
   int_ optim_iters;
   int_ inline_thresh;
   bool enable_openmp;
   bool relax;
   bool use_preamble;
   bool make_app;
   int_ optimize_level;
   fx_str_t output_name;
   bool print_ast0;
   bool print_ast;
   bool print_k0;
   bool print_k;
   bool print_tokens;
   bool run_app;
   bool verbose;
   bool W_unused;
} _fx_R18Options__options_t;
/* Pair of ints. */
typedef struct _fx_Ta2i {
   int_ t0;
   int_ t1;
} _fx_Ta2i;
/* AST identifier, location, scope and function-definition types. */
typedef struct _fx_T2Ta2iS {
   struct _fx_Ta2i t0;
   fx_str_t t1;
} _fx_T2Ta2iS;
/* AST identifier: apparently (module, index, subindex) — TODO confirm
 * the exact meaning of m/i/j against the Ast module source. */
typedef struct _fx_R9Ast__id_t {
   int_ m;
   int_ i;
   int_ j;
} _fx_R9Ast__id_t;
/* option<Ast.id_t>: tag selects None/Some. */
typedef struct _fx_Nt6option1R9Ast__id_t {
   int tag;
   union {
      struct _fx_R9Ast__id_t Some;
   } u;
} _fx_Nt6option1R9Ast__id_t;
/* Heap-allocated (refcounted) option<Ast.exp_t>. */
typedef struct _fx_Nt6option1N10Ast__exp_t_data_t {
   int_ rc;
   union {
      struct _fx_N10Ast__exp_t_data_t* Some;
   } u;
} _fx_Nt6option1N10Ast__exp_t_data_t, *_fx_Nt6option1N10Ast__exp_t;
/* Heap-allocated (refcounted) option<Ast.typ_t>. */
typedef struct _fx_Nt6option1N10Ast__typ_t_data_t {
   int_ rc;
   union {
      struct _fx_N10Ast__typ_t_data_t* Some;
   } u;
} _fx_Nt6option1N10Ast__typ_t_data_t, *_fx_Nt6option1N10Ast__typ_t;
/* Source location: module index + start/end line and column. */
typedef struct _fx_R10Ast__loc_t {
   int_ m_idx;
   int_ line0;
   int_ col0;
   int_ line1;
   int_ col1;
} _fx_R10Ast__loc_t;
typedef struct _fx_T2R9Ast__id_ti {
   struct _fx_R9Ast__id_t t0;
   int_ t1;
} _fx_T2R9Ast__id_ti;
typedef struct _fx_T2Bi {
   bool t0;
   int_ t1;
} _fx_T2Bi;
/* Lexical scope kind (block / loop / fun / class / module / ...). */
typedef struct _fx_N12Ast__scope_t {
   int tag;
   union {
      int_ ScBlock;
      struct _fx_T2Bi ScLoop;
      int_ ScFold;
      int_ ScArrMap;
      int_ ScMap;
      int_ ScTry;
      struct _fx_R9Ast__id_t ScFun;
      struct _fx_R9Ast__id_t ScClass;
      struct _fx_R9Ast__id_t ScInterface;
      int_ ScModule;
   } u;
} _fx_N12Ast__scope_t;
typedef struct _fx_LN12Ast__scope_t_data_t {
   int_ rc;
   struct _fx_LN12Ast__scope_t_data_t* tl;
   struct _fx_N12Ast__scope_t hd;
} _fx_LN12Ast__scope_t_data_t, *_fx_LN12Ast__scope_t;
/* Per-value attribute flags (mutability, temp, ctor, scope, ...). */
typedef struct _fx_R16Ast__val_flags_t {
   bool val_flag_arg;
   bool val_flag_mutable;
   bool val_flag_temp;
   bool val_flag_tempref;
   bool val_flag_private;
   bool val_flag_subarray;
   bool val_flag_instance;
   struct _fx_T2R9Ast__id_ti val_flag_method;
   int_ val_flag_ctor;
   struct _fx_LN12Ast__scope_t_data_t* val_flag_global;
} _fx_R16Ast__val_flags_t;
/* Value definition: name, type, flags, scope chain, location. */
typedef struct _fx_R13Ast__defval_t {
   struct _fx_R9Ast__id_t dv_name;
   struct _fx_N10Ast__typ_t_data_t* dv_typ;
   struct _fx_R16Ast__val_flags_t dv_flags;
   struct _fx_LN12Ast__scope_t_data_t* dv_scope;
   struct _fx_R10Ast__loc_t dv_loc;
} _fx_R13Ast__defval_t;
/* Closure for an id comparison function (used by the Map below). */
typedef struct _fx_FPi2R9Ast__id_tR9Ast__id_t {
   int (*fp)(struct _fx_R9Ast__id_t*, struct _fx_R9Ast__id_t*, int_*, void*);
   fx_fcv_t* fcv;
} _fx_FPi2R9Ast__id_tR9Ast__id_t;
/* Map<id_t, list<env_entry_t>>: tree root + comparator closure. */
typedef struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t {
   struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* root;
   struct _fx_FPi2R9Ast__id_tR9Ast__id_t cmp;
} _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t;
/* Constructor kind for a function (variant ctor, FP, exception). */
typedef struct _fx_N17Ast__fun_constr_t {
   int tag;
   union {
      int_ CtorVariant;
      struct _fx_R9Ast__id_t CtorFP;
      struct _fx_R9Ast__id_t CtorExn;
   } u;
} _fx_N17Ast__fun_constr_t;
/* Per-function attribute flags. */
typedef struct _fx_R16Ast__fun_flags_t {
   int_ fun_flag_pure;
   bool fun_flag_ccode;
   bool fun_flag_have_keywords;
   bool fun_flag_inline;
   bool fun_flag_nothrow;
   bool fun_flag_really_nothrow;
   bool fun_flag_private;
   struct _fx_N17Ast__fun_constr_t fun_flag_ctor;
   struct _fx_R9Ast__id_t fun_flag_method_of;
   bool fun_flag_uses_fv;
   bool fun_flag_recursive;
   bool fun_flag_instance;
} _fx_R16Ast__fun_flags_t;
typedef struct _fx_LR9Ast__id_t_data_t {
   int_ rc;
   struct _fx_LR9Ast__id_t_data_t* tl;
   struct _fx_R9Ast__id_t hd;
} _fx_LR9Ast__id_t_data_t, *_fx_LR9Ast__id_t;
typedef struct _fx_LN10Ast__pat_t_data_t {
   int_ rc;
   struct _fx_LN10Ast__pat_t_data_t* tl;
   struct _fx_N10Ast__pat_t_data_t* hd;
} _fx_LN10Ast__pat_t_data_t, *_fx_LN10Ast__pat_t;
/* Refcounted mutable reference to a list of ids (`r` prefix). */
typedef struct _fx_rLR9Ast__id_t_data_t {
   int_ rc;
   struct _fx_LR9Ast__id_t_data_t* data;
} _fx_rLR9Ast__id_t_data_t, *_fx_rLR9Ast__id_t;
/* Function definition: name, template args, args, type, body, flags,
 * scope, location, template instances, captured environment. */
typedef struct _fx_R13Ast__deffun_t {
   struct _fx_R9Ast__id_t df_name;
   struct _fx_LR9Ast__id_t_data_t* df_templ_args;
   struct _fx_LN10Ast__pat_t_data_t* df_args;
   struct _fx_N10Ast__typ_t_data_t* df_typ;
   struct _fx_N10Ast__exp_t_data_t* df_body;
   struct _fx_R16Ast__fun_flags_t df_flags;
   struct _fx_LN12Ast__scope_t_data_t* df_scope;
   struct _fx_R10Ast__loc_t df_loc;
   struct _fx_rLR9Ast__id_t_data_t* df_templ_inst;
   struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t df_env;
} _fx_R13Ast__deffun_t;
typedef struct _fx_rR13Ast__deffun_t_data_t {
   int_ rc;
   struct _fx_R13Ast__deffun_t data;
} _fx_rR13Ast__deffun_t_data_t, *_fx_rR13Ast__deffun_t;
/* Exception, type, variant and interface definition records, plus the
 * id_info variant that unifies all of them. */
typedef struct _fx_R13Ast__defexn_t {
   struct _fx_R9Ast__id_t dexn_name;
   struct _fx_N10Ast__typ_t_data_t* dexn_typ;
   struct _fx_LN12Ast__scope_t_data_t* dexn_scope;
   struct _fx_R10Ast__loc_t dexn_loc;
} _fx_R13Ast__defexn_t;
typedef struct _fx_rR13Ast__defexn_t_data_t {
   int_ rc;
   struct _fx_R13Ast__defexn_t data;
} _fx_rR13Ast__defexn_t_data_t, *_fx_rR13Ast__defexn_t;
/* Type alias/definition. */
typedef struct _fx_R13Ast__deftyp_t {
   struct _fx_R9Ast__id_t dt_name;
   struct _fx_LR9Ast__id_t_data_t* dt_templ_args;
   struct _fx_N10Ast__typ_t_data_t* dt_typ;
   bool dt_finalized;
   struct _fx_LN12Ast__scope_t_data_t* dt_scope;
   struct _fx_R10Ast__loc_t dt_loc;
} _fx_R13Ast__deftyp_t;
typedef struct _fx_rR13Ast__deftyp_t_data_t {
   int_ rc;
   struct _fx_R13Ast__deftyp_t data;
} _fx_rR13Ast__deftyp_t_data_t, *_fx_rR13Ast__deftyp_t;
/* Per-variant-type attribute flags. */
typedef struct _fx_R16Ast__var_flags_t {
   int_ var_flag_class_from;
   bool var_flag_record;
   bool var_flag_recursive;
   bool var_flag_have_tag;
   bool var_flag_have_mutable;
   bool var_flag_opt;
   bool var_flag_instance;
} _fx_R16Ast__var_flags_t;
typedef struct _fx_T2R9Ast__id_tN10Ast__typ_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_N10Ast__typ_t_data_t* t1;
} _fx_T2R9Ast__id_tN10Ast__typ_t;
typedef struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t {
   int_ rc;
   struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl;
   struct _fx_T2R9Ast__id_tN10Ast__typ_t hd;
} _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__typ_t;
typedef struct _fx_Ta2R9Ast__id_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_R9Ast__id_t t1;
} _fx_Ta2R9Ast__id_t;
typedef struct _fx_LTa2R9Ast__id_t_data_t {
   int_ rc;
   struct _fx_LTa2R9Ast__id_t_data_t* tl;
   struct _fx_Ta2R9Ast__id_t hd;
} _fx_LTa2R9Ast__id_t_data_t, *_fx_LTa2R9Ast__id_t;
typedef struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_LTa2R9Ast__id_t_data_t* t1;
} _fx_T2R9Ast__id_tLTa2R9Ast__id_t;
typedef struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t {
   int_ rc;
   struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl;
   struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t hd;
} _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLTa2R9Ast__id_t;
/* Variant (sum type) definition: name, template args, alias, flags,
 * cases, constructors, instances, implemented interfaces. */
typedef struct _fx_R17Ast__defvariant_t {
   struct _fx_R9Ast__id_t dvar_name;
   struct _fx_LR9Ast__id_t_data_t* dvar_templ_args;
   struct _fx_N10Ast__typ_t_data_t* dvar_alias;
   struct _fx_R16Ast__var_flags_t dvar_flags;
   struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* dvar_cases;
   struct _fx_LR9Ast__id_t_data_t* dvar_ctors;
   struct _fx_rLR9Ast__id_t_data_t* dvar_templ_inst;
   struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* dvar_ifaces;
   struct _fx_LN12Ast__scope_t_data_t* dvar_scope;
   struct _fx_R10Ast__loc_t dvar_loc;
} _fx_R17Ast__defvariant_t;
typedef struct _fx_rR17Ast__defvariant_t_data_t {
   int_ rc;
   struct _fx_R17Ast__defvariant_t data;
} _fx_rR17Ast__defvariant_t_data_t, *_fx_rR17Ast__defvariant_t;
/* Interface method signature: (name, type, fun flags). */
typedef struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_N10Ast__typ_t_data_t* t1;
   struct _fx_R16Ast__fun_flags_t t2;
} _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t;
typedef struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t {
   int_ rc;
   struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl;
   struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t hd;
} _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t, *_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t;
/* Interface definition. */
typedef struct _fx_R19Ast__definterface_t {
   struct _fx_R9Ast__id_t di_name;
   struct _fx_R9Ast__id_t di_base;
   struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_new_methods;
   struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_all_methods;
   struct _fx_LN12Ast__scope_t_data_t* di_scope;
   struct _fx_R10Ast__loc_t di_loc;
} _fx_R19Ast__definterface_t;
typedef struct _fx_rR19Ast__definterface_t_data_t {
   int_ rc;
   struct _fx_R19Ast__definterface_t data;
} _fx_rR19Ast__definterface_t_data_t, *_fx_rR19Ast__definterface_t;
/* Unified symbol-table entry: one case per kind of definition. */
typedef struct _fx_N14Ast__id_info_t {
   int tag;
   union {
      struct _fx_R13Ast__defval_t IdDVal;
      struct _fx_rR13Ast__deffun_t_data_t* IdFun;
      struct _fx_rR13Ast__defexn_t_data_t* IdExn;
      struct _fx_rR13Ast__deftyp_t_data_t* IdTyp;
      struct _fx_rR17Ast__defvariant_t_data_t* IdVariant;
      struct _fx_rR19Ast__definterface_t_data_t* IdInterface;
      int_ IdModule;
   } u;
} _fx_N14Ast__id_info_t;
/* Dynamic vector of id_info entries and the red-black-style map tree
 * node (the Map__color_t tag presumably encodes node color — TODO
 * confirm against the Map module). */
typedef struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t {
   int_ t0;
   fx_arr_t t1;
   struct _fx_N14Ast__id_info_t t2;
} _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t;
typedef struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t {
   int_ rc;
   union {
      struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t t;
   } u;
} _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t, *_fx_Nt9Dynvec__t1N14Ast__id_info_t;
/* Tag-only enum (no payload). */
typedef struct _fx_N12Map__color_t {
   int tag;
} _fx_N12Map__color_t;
typedef struct _fx_LN16Ast__env_entry_t_data_t {
   int_ rc;
   struct _fx_LN16Ast__env_entry_t_data_t* tl;
   struct _fx_N16Ast__env_entry_t_data_t* hd;
} _fx_LN16Ast__env_entry_t_data_t, *_fx_LN16Ast__env_entry_t;
/* Map tree node payload: (color, left, key, value, right). */
typedef struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t {
   struct _fx_N12Map__color_t t0;
   struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1;
   struct _fx_R9Ast__id_t t2;
   struct _fx_LN16Ast__env_entry_t_data_t* t3;
   struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4;
} _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t;
typedef struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t {
   int_ rc;
   union {
      struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t
      Node;
   } u;
} _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t, *_fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t;
typedef struct _fx_T2R10Ast__loc_tS {
   struct _fx_R10Ast__loc_t t0;
   fx_str_t t1;
} _fx_T2R10Ast__loc_tS;
/* (width/kind, value) pairs for sized int / uint / float literals. */
typedef struct _fx_T2il {
   int_ t0;
   int64_t t1;
} _fx_T2il;
typedef struct _fx_T2iq {
   int_ t0;
   uint64_t t1;
} _fx_T2iq;
typedef struct _fx_T2id {
   int_ t0;
   double t1;
} _fx_T2id;
/* AST literal and AST type variants. */
typedef struct _fx_N10Ast__lit_t {
   int tag;
   union {
      int64_t LitInt;
      struct _fx_T2il LitSInt;
      struct _fx_T2iq LitUInt;
      struct _fx_T2id LitFloat;
      fx_str_t LitString;
      char_ LitChar;
      bool LitBool;
   } u;
} _fx_N10Ast__lit_t;
/* Refcounted mutable reference to an option<typ_t> (type variable cell). */
typedef struct _fx_rNt6option1N10Ast__typ_t_data_t {
   int_ rc;
   struct _fx_Nt6option1N10Ast__typ_t_data_t* data;
} _fx_rNt6option1N10Ast__typ_t_data_t, *_fx_rNt6option1N10Ast__typ_t;
typedef struct _fx_LN10Ast__typ_t_data_t {
   int_ rc;
   struct _fx_LN10Ast__typ_t_data_t* tl;
   struct _fx_N10Ast__typ_t_data_t* hd;
} _fx_LN10Ast__typ_t_data_t, *_fx_LN10Ast__typ_t;
/* (arg types, return type) for function types. */
typedef struct _fx_T2LN10Ast__typ_tN10Ast__typ_t {
   struct _fx_LN10Ast__typ_t_data_t* t0;
   struct _fx_N10Ast__typ_t_data_t* t1;
} _fx_T2LN10Ast__typ_tN10Ast__typ_t;
typedef struct _fx_T2iN10Ast__typ_t {
   int_ t0;
   struct _fx_N10Ast__typ_t_data_t* t1;
} _fx_T2iN10Ast__typ_t;
/* Record field: (flags, name, type, default expression). */
typedef struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t {
   struct _fx_R16Ast__val_flags_t t0;
   struct _fx_R9Ast__id_t t1;
   struct _fx_N10Ast__typ_t_data_t* t2;
   struct _fx_N10Ast__exp_t_data_t* t3;
} _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t;
typedef struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t {
   int_ rc;
   struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl;
   struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t hd;
} _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t, *_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t;
typedef struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB {
   struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0;
   bool t1;
} _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB;
typedef struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t {
   int_ rc;
   struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB data;
} _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t, *_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB;
/* (type args, name) for a named/applied type. */
typedef struct _fx_T2LN10Ast__typ_tR9Ast__id_t {
   struct _fx_LN10Ast__typ_t_data_t* t0;
   struct _fx_R9Ast__id_t t1;
} _fx_T2LN10Ast__typ_tR9Ast__id_t;
/* The AST type representation (recursive, refcounted). */
typedef struct _fx_N10Ast__typ_t_data_t {
   int_ rc;
   int tag;
   union {
      struct _fx_rNt6option1N10Ast__typ_t_data_t* TypVar;
      struct _fx_Nt6option1N10Ast__typ_t_data_t* TypVarTuple;
      struct _fx_N10Ast__typ_t_data_t* TypVarArray;
      int_ TypSInt;
      int_ TypUInt;
      int_ TypFloat;
      struct _fx_T2LN10Ast__typ_tN10Ast__typ_t TypFun;
      struct _fx_N10Ast__typ_t_data_t* TypList;
      struct _fx_N10Ast__typ_t_data_t* TypVector;
      struct _fx_LN10Ast__typ_t_data_t* TypTuple;
      struct _fx_N10Ast__typ_t_data_t* TypRef;
      struct _fx_T2iN10Ast__typ_t TypArray;
      struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t* TypRecord;
      struct _fx_T2LN10Ast__typ_tR9Ast__id_t TypApp;
   } u;
} _fx_N10Ast__typ_t_data_t, *_fx_N10Ast__typ_t;
/* Operator variants and the first group of expression payload tuples.
 * `T2N10Ast__typ_tR10Ast__loc_t` (type, loc) is the common "context"
 * pair attached to most expression cases. */
typedef struct _fx_N12Ast__cmpop_t {
   int tag;
} _fx_N12Ast__cmpop_t;
/* Binary operator; OpAugBinary wraps another binary op (e.g. `+=`). */
typedef struct _fx_N13Ast__binary_t_data_t {
   int_ rc;
   int tag;
   union {
      struct _fx_N12Ast__cmpop_t OpCmp;
      struct _fx_N12Ast__cmpop_t OpDotCmp;
      struct _fx_N13Ast__binary_t_data_t* OpAugBinary;
   } u;
} _fx_N13Ast__binary_t_data_t, *_fx_N13Ast__binary_t;
typedef struct _fx_N12Ast__unary_t {
   int tag;
} _fx_N12Ast__unary_t;
typedef struct _fx_N12Ast__sctyp_t {
   int tag;
} _fx_N12Ast__sctyp_t;
typedef struct _fx_N13Ast__intrin_t {
   int tag;
   union {
      struct _fx_R9Ast__id_t IntrinMath;
      struct _fx_N12Ast__sctyp_t IntrinSaturate;
   } u;
} _fx_N13Ast__intrin_t;
typedef struct _fx_N15Ast__for_make_t {
   int tag;
} _fx_N15Ast__for_make_t;
/* `for` loop attribute flags. */
typedef struct _fx_R16Ast__for_flags_t {
   bool for_flag_parallel;
   struct _fx_N15Ast__for_make_t for_flag_make;
   bool for_flag_unzip;
   bool for_flag_fold;
   bool for_flag_nested;
} _fx_R16Ast__for_flags_t;
typedef struct _fx_N13Ast__border_t {
   int tag;
} _fx_N13Ast__border_t;
typedef struct _fx_N18Ast__interpolate_t {
   int tag;
} _fx_N18Ast__interpolate_t;
typedef struct _fx_T2BR10Ast__loc_t {
   bool t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2BR10Ast__loc_t;
typedef struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t {
   struct _fx_Nt6option1N10Ast__exp_t_data_t* t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t;
/* (type, loc) context pair used by most expression payloads below. */
typedef struct _fx_T2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__typ_t_data_t* t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_Nt6option1N10Ast__exp_t_data_t* t0;
   struct _fx_Nt6option1N10Ast__exp_t_data_t* t1;
   struct _fx_Nt6option1N10Ast__exp_t_data_t* t2;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3;
} _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__lit_t t0;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1;
} _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1;
} _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N13Ast__binary_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_N10Ast__exp_t_data_t* t2;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3;
} _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N12Ast__unary_t t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_LN10Ast__exp_t_data_t {
   int_ rc;
   struct _fx_LN10Ast__exp_t_data_t* tl;
   struct _fx_N10Ast__exp_t_data_t* hd;
} _fx_LN10Ast__exp_t_data_t, *_fx_LN10Ast__exp_t;
/* Remaining expression payload tuples/lists; each corresponds to one
 * case of the `_fx_N10Ast__exp_t_data_t` union defined below. */
typedef struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N13Ast__intrin_t t0;
   struct _fx_LN10Ast__exp_t_data_t* t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T2R9Ast__id_tN10Ast__exp_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
} _fx_T2R9Ast__id_tN10Ast__exp_t;
typedef struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_LN10Ast__exp_t_data_t* t0;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1;
} _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
/* List of expression lists (e.g. array rows). */
typedef struct _fx_LLN10Ast__exp_t_data_t {
   int_ rc;
   struct _fx_LLN10Ast__exp_t_data_t* tl;
   struct _fx_LN10Ast__exp_t_data_t* hd;
} _fx_LLN10Ast__exp_t_data_t, *_fx_LLN10Ast__exp_t;
typedef struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_LLN10Ast__exp_t_data_t* t0;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1;
} _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t {
   int_ rc;
   struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl;
   struct _fx_T2R9Ast__id_tN10Ast__exp_t hd;
} _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__exp_t;
typedef struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_LN10Ast__exp_t_data_t* t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_N13Ast__border_t t1;
   struct _fx_N18Ast__interpolate_t t2;
   struct _fx_LN10Ast__exp_t_data_t* t3;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t4;
} _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T2N10Ast__exp_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2N10Ast__exp_tR10Ast__loc_t;
typedef struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_N10Ast__exp_t_data_t* t2;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3;
} _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
/* (pattern, expression) pair — loop clauses and match cases. */
typedef struct _fx_T2N10Ast__pat_tN10Ast__exp_t {
   struct _fx_N10Ast__pat_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
} _fx_T2N10Ast__pat_tN10Ast__exp_t;
typedef struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t {
   int_ rc;
   struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl;
   struct _fx_T2N10Ast__pat_tN10Ast__exp_t hd;
} _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t, *_fx_LT2N10Ast__pat_tN10Ast__exp_t;
typedef struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t {
   struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0;
   struct _fx_N10Ast__pat_t_data_t* t1;
   struct _fx_N10Ast__exp_t_data_t* t2;
   struct _fx_R16Ast__for_flags_t t3;
   struct _fx_R10Ast__loc_t t4;
} _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t;
typedef struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t {
   struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0;
   struct _fx_N10Ast__pat_t_data_t* t1;
} _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t;
typedef struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t {
   int_ rc;
   struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl;
   struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t hd;
} _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t, *_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t;
typedef struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_R16Ast__for_flags_t t2;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3;
} _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__exp_t_data_t* t0;
   struct _fx_N10Ast__typ_t_data_t* t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t {
   fx_str_t t0;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1;
} _fx_T2ST2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t {
   fx_str_t t0;
   fx_str_t t1;
   struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2;
} _fx_T3SST2N10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t {
   struct _fx_N10Ast__pat_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_R16Ast__val_flags_t t2;
   struct _fx_R10Ast__loc_t t3;
} _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t;
typedef struct _fx_T2iR9Ast__id_t {
   int_ t0;
   struct _fx_R9Ast__id_t t1;
} _fx_T2iR9Ast__id_t;
typedef struct _fx_LT2iR9Ast__id_t_data_t {
   int_ rc;
   struct _fx_LT2iR9Ast__id_t_data_t* tl;
   struct _fx_T2iR9Ast__id_t hd;
} _fx_LT2iR9Ast__id_t_data_t, *_fx_LT2iR9Ast__id_t;
typedef struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t {
   struct _fx_LT2iR9Ast__id_t_data_t* t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2LT2iR9Ast__id_tR10Ast__loc_t;
typedef struct _fx_T3iLR9Ast__id_tR10Ast__loc_t {
   int_ t0;
   struct _fx_LR9Ast__id_t_data_t* t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3iLR9Ast__id_tR10Ast__loc_t;
typedef struct _fx_T2LSR10Ast__loc_t {
   struct _fx_LS_data_t* t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2LSR10Ast__loc_t;
/* The AST expression type: a refcounted tagged union with one case per
 * expression / statement / definition / directive kind.  The union
 * member names (ExpNop, ExpIf, DefFun, DirImport, ...) identify the
 * constructors; their payload layouts are the tuples defined above. */
typedef struct _fx_N10Ast__exp_t_data_t {
   int_ rc;
   int tag;
   union {
      struct _fx_R10Ast__loc_t ExpNop;
      struct _fx_T2BR10Ast__loc_t ExpBreak;
      struct _fx_R10Ast__loc_t ExpContinue;
      struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t ExpReturn;
      struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpRange;
      struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t ExpLit;
      struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t ExpIdent;
      struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpBinary;
      struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUnary;
      struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIntrin;
      struct _fx_T2R9Ast__id_tN10Ast__exp_t ExpSync;
      struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpSeq;
      struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkTuple;
      struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkArray;
      struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkVector;
      struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkRecord;
      struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUpdateRecord;
      struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpCall;
      struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpAt;
      struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpAssign;
      struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMem;
      struct _fx_T2N10Ast__exp_tR10Ast__loc_t ExpThrow;
      struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIf;
      struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpWhile;
      struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpDoWhile;
      struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t ExpFor;
      struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t
      ExpMap;
      struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpTryCatch;
      struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMatch;
      struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpCast;
      struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpTyped;
      struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t ExpCCode;
      struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t ExpData;
      struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t DefVal;
      struct _fx_rR13Ast__deffun_t_data_t* DefFun;
      struct _fx_rR13Ast__defexn_t_data_t* DefExn;
      struct _fx_rR13Ast__deftyp_t_data_t* DefTyp;
      struct _fx_rR17Ast__defvariant_t_data_t* DefVariant;
      struct _fx_rR19Ast__definterface_t_data_t* DefInterface;
      struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t DirImport;
      struct _fx_T3iLR9Ast__id_tR10Ast__loc_t DirImportFrom;
      struct _fx_T2LSR10Ast__loc_t DirPragma;
   } u;
} _fx_N10Ast__exp_t_data_t, *_fx_N10Ast__exp_t;
/* Pattern payload tuples and the AST pattern variant. */
typedef struct _fx_T2N10Ast__lit_tR10Ast__loc_t {
   struct _fx_N10Ast__lit_t t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2N10Ast__lit_tR10Ast__loc_t;
typedef struct _fx_T2R9Ast__id_tR10Ast__loc_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2R9Ast__id_tR10Ast__loc_t;
typedef struct _fx_T2LN10Ast__pat_tR10Ast__loc_t {
   struct _fx_LN10Ast__pat_t_data_t* t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2LN10Ast__pat_tR10Ast__loc_t;
typedef struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_LN10Ast__pat_t_data_t* t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t;
/* (field name, sub-pattern) pair for record patterns. */
typedef struct _fx_T2R9Ast__id_tN10Ast__pat_t {
   struct _fx_R9Ast__id_t t0;
   struct _fx_N10Ast__pat_t_data_t* t1;
} _fx_T2R9Ast__id_tN10Ast__pat_t;
typedef struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t {
   int_ rc;
   struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl;
   struct _fx_T2R9Ast__id_tN10Ast__pat_t hd;
} _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__pat_t;
typedef struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t {
   struct _fx_Nt6option1R9Ast__id_t t0;
   struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t {
   struct _fx_N10Ast__pat_t_data_t* t0;
   struct _fx_N10Ast__pat_t_data_t* t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t {
   struct _fx_N10Ast__pat_t_data_t* t0;
   struct _fx_R9Ast__id_t t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t {
   struct _fx_N10Ast__pat_t_data_t* t0;
   struct _fx_N10Ast__typ_t_data_t* t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t;
typedef struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t {
   struct _fx_N10Ast__pat_t_data_t* t0;
   struct _fx_N10Ast__exp_t_data_t* t1;
   struct _fx_R10Ast__loc_t t2;
} _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t;
typedef struct _fx_T2N10Ast__pat_tR10Ast__loc_t {
   struct _fx_N10Ast__pat_t_data_t* t0;
   struct _fx_R10Ast__loc_t t1;
} _fx_T2N10Ast__pat_tR10Ast__loc_t;
/* The AST pattern type (refcounted tagged union). */
typedef struct _fx_N10Ast__pat_t_data_t {
   int_ rc;
   int tag;
   union {
      struct _fx_R10Ast__loc_t PatAny;
      struct _fx_T2N10Ast__lit_tR10Ast__loc_t PatLit;
      struct _fx_T2R9Ast__id_tR10Ast__loc_t PatIdent;
      struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatTuple;
      struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t PatVariant;
      struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t PatRecord;
      struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t PatCons;
      struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t PatAs;
      struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t PatTyped;
      struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t PatWhen;
      struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatAlt;
      struct _fx_T2N10Ast__pat_tR10Ast__loc_t PatRef;
   } u;
} _fx_N10Ast__pat_t_data_t, *_fx_N10Ast__pat_t;
/* Environment entries, module record, exception list and lexer tokens. */
typedef struct _fx_N16Ast__env_entry_t_data_t {
   int_ rc;
   int tag;
   union {
      struct _fx_R9Ast__id_t EnvId;
      struct _fx_N10Ast__typ_t_data_t* EnvTyp;
   } u;
} _fx_N16Ast__env_entry_t_data_t, *_fx_N16Ast__env_entry_t;
/* Linked list of ints. */
typedef struct _fx_Li_data_t {
   int_ rc;
   struct _fx_Li_data_t* tl;
   int_ hd;
} _fx_Li_data_t, *_fx_Li;
/* Module record payload: 10-tuple (id, name string, ints/bools, exp
 * list, int list, env map, dynvec of id_info).  Field meanings are not
 * recoverable from this chunk — see the Ast module's defmodule_t. */
typedef struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t {
   struct _fx_R9Ast__id_t t0;
   fx_str_t t1;
   int_ t2;
   bool t3;
   struct _fx_LN10Ast__exp_t_data_t* t4;
   struct _fx_Li_data_t* t5;
   struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t t6;
   bool t7;
   int_ t8;
   struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9;
} _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t;
typedef struct _fx_N16Ast__defmodule_t_data_t {
   int_ rc;
   union {
      struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t
      defmodule_t;
   } u;
} _fx_N16Ast__defmodule_t_data_t, *_fx_N16Ast__defmodule_t;
/* Linked list of exceptions. */
typedef struct _fx_LE_data_t {
   int_ rc;
   struct _fx_LE_data_t* tl;
   fx_exn_t hd;
} _fx_LE_data_t, *_fx_LE;
typedef struct _fx_T2BS {
   bool t0;
   fx_str_t t1;
} _fx_T2BS;
/* Lexer token variant; bool payloads appear to carry a per-token flag
 * (TODO confirm its meaning — possibly "preceded by whitespace"). */
typedef struct _fx_N14Lexer__token_t {
   int tag;
   union {
      struct _fx_N10Ast__lit_t LITERAL;
      struct _fx_T2BS IDENT;
      fx_str_t TYVAR;
      fx_str_t DATA;
      bool FOR;
      bool IMPORT;
      bool REF;
      bool RETURN;
      bool WHILE;
      bool LPAREN;
      bool LSQUARE;
      bool BACKSLASH;
      bool MINUS;
      bool PLUS;
      bool STAR;
      bool DOT_PLUS;
      bool DOT_MINUS;
      struct _fx_N13Ast__binary_t_data_t* AUG_BINOP;
      struct _fx_N12Ast__cmpop_t CMP;
      struct _fx_N12Ast__cmpop_t DOT_CMP;
      fx_str_t RESERVED;
   } u;
} _fx_N14Lexer__token_t;
typedef struct _fx_LN14Lexer__token_t_data_t {
   int_ rc;
   struct _fx_LN14Lexer__token_t_data_t* tl;
   struct _fx_N14Lexer__token_t hd;
} _fx_LN14Lexer__token_t_data_t, *_fx_LN14Lexer__token_t;
/* --- K-form (middle IR) literal, atom, and type declarations. --- */
/* K-form literal variant (ints, sized ints/uints/floats, string, char,
   bool, and a typed nil). */
typedef struct _fx_N14K_form__klit_t {
  int tag;
  union {
   int64_t KLitInt;
   struct _fx_T2il KLitSInt;
   struct _fx_T2iq KLitUInt;
   struct _fx_T2id KLitFloat;
   fx_str_t KLitString;
   char_ KLitChar;
   bool KLitBool;
   struct _fx_N14K_form__ktyp_t_data_t* KLitNil;
  } u;
} _fx_N14K_form__klit_t;
/* K-form atom: either an identifier or a literal. */
typedef struct _fx_N14K_form__atom_t {
  int tag;
  union {
   struct _fx_R9Ast__id_t AtomId;
   struct _fx_N14K_form__klit_t AtomLit;
  } u;
} _fx_N14K_form__atom_t;
/* Flags record describing properties of a K-form type. */
typedef struct _fx_R17K_form__ktprops_t {
  bool ktp_complex;
  bool ktp_scalar;
  bool ktp_ptr;
  bool ktp_pass_by_ref;
  bool ktp_custom_free;
  bool ktp_custom_copy;
} _fx_R17K_form__ktprops_t;
/* option<ktprops>: tag selects None/Some; payload only valid for Some. */
typedef struct _fx_Nt6option1R17K_form__ktprops_t {
  int tag;
  union {
   struct _fx_R17K_form__ktprops_t Some;
  } u;
} _fx_Nt6option1R17K_form__ktprops_t;
/* option<atom>. */
typedef struct _fx_Nt6option1N14K_form__atom_t {
  int tag;
  union {
   struct _fx_N14K_form__atom_t Some;
  } u;
} _fx_Nt6option1N14K_form__atom_t;
/* (string, location) pair. */
typedef struct _fx_T2SR10Ast__loc_t {
  fx_str_t t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2SR10Ast__loc_t;
/* List of (string, location) pairs. */
typedef struct _fx_LT2SR10Ast__loc_t_data_t {
  int_ rc;
  struct _fx_LT2SR10Ast__loc_t_data_t* tl;
  struct _fx_T2SR10Ast__loc_t hd;
} _fx_LT2SR10Ast__loc_t_data_t, *_fx_LT2SR10Ast__loc_t;
/* List of K-form types. */
typedef struct _fx_LN14K_form__ktyp_t_data_t {
  int_ rc;
  struct _fx_LN14K_form__ktyp_t_data_t* tl;
  struct _fx_N14K_form__ktyp_t_data_t* hd;
} _fx_LN14K_form__ktyp_t_data_t, *_fx_LN14K_form__ktyp_t;
/* (argument types, result type) pair -- payload of KTypFun. */
typedef struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t {
  struct _fx_LN14K_form__ktyp_t_data_t* t0;
  struct _fx_N14K_form__ktyp_t_data_t* t1;
} _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t;
/* (id, type) pair -- e.g. a record field. */
typedef struct _fx_T2R9Ast__id_tN14K_form__ktyp_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N14K_form__ktyp_t_data_t* t1;
} _fx_T2R9Ast__id_tN14K_form__ktyp_t;
/* List of (id, type) pairs. */
typedef struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t {
  int_ rc;
  struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl;
  struct _fx_T2R9Ast__id_tN14K_form__ktyp_t hd;
} _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t, *_fx_LT2R9Ast__id_tN14K_form__ktyp_t;
/* (record name, field list) pair -- payload of KTypRecord. */
typedef struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1;
} _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t;
/* (dimension count, element type) pair -- payload of KTypArray. */
typedef struct _fx_T2iN14K_form__ktyp_t {
  int_ t0;
  struct _fx_N14K_form__ktyp_t_data_t* t1;
} _fx_T2iN14K_form__ktyp_t;
/* The K-form type variant itself (ref-counted; recursive via pointers). */
typedef struct _fx_N14K_form__ktyp_t_data_t {
  int_ rc;
  int tag;
  union {
   int_ KTypSInt;
   int_ KTypUInt;
   int_ KTypFloat;
   struct _fx_N14K_form__ktyp_t_data_t* KTypRawPointer;
   struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t KTypFun;
   struct _fx_LN14K_form__ktyp_t_data_t* KTypTuple;
   struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t KTypRecord;
   struct _fx_R9Ast__id_t KTypName;
   struct _fx_T2iN14K_form__ktyp_t KTypArray;
   struct _fx_N14K_form__ktyp_t_data_t* KTypVector;
   struct _fx_N14K_form__ktyp_t_data_t* KTypList;
   struct _fx_N14K_form__ktyp_t_data_t* KTypRef;
  } u;
} _fx_N14K_form__ktyp_t_data_t, *_fx_N14K_form__ktyp_t;
/* --- K-form expression payload tuples, part 1 of 2. ---
 * Most tuples below end with a (ktyp, loc) pair: the expression's type and
 * its source location.  They are the per-case payloads of the big
 * _fx_N14K_form__kexp_t_data_t variant declared further down. */
/* Fixed 3-element array of atoms (start/end/step of a range). */
typedef struct _fx_Ta3N14K_form__atom_t {
  struct _fx_N14K_form__atom_t t0;
  struct _fx_N14K_form__atom_t t1;
  struct _fx_N14K_form__atom_t t2;
} _fx_Ta3N14K_form__atom_t;
/* Iteration-domain variant: a single element, a "fast" element, or a range. */
typedef struct _fx_N13K_form__dom_t {
  int tag;
  union {
   struct _fx_N14K_form__atom_t DomainElem;
   struct _fx_N14K_form__atom_t DomainFast;
   struct _fx_Ta3N14K_form__atom_t DomainRange;
  } u;
} _fx_N13K_form__dom_t;
/* (optional atom, loc) -- KExpReturn payload. */
typedef struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t {
  struct _fx_Nt6option1N14K_form__atom_t t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t;
/* (type, loc) -- the common trailing pair of most kexp payloads. */
typedef struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N14K_form__ktyp_t_data_t* t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpAtom payload. */
typedef struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N14K_form__atom_t t0;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1;
} _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpBinary payload: (op, lhs, rhs, (type, loc)). */
typedef struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N13Ast__binary_t_data_t* t0;
  struct _fx_N14K_form__atom_t t1;
  struct _fx_N14K_form__atom_t t2;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3;
} _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpUnary payload: (op, operand, (type, loc)). */
typedef struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N12Ast__unary_t t0;
  struct _fx_N14K_form__atom_t t1;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2;
} _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* List of atoms. */
typedef struct _fx_LN14K_form__atom_t_data_t {
  int_ rc;
  struct _fx_LN14K_form__atom_t_data_t* tl;
  struct _fx_N14K_form__atom_t hd;
} _fx_LN14K_form__atom_t_data_t, *_fx_LN14K_form__atom_t;
/* KExpIntrin payload: (intrinsic, args, (type, loc)). */
typedef struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N13Ast__intrin_t t0;
  struct _fx_LN14K_form__atom_t_data_t* t1;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2;
} _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpSync payload: (id, expression). */
typedef struct _fx_T2R9Ast__id_tN14K_form__kexp_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N14K_form__kexp_t_data_t* t1;
} _fx_T2R9Ast__id_tN14K_form__kexp_t;
/* List of K-form expressions. */
typedef struct _fx_LN14K_form__kexp_t_data_t {
  int_ rc;
  struct _fx_LN14K_form__kexp_t_data_t* tl;
  struct _fx_N14K_form__kexp_t_data_t* hd;
} _fx_LN14K_form__kexp_t_data_t, *_fx_LN14K_form__kexp_t;
/* KExpSeq payload: (expression list, (type, loc)). */
typedef struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_LN14K_form__kexp_t_data_t* t0;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1;
} _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpIf payload: (cond, then, else, (type, loc)). */
typedef struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N14K_form__kexp_t_data_t* t0;
  struct _fx_N14K_form__kexp_t_data_t* t1;
  struct _fx_N14K_form__kexp_t_data_t* t2;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3;
} _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpCall payload: (callee id, args, (type, loc)). */
typedef struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_LN14K_form__atom_t_data_t* t1;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2;
} _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpICall payload: (id, index, args, (type, loc)). */
typedef struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  int_ t1;
  struct _fx_LN14K_form__atom_t_data_t* t2;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3;
} _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpMkTuple / KExpMkRecord payload: (elements, (type, loc)). */
typedef struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_LN14K_form__atom_t_data_t* t0;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1;
} _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpMkClosure payload: (fn id, closure-vars id, free vars, (type, loc)). */
typedef struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_R9Ast__id_t t1;
  struct _fx_LN14K_form__atom_t_data_t* t2;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3;
} _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* (bool, atom) pair -- an array/vector element with a flag. */
typedef struct _fx_T2BN14K_form__atom_t {
  bool t0;
  struct _fx_N14K_form__atom_t t1;
} _fx_T2BN14K_form__atom_t;
/* List of (bool, atom) pairs: one row of elements. */
typedef struct _fx_LT2BN14K_form__atom_t_data_t {
  int_ rc;
  struct _fx_LT2BN14K_form__atom_t_data_t* tl;
  struct _fx_T2BN14K_form__atom_t hd;
} _fx_LT2BN14K_form__atom_t_data_t, *_fx_LT2BN14K_form__atom_t;
/* List of rows (list of lists). */
typedef struct _fx_LLT2BN14K_form__atom_t_data_t {
  int_ rc;
  struct _fx_LLT2BN14K_form__atom_t_data_t* tl;
  struct _fx_LT2BN14K_form__atom_t_data_t* hd;
} _fx_LLT2BN14K_form__atom_t_data_t, *_fx_LLT2BN14K_form__atom_t;
/* KExpMkArray payload: (flag, rows, (type, loc)). */
typedef struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  bool t0;
  struct _fx_LLT2BN14K_form__atom_t_data_t* t1;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2;
} _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpMkVector payload: (elements, (type, loc)). */
typedef struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_LT2BN14K_form__atom_t_data_t* t0;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1;
} _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* List of iteration domains. */
typedef struct _fx_LN13K_form__dom_t_data_t {
  int_ rc;
  struct _fx_LN13K_form__dom_t_data_t* tl;
  struct _fx_N13K_form__dom_t hd;
} _fx_LN13K_form__dom_t_data_t, *_fx_LN13K_form__dom_t;
/* --- K-form expression payload tuples, part 2 of 2. --- */
/* KExpAt payload: (array, border mode, interpolation, index domains,
   (type, loc)). */
typedef struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N14K_form__atom_t t0;
  struct _fx_N13Ast__border_t t1;
  struct _fx_N18Ast__interpolate_t t2;
  struct _fx_LN13K_form__dom_t_data_t* t3;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t4;
} _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpMem payload: (record id, member index, (type, loc)). */
typedef struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  int_ t1;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2;
} _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpAssign payload: (lhs id, rhs atom, loc). */
typedef struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N14K_form__atom_t t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t;
/* One match clause: (pattern checks, clause body). */
typedef struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t {
  struct _fx_LN14K_form__kexp_t_data_t* t0;
  struct _fx_N14K_form__kexp_t_data_t* t1;
} _fx_T2LN14K_form__kexp_tN14K_form__kexp_t;
/* List of match clauses. */
typedef struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t {
  int_ rc;
  struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl;
  struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t hd;
} _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t, *_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t;
/* KExpMatch payload: (clauses, (type, loc)). */
typedef struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1;
} _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpTryCatch payload: (try body, catch body, (type, loc)). */
typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N14K_form__kexp_t_data_t* t0;
  struct _fx_N14K_form__kexp_t_data_t* t1;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2;
} _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpThrow payload: (exception id, flag, loc). */
typedef struct _fx_T3R9Ast__id_tBR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  bool t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3R9Ast__id_tBR10Ast__loc_t;
/* KExpCast payload: (value, target type, loc). */
typedef struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_N14K_form__atom_t t0;
  struct _fx_N14K_form__ktyp_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t;
/* (loop variable, its iteration domain). */
typedef struct _fx_T2R9Ast__id_tN13K_form__dom_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N13K_form__dom_t t1;
} _fx_T2R9Ast__id_tN13K_form__dom_t;
/* List of (variable, domain) pairs. */
typedef struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t {
  int_ rc;
  struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl;
  struct _fx_T2R9Ast__id_tN13K_form__dom_t hd;
} _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t, *_fx_LT2R9Ast__id_tN13K_form__dom_t;
/* One comprehension clause: (pre-body, (var, domain) list, extra ids). */
typedef struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t {
  struct _fx_N14K_form__kexp_t_data_t* t0;
  struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1;
  struct _fx_LR9Ast__id_t_data_t* t2;
} _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t;
/* List of comprehension clauses. */
typedef struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t {
  int_ rc;
  struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl;
  struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t hd;
} _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t, *_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t;
/* KExpMap payload: (clauses, body, for-flags, (type, loc)). */
typedef struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t {
  struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0;
  struct _fx_N14K_form__kexp_t_data_t* t1;
  struct _fx_R16Ast__for_flags_t t2;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3;
} _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t;
/* KExpFor payload: ((var, domain) list, extra ids, body, for-flags, loc). */
typedef struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t {
  struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0;
  struct _fx_LR9Ast__id_t_data_t* t1;
  struct _fx_N14K_form__kexp_t_data_t* t2;
  struct _fx_R16Ast__for_flags_t t3;
  struct _fx_R10Ast__loc_t t4;
} _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t;
/* KExpWhile / KExpDoWhile payload: (condition, body, loc). */
typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t {
  struct _fx_N14K_form__kexp_t_data_t* t0;
  struct _fx_N14K_form__kexp_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t;
/* KExpCCode payload: (C code string, (type, loc)). */
typedef struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t {
  fx_str_t t0;
  struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1;
} _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t;
/* KDefVal payload: (name, initializer, loc). */
typedef struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N14K_form__kexp_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t;
/* --- K-form top-level definition records and their ref-counted boxes.
 * Each `_fx_rR...` type is the heap box (rc + data) pointed to by the
 * corresponding KDef* case of the kexp variant below. --- */
/* Closure bookkeeping for a function definition. */
typedef struct _fx_R25K_form__kdefclosureinfo_t {
  struct _fx_R9Ast__id_t kci_arg;
  struct _fx_R9Ast__id_t kci_fcv_t;
  struct _fx_R9Ast__id_t kci_fp_typ;
  struct _fx_R9Ast__id_t kci_make_fp;
  struct _fx_R9Ast__id_t kci_wrap_f;
} _fx_R25K_form__kdefclosureinfo_t;
/* Function definition: name, C name, params, return type, body, flags,
   closure info, scope, location. */
typedef struct _fx_R17K_form__kdeffun_t {
  struct _fx_R9Ast__id_t kf_name;
  fx_str_t kf_cname;
  struct _fx_LR9Ast__id_t_data_t* kf_params;
  struct _fx_N14K_form__ktyp_t_data_t* kf_rt;
  struct _fx_N14K_form__kexp_t_data_t* kf_body;
  struct _fx_R16Ast__fun_flags_t kf_flags;
  struct _fx_R25K_form__kdefclosureinfo_t kf_closure;
  struct _fx_LN12Ast__scope_t_data_t* kf_scope;
  struct _fx_R10Ast__loc_t kf_loc;
} _fx_R17K_form__kdeffun_t;
typedef struct _fx_rR17K_form__kdeffun_t_data_t {
  int_ rc;
  struct _fx_R17K_form__kdeffun_t data;
} _fx_rR17K_form__kdeffun_t_data_t, *_fx_rR17K_form__kdeffun_t;
/* Exception definition. */
typedef struct _fx_R17K_form__kdefexn_t {
  struct _fx_R9Ast__id_t ke_name;
  fx_str_t ke_cname;
  fx_str_t ke_base_cname;
  struct _fx_N14K_form__ktyp_t_data_t* ke_typ;
  bool ke_std;
  struct _fx_R9Ast__id_t ke_tag;
  struct _fx_R9Ast__id_t ke_make;
  struct _fx_LN12Ast__scope_t_data_t* ke_scope;
  struct _fx_R10Ast__loc_t ke_loc;
} _fx_R17K_form__kdefexn_t;
typedef struct _fx_rR17K_form__kdefexn_t_data_t {
  int_ rc;
  struct _fx_R17K_form__kdefexn_t data;
} _fx_rR17K_form__kdefexn_t_data_t, *_fx_rR17K_form__kdefexn_t;
/* (interface id, method id list) pair. */
typedef struct _fx_T2R9Ast__id_tLR9Ast__id_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_LR9Ast__id_t_data_t* t1;
} _fx_T2R9Ast__id_tLR9Ast__id_t;
/* List of (id, id list) pairs. */
typedef struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t {
  int_ rc;
  struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl;
  struct _fx_T2R9Ast__id_tLR9Ast__id_t hd;
} _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLR9Ast__id_t;
/* Variant-type definition. */
typedef struct _fx_R21K_form__kdefvariant_t {
  struct _fx_R9Ast__id_t kvar_name;
  fx_str_t kvar_cname;
  struct _fx_R9Ast__id_t kvar_proto;
  struct _fx_Nt6option1R17K_form__ktprops_t kvar_props;
  struct _fx_LN14K_form__ktyp_t_data_t* kvar_targs;
  struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kvar_cases;
  struct _fx_LR9Ast__id_t_data_t* kvar_ctors;
  struct _fx_R16Ast__var_flags_t kvar_flags;
  struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* kvar_ifaces;
  struct _fx_LN12Ast__scope_t_data_t* kvar_scope;
  struct _fx_R10Ast__loc_t kvar_loc;
} _fx_R21K_form__kdefvariant_t;
typedef struct _fx_rR21K_form__kdefvariant_t_data_t {
  int_ rc;
  struct _fx_R21K_form__kdefvariant_t data;
} _fx_rR21K_form__kdefvariant_t_data_t, *_fx_rR21K_form__kdefvariant_t;
/* Interface definition. */
typedef struct _fx_R23K_form__kdefinterface_t {
  struct _fx_R9Ast__id_t ki_name;
  struct _fx_R9Ast__id_t ki_base;
  fx_str_t ki_cname;
  struct _fx_R9Ast__id_t ki_id;
  struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* ki_all_methods;
  struct _fx_LN12Ast__scope_t_data_t* ki_scope;
  struct _fx_R10Ast__loc_t ki_loc;
} _fx_R23K_form__kdefinterface_t;
typedef struct _fx_rR23K_form__kdefinterface_t_data_t {
  int_ rc;
  struct _fx_R23K_form__kdefinterface_t data;
} _fx_rR23K_form__kdefinterface_t_data_t, *_fx_rR23K_form__kdefinterface_t;
/* Named-type (alias) definition. */
typedef struct _fx_R17K_form__kdeftyp_t {
  struct _fx_R9Ast__id_t kt_name;
  fx_str_t kt_cname;
  struct _fx_R9Ast__id_t kt_proto;
  struct _fx_Nt6option1R17K_form__ktprops_t kt_props;
  struct _fx_LN14K_form__ktyp_t_data_t* kt_targs;
  struct _fx_N14K_form__ktyp_t_data_t* kt_typ;
  struct _fx_LN12Ast__scope_t_data_t* kt_scope;
  struct _fx_R10Ast__loc_t kt_loc;
} _fx_R17K_form__kdeftyp_t;
typedef struct _fx_rR17K_form__kdeftyp_t_data_t {
  int_ rc;
  struct _fx_R17K_form__kdeftyp_t data;
} _fx_rR17K_form__kdeftyp_t_data_t, *_fx_rR17K_form__kdeftyp_t;
/* Closure-variables record definition. */
typedef struct _fx_R25K_form__kdefclosurevars_t {
  struct _fx_R9Ast__id_t kcv_name;
  fx_str_t kcv_cname;
  struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kcv_freevars;
  struct _fx_LR9Ast__id_t_data_t* kcv_orig_freevars;
  struct _fx_LN12Ast__scope_t_data_t* kcv_scope;
  struct _fx_R10Ast__loc_t kcv_loc;
} _fx_R25K_form__kdefclosurevars_t;
typedef struct _fx_rR25K_form__kdefclosurevars_t_data_t {
  int_ rc;
  struct _fx_R25K_form__kdefclosurevars_t data;
} _fx_rR25K_form__kdefclosurevars_t_data_t, *_fx_rR25K_form__kdefclosurevars_t;
/* The central K-form expression variant: one case per payload tuple
   declared above.  Ref-counted; recursive through the payload pointers. */
typedef struct _fx_N14K_form__kexp_t_data_t {
  int_ rc;
  int tag;
  union {
   struct _fx_R10Ast__loc_t KExpNop;
   struct _fx_R10Ast__loc_t KExpBreak;
   struct _fx_R10Ast__loc_t KExpContinue;
   struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t KExpReturn;
   struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpAtom;
   struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpBinary;
   struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpUnary;
   struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIntrin;
   struct _fx_T2R9Ast__id_tN14K_form__kexp_t KExpSync;
   struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpSeq;
   struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIf;
   struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpCall;
   struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpICall;
   struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkTuple;
   struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkRecord;
   struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkClosure;
   struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkArray;
   struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkVector;
   struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t
    KExpAt;
   struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t KExpMem;
   struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t KExpAssign;
   struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMatch;
   struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpTryCatch;
   struct _fx_T3R9Ast__id_tBR10Ast__loc_t KExpThrow;
   struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t KExpCast;
   struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t
    KExpMap;
   struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t KExpFor;
   struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpWhile;
   struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpDoWhile;
   struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t KExpCCode;
   struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t KDefVal;
   struct _fx_rR17K_form__kdeffun_t_data_t* KDefFun;
   struct _fx_rR17K_form__kdefexn_t_data_t* KDefExn;
   struct _fx_rR21K_form__kdefvariant_t_data_t* KDefVariant;
   struct _fx_rR23K_form__kdefinterface_t_data_t* KDefInterface;
   struct _fx_rR17K_form__kdeftyp_t_data_t* KDefTyp;
   struct _fx_rR25K_form__kdefclosurevars_t_data_t* KDefClosureVars;
  } u;
} _fx_N14K_form__kexp_t_data_t, *_fx_N14K_form__kexp_t;
/* Module pragmas: C++ mode flag and linked C libraries (name, loc). */
typedef struct _fx_R14Ast__pragmas_t {
  bool pragma_cpp;
  struct _fx_LT2SR10Ast__loc_t_data_t* pragma_clibs;
} _fx_R14Ast__pragmas_t;
/* A compiled module: name, indices, C name, top-level expressions,
   dependency indices, flags and pragmas. */
typedef struct _fx_R17K_form__kmodule_t {
  struct _fx_R9Ast__id_t km_name;
  int_ km_idx;
  int_ km_toposort_idx;
  fx_str_t km_cname;
  struct _fx_LN14K_form__kexp_t_data_t* km_top;
  struct _fx_Li_data_t* km_deps;
  bool km_skip;
  bool km_main;
  struct _fx_R14Ast__pragmas_t km_pragmas;
} _fx_R17K_form__kmodule_t;
/* List of modules. */
typedef struct _fx_LR17K_form__kmodule_t_data_t {
  int_ rc;
  struct _fx_LR17K_form__kmodule_t_data_t* tl;
  struct _fx_R17K_form__kmodule_t hd;
} _fx_LR17K_form__kmodule_t_data_t, *_fx_LR17K_form__kmodule_t;
/* --- C-form (C-code IR) declarations: types, expressions, statements. --- */
/* (id, C type) pair. */
typedef struct _fx_T2R9Ast__id_tN14C_form__ctyp_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
} _fx_T2R9Ast__id_tN14C_form__ctyp_t;
/* List of (id, C type) pairs. */
typedef struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t {
  int_ rc;
  struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl;
  struct _fx_T2R9Ast__id_tN14C_form__ctyp_t hd;
} _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t, *_fx_LT2R9Ast__id_tN14C_form__ctyp_t;
/* C-form interface definition (with vtable id). */
typedef struct _fx_R23C_form__cdefinterface_t {
  struct _fx_R9Ast__id_t ci_name;
  fx_str_t ci_cname;
  struct _fx_R9Ast__id_t ci_id;
  struct _fx_R9Ast__id_t ci_vtbl;
  struct _fx_R9Ast__id_t ci_base;
  struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* ci_all_methods;
  struct _fx_LN12Ast__scope_t_data_t* ci_scope;
  struct _fx_R10Ast__loc_t ci_loc;
} _fx_R23C_form__cdefinterface_t;
typedef struct _fx_rR23C_form__cdefinterface_t_data_t {
  int_ rc;
  struct _fx_R23C_form__cdefinterface_t data;
} _fx_rR23C_form__cdefinterface_t_data_t, *_fx_rR23C_form__cdefinterface_t;
/* option<ctyp>. */
typedef struct _fx_Nt6option1N14C_form__ctyp_t {
  int tag;
  union {
   struct _fx_N14C_form__ctyp_t_data_t* Some;
  } u;
} _fx_Nt6option1N14C_form__ctyp_t;
/* option<cexp>. */
typedef struct _fx_Nt6option1N14C_form__cexp_t {
  int tag;
  union {
   struct _fx_N14C_form__cexp_t_data_t* Some;
  } u;
} _fx_Nt6option1N14C_form__cexp_t;
/* C binary-operator variant; only the comparison case carries a payload. */
typedef struct _fx_N17C_form__cbinary_t {
  int tag;
  union {
   struct _fx_N12Ast__cmpop_t COpCmp;
  } u;
} _fx_N17C_form__cbinary_t;
/* Payload-less enums: tag alone selects the case. */
typedef struct _fx_N16C_form__cunary_t {
  int tag;
} _fx_N16C_form__cunary_t;
typedef struct _fx_N19C_form__ctyp_attr_t {
  int tag;
} _fx_N19C_form__ctyp_attr_t;
typedef struct _fx_N19C_form__carg_attr_t {
  int tag;
} _fx_N19C_form__carg_attr_t;
/* Properties of a C type (incl. make/free/copy helper ids). */
typedef struct _fx_R17C_form__ctprops_t {
  bool ctp_scalar;
  bool ctp_complex;
  bool ctp_ptr;
  bool ctp_pass_by_ref;
  struct _fx_LR9Ast__id_t_data_t* ctp_make;
  struct _fx_Ta2R9Ast__id_t ctp_free;
  struct _fx_Ta2R9Ast__id_t ctp_copy;
} _fx_R17C_form__ctprops_t;
/* CTypStruct / CTypUnion payload: (optional tag name, member list). */
typedef struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t {
  struct _fx_Nt6option1R9Ast__id_t t0;
  struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1;
} _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t;
/* List of C types. */
typedef struct _fx_LN14C_form__ctyp_t_data_t {
  int_ rc;
  struct _fx_LN14C_form__ctyp_t_data_t* tl;
  struct _fx_N14C_form__ctyp_t_data_t* hd;
} _fx_LN14C_form__ctyp_t_data_t, *_fx_LN14C_form__ctyp_t;
/* CTypFunRawPtr payload: (argument types, result type). */
typedef struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t {
  struct _fx_LN14C_form__ctyp_t_data_t* t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
} _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t;
/* List of C type attributes. */
typedef struct _fx_LN19C_form__ctyp_attr_t_data_t {
  int_ rc;
  struct _fx_LN19C_form__ctyp_attr_t_data_t* tl;
  struct _fx_N19C_form__ctyp_attr_t hd;
} _fx_LN19C_form__ctyp_attr_t_data_t, *_fx_LN19C_form__ctyp_attr_t;
/* CTypRawPtr / CTypRawArray payload: (attributes, element type). */
typedef struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t {
  struct _fx_LN19C_form__ctyp_attr_t_data_t* t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
} _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t;
/* CTypArray payload: (dimension count, element type). */
typedef struct _fx_T2iN14C_form__ctyp_t {
  int_ t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
} _fx_T2iN14C_form__ctyp_t;
/* The C-form type variant (ref-counted; recursive through pointers). */
typedef struct _fx_N14C_form__ctyp_t_data_t {
  int_ rc;
  int tag;
  union {
   int_ CTypSInt;
   int_ CTypUInt;
   int_ CTypFloat;
   struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypStruct;
   struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypUnion;
   struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t CTypFunRawPtr;
   struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawPtr;
   struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawArray;
   struct _fx_T2iN14C_form__ctyp_t CTypArray;
   struct _fx_N14C_form__ctyp_t_data_t* CTypVector;
   struct _fx_R9Ast__id_t CTypName;
  } u;
} _fx_N14C_form__ctyp_t_data_t, *_fx_N14C_form__ctyp_t;
/* (C type, loc) -- the common trailing pair of cexp payloads. */
typedef struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__ctyp_t_data_t* t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2N14C_form__ctyp_tR10Ast__loc_t;
/* CExpIdent payload. */
typedef struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1;
} _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* CExpLit payload. */
typedef struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14K_form__klit_t t0;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1;
} _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* CExpBinary payload: (op, lhs, rhs, (type, loc)). */
typedef struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N17C_form__cbinary_t t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_N14C_form__cexp_t_data_t* t2;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3;
} _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* CExpUnary payload: (op, operand, (type, loc)). */
typedef struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N16C_form__cunary_t t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2;
} _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* CExpMem / CExpArrow payload: (object, member id, (type, loc)). */
typedef struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_R9Ast__id_t t1;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* CExpCast payload: (value, target type, loc). */
typedef struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t;
/* CExpTernary payload: (cond, then, else, (type, loc)). */
typedef struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_N14C_form__cexp_t_data_t* t2;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3;
} _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* List of C expressions. */
typedef struct _fx_LN14C_form__cexp_t_data_t {
  int_ rc;
  struct _fx_LN14C_form__cexp_t_data_t* tl;
  struct _fx_N14C_form__cexp_t_data_t* hd;
} _fx_LN14C_form__cexp_t_data_t, *_fx_LN14C_form__cexp_t;
/* CExpCall payload: (callee, args, (type, loc)). */
typedef struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_LN14C_form__cexp_t_data_t* t1;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* CExpInit payload: (initializer elements, (type, loc)). */
typedef struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_LN14C_form__cexp_t_data_t* t0;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1;
} _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;
/* The C-form expression variant. */
typedef struct _fx_N14C_form__cexp_t_data_t {
  int_ rc;
  int tag;
  union {
   struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpIdent;
   struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t CExpLit;
   struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpBinary;
   struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpUnary;
   struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpMem;
   struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpArrow;
   struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t CExpCast;
   struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpTernary;
   struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpCall;
   struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpInit;
   struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t CExpTyp;
   struct _fx_T2SR10Ast__loc_t CExpCCode;
  } u;
} _fx_N14C_form__cexp_t_data_t, *_fx_N14C_form__cexp_t;
/* (optional C expression, loc). */
typedef struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t {
  struct _fx_Nt6option1N14C_form__cexp_t t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t;
/* List of C statements. */
typedef struct _fx_LN15C_form__cstmt_t_data_t {
  int_ rc;
  struct _fx_LN15C_form__cstmt_t_data_t* tl;
  struct _fx_N15C_form__cstmt_t_data_t* hd;
} _fx_LN15C_form__cstmt_t_data_t, *_fx_LN15C_form__cstmt_t;
/* --- C-form statement payload tuples and top-level C definitions. --- */
/* (statement list, loc) -- a block. */
typedef struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_LN15C_form__cstmt_t_data_t* t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2LN15C_form__cstmt_tR10Ast__loc_t;
/* (label id, statement) pair. */
typedef struct _fx_T2R9Ast__id_tN15C_form__cstmt_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N15C_form__cstmt_t_data_t* t1;
} _fx_T2R9Ast__id_tN15C_form__cstmt_t;
/* (cond, then-stmt, else-stmt, loc) -- an `if` statement. */
typedef struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N15C_form__cstmt_t_data_t* t1;
  struct _fx_N15C_form__cstmt_t_data_t* t2;
  struct _fx_R10Ast__loc_t t3;
} _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t;
/* (decl type, init exprs, cond, increments, body, loc) -- a `for` loop. */
typedef struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_Nt6option1N14C_form__ctyp_t t0;
  struct _fx_LN14C_form__cexp_t_data_t* t1;
  struct _fx_Nt6option1N14C_form__cexp_t t2;
  struct _fx_LN14C_form__cexp_t_data_t* t3;
  struct _fx_N15C_form__cstmt_t_data_t* t4;
  struct _fx_R10Ast__loc_t t5;
} _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t;
/* (cond, body, loc) -- a `while` loop. */
typedef struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N15C_form__cstmt_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t;
/* (body, cond, loc) -- a `do`-`while` loop. */
typedef struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t {
  struct _fx_N15C_form__cstmt_t_data_t* t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t;
/* One switch arm: (case labels, arm statements). */
typedef struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t {
  struct _fx_LN14C_form__cexp_t_data_t* t0;
  struct _fx_LN15C_form__cstmt_t_data_t* t1;
} _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t;
/* List of switch arms. */
typedef struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t {
  int_ rc;
  struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl;
  struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t hd;
} _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t;
/* (subject, arms, loc) -- a `switch` statement. */
typedef struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t;
/* (type, name, optional initializer, loc) -- a local declaration. */
typedef struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t {
  struct _fx_N14C_form__ctyp_t_data_t* t0;
  struct _fx_R9Ast__id_t t1;
  struct _fx_Nt6option1N14C_form__cexp_t t2;
  struct _fx_R10Ast__loc_t t3;
} _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t;
/* List of argument attributes. */
typedef struct _fx_LN19C_form__carg_attr_t_data_t {
  int_ rc;
  struct _fx_LN19C_form__carg_attr_t_data_t* tl;
  struct _fx_N19C_form__carg_attr_t hd;
} _fx_LN19C_form__carg_attr_t_data_t, *_fx_LN19C_form__carg_attr_t;
/* One function argument: (name, type, attributes). */
typedef struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
  struct _fx_LN19C_form__carg_attr_t_data_t* t2;
} _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t;
/* List of function arguments. */
typedef struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t {
  int_ rc;
  struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl;
  struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t hd;
} _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t, *_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t;
/* C function definition: name, C name, args, return type, body, flags,
   scope, location. */
typedef struct _fx_R17C_form__cdeffun_t {
  struct _fx_R9Ast__id_t cf_name;
  fx_str_t cf_cname;
  struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* cf_args;
  struct _fx_N14C_form__ctyp_t_data_t* cf_rt;
  struct _fx_LN15C_form__cstmt_t_data_t* cf_body;
  struct _fx_R16Ast__fun_flags_t cf_flags;
  struct _fx_LN12Ast__scope_t_data_t* cf_scope;
  struct _fx_R10Ast__loc_t cf_loc;
} _fx_R17C_form__cdeffun_t;
typedef struct _fx_rR17C_form__cdeffun_t_data_t {
  int_ rc;
  struct _fx_R17C_form__cdeffun_t data;
} _fx_rR17C_form__cdeffun_t_data_t, *_fx_rR17C_form__cdeffun_t;
/* C type definition: name, underlying type, properties, enum/iface links. */
typedef struct _fx_R17C_form__cdeftyp_t {
  struct _fx_R9Ast__id_t ct_name;
  struct _fx_N14C_form__ctyp_t_data_t* ct_typ;
  fx_str_t ct_cname;
  struct _fx_R17C_form__ctprops_t ct_props;
  int_ ct_data_start;
  struct _fx_R9Ast__id_t ct_enum;
  struct _fx_LR9Ast__id_t_data_t* ct_ifaces;
  struct _fx_R9Ast__id_t ct_ifaces_id;
  struct _fx_LN12Ast__scope_t_data_t* ct_scope;
  struct _fx_R10Ast__loc_t ct_loc;
} _fx_R17C_form__cdeftyp_t;
typedef struct _fx_rR17C_form__cdeftyp_t_data_t {
  int_ rc;
  struct _fx_R17C_form__cdeftyp_t data;
} _fx_rR17C_form__cdeftyp_t_data_t, *_fx_rR17C_form__cdeftyp_t;
/* (name, optional initializer) pair. */
typedef struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_Nt6option1N14C_form__cexp_t t1;
} _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t;
typedef struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t {
int_ rc;
struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl;
struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t hd;
} _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t, *_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t;
typedef struct _fx_R18C_form__cdefenum_t {
struct _fx_R9Ast__id_t cenum_name;
struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* cenum_members;
fx_str_t cenum_cname;
struct _fx_LN12Ast__scope_t_data_t* cenum_scope;
struct _fx_R10Ast__loc_t cenum_loc;
} _fx_R18C_form__cdefenum_t;
typedef struct _fx_rR18C_form__cdefenum_t_data_t {
int_ rc;
struct _fx_R18C_form__cdefenum_t data;
} _fx_rR18C_form__cdefenum_t_data_t, *_fx_rR18C_form__cdefenum_t;
typedef struct _fx_R19C_form__cdefmacro_t {
struct _fx_R9Ast__id_t cm_name;
fx_str_t cm_cname;
struct _fx_LR9Ast__id_t_data_t* cm_args;
struct _fx_LN15C_form__cstmt_t_data_t* cm_body;
struct _fx_LN12Ast__scope_t_data_t* cm_scope;
struct _fx_R10Ast__loc_t cm_loc;
} _fx_R19C_form__cdefmacro_t;
typedef struct _fx_rR19C_form__cdefmacro_t_data_t {
int_ rc;
struct _fx_R19C_form__cdefmacro_t data;
} _fx_rR19C_form__cdefmacro_t_data_t, *_fx_rR19C_form__cdefmacro_t;
typedef struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t {
struct _fx_N14C_form__cexp_t_data_t* t0;
struct _fx_LN15C_form__cstmt_t_data_t* t1;
} _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t;
typedef struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t {
int_ rc;
struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl;
struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t hd;
} _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t;
typedef struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t {
struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0;
struct _fx_LN15C_form__cstmt_t_data_t* t1;
struct _fx_R10Ast__loc_t t2;
} _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t;
typedef struct _fx_N15C_form__cstmt_t_data_t {
int_ rc;
int tag;
union {
struct _fx_R10Ast__loc_t CStmtNop;
struct _fx_T2SR10Ast__loc_t CComment;
struct _fx_N14C_form__cexp_t_data_t* CExp;
struct _fx_R10Ast__loc_t CStmtBreak;
struct _fx_R10Ast__loc_t CStmtContinue;
struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t CStmtReturn;
struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t CStmtBlock;
struct _fx_T2R9Ast__id_tN15C_form__cstmt_t CStmtSync;
struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t CStmtIf;
struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtGoto;
struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtLabel;
struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t
CStmtFor;
struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtWhile;
struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t CStmtDoWhile;
struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t CStmtSwitch;
struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t CDefVal;
struct _fx_rR17C_form__cdeffun_t_data_t* CDefFun;
struct _fx_rR17C_form__cdeftyp_t_data_t* CDefTyp;
struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardSym;
struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardTyp;
struct _fx_rR18C_form__cdefenum_t_data_t* CDefEnum;
struct _fx_rR23C_form__cdefinterface_t_data_t* CDefInterface;
struct _fx_rR19C_form__cdefmacro_t_data_t* CMacroDef;
struct _fx_T2R9Ast__id_tR10Ast__loc_t CMacroUndef;
struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t CMacroIf;
struct _fx_T2SR10Ast__loc_t CMacroInclude;
struct _fx_T2SR10Ast__loc_t CMacroPragma;
} u;
} _fx_N15C_form__cstmt_t_data_t, *_fx_N15C_form__cstmt_t;
// ---------------------------------------------------------------------------
// Module records, miscellaneous generated tuple types, and exception payload
// types.  Same mangling scheme as above (T<n> = tuple, Ta<n> = homogeneous
// tuple/array-like, L = ref-counted list, E... = exception payload).
// ---------------------------------------------------------------------------
// One compiled C module: identity, emitted code, and build flags.
typedef struct _fx_R17C_form__cmodule_t {
struct _fx_R9Ast__id_t cmod_name;
fx_str_t cmod_cname;
struct _fx_LN15C_form__cstmt_t_data_t* cmod_ccode;
bool cmod_main;
bool cmod_recompile;
bool cmod_skip;
struct _fx_R14Ast__pragmas_t cmod_pragmas;
} _fx_R17C_form__cmodule_t;
typedef struct _fx_LR17C_form__cmodule_t_data_t {
int_ rc;
struct _fx_LR17C_form__cmodule_t_data_t* tl;
struct _fx_R17C_form__cmodule_t hd;
} _fx_LR17C_form__cmodule_t_data_t, *_fx_LR17C_form__cmodule_t;
// Enum-like variant with no payload (tag only).
typedef struct _fx_N20Compiler__msgcolor_t {
int tag;
} _fx_N20Compiler__msgcolor_t;
// (token list, bool) pair.
typedef struct _fx_T2LN14Lexer__token_tB {
struct _fx_LN14Lexer__token_t_data_t* t0;
bool t1;
} _fx_T2LN14Lexer__token_tB;
// (string, bool) pair and its list type.
typedef struct _fx_T2SB {
fx_str_t t0;
bool t1;
} _fx_T2SB;
typedef struct _fx_LT2SB_data_t {
int_ rc;
struct _fx_LT2SB_data_t* tl;
struct _fx_T2SB hd;
} _fx_LT2SB_data_t, *_fx_LT2SB;
// (string, string list) pair.
typedef struct _fx_T2SLS {
fx_str_t t0;
struct _fx_LS_data_t* t1;
} _fx_T2SLS;
// Pair of string lists.
typedef struct _fx_Ta2LS {
struct _fx_LS_data_t* t0;
struct _fx_LS_data_t* t1;
} _fx_Ta2LS;
// (int, int list) pair and its list type.
typedef struct _fx_T2iLi {
int_ t0;
struct _fx_Li_data_t* t1;
} _fx_T2iLi;
typedef struct _fx_LT2iLi_data_t {
int_ rc;
struct _fx_LT2iLi_data_t* tl;
struct _fx_T2iLi hd;
} _fx_LT2iLi_data_t, *_fx_LT2iLi;
// Ref cell holding an int list.
typedef struct _fx_rLi_data_t {
int_ rc;
struct _fx_Li_data_t* data;
} _fx_rLi_data_t, *_fx_rLi;
typedef struct _fx_T3BBS {
bool t0;
bool t1;
fx_str_t t2;
} _fx_T3BBS;
// (K-form module list, bool) pair.
typedef struct _fx_T2LR17K_form__kmodule_tB {
struct _fx_LR17K_form__kmodule_t_data_t* t0;
bool t1;
} _fx_T2LR17K_form__kmodule_tB;
// Homogeneous string tuples of arity 9 / 2 / 3 / 4.
typedef struct _fx_Ta9S {
fx_str_t t0;
fx_str_t t1;
fx_str_t t2;
fx_str_t t3;
fx_str_t t4;
fx_str_t t5;
fx_str_t t6;
fx_str_t t7;
fx_str_t t8;
} _fx_Ta9S;
typedef struct _fx_Ta2S {
fx_str_t t0;
fx_str_t t1;
} _fx_Ta2S;
typedef struct _fx_Ta3S {
fx_str_t t0;
fx_str_t t1;
fx_str_t t2;
} _fx_Ta3S;
typedef struct _fx_Ta4S {
fx_str_t t0;
fx_str_t t1;
fx_str_t t2;
fx_str_t t3;
} _fx_Ta4S;
typedef struct _fx_T5BBLSBS {
bool t0;
bool t1;
struct _fx_LS_data_t* t2;
bool t3;
fx_str_t t4;
} _fx_T5BBLSBS;
typedef struct _fx_T5BBLSBLS {
bool t0;
bool t1;
struct _fx_LS_data_t* t2;
bool t3;
struct _fx_LS_data_t* t4;
} _fx_T5BBLSBLS;
// (C module list, bool) pair.
typedef struct _fx_T2LR17C_form__cmodule_tB {
struct _fx_LR17C_form__cmodule_t_data_t* t0;
bool t1;
} _fx_T2LR17C_form__cmodule_tB;
// Ref-counted exception payloads: Exit(code), Fail(msg), and three
// location-carrying compiler error exceptions.
typedef struct {
int_ rc;
int_ data;
} _fx_E4Exit_data_t;
typedef struct {
int_ rc;
fx_str_t data;
} _fx_E4Fail_data_t;
typedef struct {
int_ rc;
struct _fx_T2Ta2iS data;
} _fx_E22LexerUtils__LexerError_data_t;
typedef struct {
int_ rc;
struct _fx_T2R10Ast__loc_tS data;
} _fx_E17Ast__CompileError_data_t;
typedef struct {
int_ rc;
struct _fx_T2R10Ast__loc_tS data;
} _fx_E18Parser__ParseError_data_t;
// ---------------------------------------------------------------------------
// Generated helpers.  The *_IMPL macro bodies (defined in the runtime
// headers, not visible here) reference the enclosing parameters — dst, hd,
// tl, addref_tl, fx_result — by name, so parameter names are part of the
// macro contract and must not be changed.
// ---------------------------------------------------------------------------
// Release a string list; presumably frees each element with fx_free_str and
// unlinks nodes as their refcounts drop — confirm against FX_FREE_LIST_IMPL.
static void _fx_free_LS(struct _fx_LS_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LS, fx_free_str);
}
// Prepend hd to tl, producing a new list node in *fx_result; the element is
// copied with fx_copy_str.  addref_tl presumably controls whether tl's
// refcount is bumped — confirm against FX_MAKE_LIST_IMPL.
static int _fx_cons_LS(fx_str_t* hd, struct _fx_LS_data_t* tl, bool addref_tl, struct _fx_LS_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LS, fx_copy_str);
}
// File record helpers: the only resource is the opaque handle (fx_cptr_t).
static void _fx_free_R7File__t(struct _fx_R7File__t* dst)
{
fx_free_cptr(&dst->handle);
}
static void _fx_copy_R7File__t(struct _fx_R7File__t* src, struct _fx_R7File__t* dst)
{
fx_copy_cptr(src->handle, &dst->handle);
}
static void _fx_make_R7File__t(fx_cptr_t r_handle, struct _fx_R7File__t* fx_result)
{
fx_copy_cptr(r_handle, &fx_result->handle);
}
// Variant helpers for Options.optval.  Only tag 3 (OptString) owns heap
// data; every other variant is plain-old-data, so free is a no-op and copy
// is a raw union assignment.
static void _fx_free_N17Options__optval_t(struct _fx_N17Options__optval_t* dst)
{
switch (dst->tag) {
case 3:
fx_free_str(&dst->u.OptString); break;
default:
;
}
// Reset the tag so a double-free sees an inert (payload-less) variant.
dst->tag = 0;
}
static void _fx_copy_N17Options__optval_t(struct _fx_N17Options__optval_t* src, struct _fx_N17Options__optval_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 3:
fx_copy_str(&src->u.OptString, &dst->u.OptString); break;
default:
// POD variants: copying the whole union is sufficient.
dst->u = src->u;
}
}
// (string, optval) tuple helpers: free/copy/make delegate per field.
static void _fx_free_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* dst)
{
fx_free_str(&dst->t0);
_fx_free_N17Options__optval_t(&dst->t1);
}
static void _fx_copy_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* src, struct _fx_T2SN17Options__optval_t* dst)
{
fx_copy_str(&src->t0, &dst->t0);
_fx_copy_N17Options__optval_t(&src->t1, &dst->t1);
}
static void _fx_make_T2SN17Options__optval_t(
fx_str_t* t0,
struct _fx_N17Options__optval_t* t1,
struct _fx_T2SN17Options__optval_t* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
_fx_copy_N17Options__optval_t(t1, &fx_result->t1);
}
// List-of-(string, optval) helpers; see the macro naming note above.
static void _fx_free_LT2SN17Options__optval_t(struct _fx_LT2SN17Options__optval_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_free_T2SN17Options__optval_t);
}
static int _fx_cons_LT2SN17Options__optval_t(
struct _fx_T2SN17Options__optval_t* hd,
struct _fx_LT2SN17Options__optval_t_data_t* tl,
bool addref_tl,
struct _fx_LT2SN17Options__optval_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_copy_T2SN17Options__optval_t);
}
// Compiler options record helpers.  free releases only the heap-owning
// fields (strings and lists); bool/int fields need no cleanup.  copy/make
// duplicate every field: strings via fx_copy_str, shared lists via
// FX_COPY_PTR (presumably an addref+assign macro — confirm against the
// runtime headers), scalars by plain assignment.
static void _fx_free_R18Options__options_t(struct _fx_R18Options__options_t* dst)
{
_fx_free_LS(&dst->app_args);
fx_free_str(&dst->app_filename);
fx_free_str(&dst->build_dir);
fx_free_str(&dst->build_rootdir);
fx_free_str(&dst->cflags);
fx_free_str(&dst->clibs);
fx_free_str(&dst->filename);
_fx_free_LS(&dst->include_path);
_fx_free_LT2SN17Options__optval_t(&dst->defines);
fx_free_str(&dst->output_name);
}
static void _fx_copy_R18Options__options_t(struct _fx_R18Options__options_t* src, struct _fx_R18Options__options_t* dst)
{
FX_COPY_PTR(src->app_args, &dst->app_args);
fx_copy_str(&src->app_filename, &dst->app_filename);
dst->arch64 = src->arch64;
dst->force_rebuild = src->force_rebuild;
fx_copy_str(&src->build_dir, &dst->build_dir);
fx_copy_str(&src->build_rootdir, &dst->build_rootdir);
fx_copy_str(&src->cflags, &dst->cflags);
fx_copy_str(&src->clibs, &dst->clibs);
dst->compile_by_cpp = src->compile_by_cpp;
fx_copy_str(&src->filename, &dst->filename);
dst->gen_c = src->gen_c;
FX_COPY_PTR(src->include_path, &dst->include_path);
dst->debug = src->debug;
FX_COPY_PTR(src->defines, &dst->defines);
dst->optim_iters = src->optim_iters;
dst->inline_thresh = src->inline_thresh;
dst->enable_openmp = src->enable_openmp;
dst->relax = src->relax;
dst->use_preamble = src->use_preamble;
dst->make_app = src->make_app;
dst->optimize_level = src->optimize_level;
fx_copy_str(&src->output_name, &dst->output_name);
dst->print_ast0 = src->print_ast0;
dst->print_ast = src->print_ast;
dst->print_k0 = src->print_k0;
dst->print_k = src->print_k;
dst->print_tokens = src->print_tokens;
dst->run_app = src->run_app;
dst->verbose = src->verbose;
dst->W_unused = src->W_unused;
}
// Construct the record from one argument per field; fx_result is assumed to
// be uninitialized output storage (fields are written, never released first).
static void _fx_make_R18Options__options_t(
struct _fx_LS_data_t* r_app_args,
fx_str_t* r_app_filename,
bool r_arch64,
bool r_force_rebuild,
fx_str_t* r_build_dir,
fx_str_t* r_build_rootdir,
fx_str_t* r_cflags,
fx_str_t* r_clibs,
bool r_compile_by_cpp,
fx_str_t* r_filename,
bool r_gen_c,
struct _fx_LS_data_t* r_include_path,
bool r_debug,
struct _fx_LT2SN17Options__optval_t_data_t* r_defines,
int_ r_optim_iters,
int_ r_inline_thresh,
bool r_enable_openmp,
bool r_relax,
bool r_use_preamble,
bool r_make_app,
int_ r_optimize_level,
fx_str_t* r_output_name,
bool r_print_ast0,
bool r_print_ast,
bool r_print_k0,
bool r_print_k,
bool r_print_tokens,
bool r_run_app,
bool r_verbose,
bool r_W_unused,
struct _fx_R18Options__options_t* fx_result)
{
FX_COPY_PTR(r_app_args, &fx_result->app_args);
fx_copy_str(r_app_filename, &fx_result->app_filename);
fx_result->arch64 = r_arch64;
fx_result->force_rebuild = r_force_rebuild;
fx_copy_str(r_build_dir, &fx_result->build_dir);
fx_copy_str(r_build_rootdir, &fx_result->build_rootdir);
fx_copy_str(r_cflags, &fx_result->cflags);
fx_copy_str(r_clibs, &fx_result->clibs);
fx_result->compile_by_cpp = r_compile_by_cpp;
fx_copy_str(r_filename, &fx_result->filename);
fx_result->gen_c = r_gen_c;
FX_COPY_PTR(r_include_path, &fx_result->include_path);
fx_result->debug = r_debug;
FX_COPY_PTR(r_defines, &fx_result->defines);
fx_result->optim_iters = r_optim_iters;
fx_result->inline_thresh = r_inline_thresh;
fx_result->enable_openmp = r_enable_openmp;
fx_result->relax = r_relax;
fx_result->use_preamble = r_use_preamble;
fx_result->make_app = r_make_app;
fx_result->optimize_level = r_optimize_level;
fx_copy_str(r_output_name, &fx_result->output_name);
fx_result->print_ast0 = r_print_ast0;
fx_result->print_ast = r_print_ast;
fx_result->print_k0 = r_print_k0;
fx_result->print_k = r_print_k;
fx_result->print_tokens = r_print_tokens;
fx_result->run_app = r_run_app;
fx_result->verbose = r_verbose;
fx_result->W_unused = r_W_unused;
}
// ((int,int), string) tuple helpers.  t0 is POD (copied by assignment);
// only the string t1 owns heap data.
static void _fx_free_T2Ta2iS(struct _fx_T2Ta2iS* dst)
{
fx_free_str(&dst->t1);
}
static void _fx_copy_T2Ta2iS(struct _fx_T2Ta2iS* src, struct _fx_T2Ta2iS* dst)
{
dst->t0 = src->t0;
fx_copy_str(&src->t1, &dst->t1);
}
static void _fx_make_T2Ta2iS(struct _fx_Ta2i* t0, fx_str_t* t1, struct _fx_T2Ta2iS* fx_result)
{
fx_result->t0 = *t0;
fx_copy_str(t1, &fx_result->t1);
}
// Release an option cell: drop the refcount and, when this was the last
// reference (FX_DECREF returns the pre-decrement value of 1), free the Some
// payload and the cell itself.  The pointer is always nulled afterwards.
static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
_fx_free_N10Ast__exp_t(&(*dst)->u.Some); fx_free(*dst);
}
*dst = 0;
}
static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
_fx_free_N10Ast__typ_t(&(*dst)->u.Some); fx_free(*dst);
}
*dst = 0;
}
// AST helper group: scope lists, value flags, defval records, the
// (id -> env_entry list) map wrapper, id/pattern lists, and an id-list ref
// cell.  Parameter names feed the *_IMPL macros by name capture (see note
// above); FX_COPY_SIMPLE_BY_PTR presumably copies a POD element by pointer.
static int _fx_cons_LN12Ast__scope_t(
struct _fx_N12Ast__scope_t* hd,
struct _fx_LN12Ast__scope_t_data_t* tl,
bool addref_tl,
struct _fx_LN12Ast__scope_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN12Ast__scope_t, FX_COPY_SIMPLE_BY_PTR);
}
// val_flags: only the scope list is heap-owned; the rest are scalars.
static void _fx_free_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* dst)
{
fx_free_list_simple(&dst->val_flag_global);
}
static void _fx_copy_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* src, struct _fx_R16Ast__val_flags_t* dst)
{
dst->val_flag_arg = src->val_flag_arg;
dst->val_flag_mutable = src->val_flag_mutable;
dst->val_flag_temp = src->val_flag_temp;
dst->val_flag_tempref = src->val_flag_tempref;
dst->val_flag_private = src->val_flag_private;
dst->val_flag_subarray = src->val_flag_subarray;
dst->val_flag_instance = src->val_flag_instance;
dst->val_flag_method = src->val_flag_method;
dst->val_flag_ctor = src->val_flag_ctor;
FX_COPY_PTR(src->val_flag_global, &dst->val_flag_global);
}
static void _fx_make_R16Ast__val_flags_t(
bool r_val_flag_arg,
bool r_val_flag_mutable,
bool r_val_flag_temp,
bool r_val_flag_tempref,
bool r_val_flag_private,
bool r_val_flag_subarray,
bool r_val_flag_instance,
struct _fx_T2R9Ast__id_ti* r_val_flag_method,
int_ r_val_flag_ctor,
struct _fx_LN12Ast__scope_t_data_t* r_val_flag_global,
struct _fx_R16Ast__val_flags_t* fx_result)
{
fx_result->val_flag_arg = r_val_flag_arg;
fx_result->val_flag_mutable = r_val_flag_mutable;
fx_result->val_flag_temp = r_val_flag_temp;
fx_result->val_flag_tempref = r_val_flag_tempref;
fx_result->val_flag_private = r_val_flag_private;
fx_result->val_flag_subarray = r_val_flag_subarray;
fx_result->val_flag_instance = r_val_flag_instance;
fx_result->val_flag_method = *r_val_flag_method;
fx_result->val_flag_ctor = r_val_flag_ctor;
FX_COPY_PTR(r_val_flag_global, &fx_result->val_flag_global);
}
// defval (value definition) record helpers.
static void _fx_free_R13Ast__defval_t(struct _fx_R13Ast__defval_t* dst)
{
_fx_free_N10Ast__typ_t(&dst->dv_typ);
_fx_free_R16Ast__val_flags_t(&dst->dv_flags);
fx_free_list_simple(&dst->dv_scope);
}
static void _fx_copy_R13Ast__defval_t(struct _fx_R13Ast__defval_t* src, struct _fx_R13Ast__defval_t* dst)
{
dst->dv_name = src->dv_name;
FX_COPY_PTR(src->dv_typ, &dst->dv_typ);
_fx_copy_R16Ast__val_flags_t(&src->dv_flags, &dst->dv_flags);
FX_COPY_PTR(src->dv_scope, &dst->dv_scope);
dst->dv_loc = src->dv_loc;
}
static void _fx_make_R13Ast__defval_t(
struct _fx_R9Ast__id_t* r_dv_name,
struct _fx_N10Ast__typ_t_data_t* r_dv_typ,
struct _fx_R16Ast__val_flags_t* r_dv_flags,
struct _fx_LN12Ast__scope_t_data_t* r_dv_scope,
struct _fx_R10Ast__loc_t* r_dv_loc,
struct _fx_R13Ast__defval_t* fx_result)
{
fx_result->dv_name = *r_dv_name;
FX_COPY_PTR(r_dv_typ, &fx_result->dv_typ);
_fx_copy_R16Ast__val_flags_t(r_dv_flags, &fx_result->dv_flags);
FX_COPY_PTR(r_dv_scope, &fx_result->dv_scope);
fx_result->dv_loc = *r_dv_loc;
}
// Map wrapper: a tree root plus a comparison function pointer (closure).
static void _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst)
{
_fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->root);
fx_free_fp(&dst->cmp);
}
static void _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(
struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* src,
struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst)
{
FX_COPY_PTR(src->root, &dst->root);
FX_COPY_FP(&src->cmp, &dst->cmp);
}
static void _fx_make_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(
struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* r_root,
struct _fx_FPi2R9Ast__id_tR9Ast__id_t* r_cmp,
struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* fx_result)
{
FX_COPY_PTR(r_root, &fx_result->root);
FX_COPY_FP(r_cmp, &fx_result->cmp);
}
// id and pattern lists.
static int _fx_cons_LR9Ast__id_t(
struct _fx_R9Ast__id_t* hd,
struct _fx_LR9Ast__id_t_data_t* tl,
bool addref_tl,
struct _fx_LR9Ast__id_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR);
}
static void _fx_free_LN10Ast__pat_t(struct _fx_LN10Ast__pat_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN10Ast__pat_t, _fx_free_N10Ast__pat_t);
}
static int _fx_cons_LN10Ast__pat_t(
struct _fx_N10Ast__pat_t_data_t* hd,
struct _fx_LN10Ast__pat_t_data_t* tl,
bool addref_tl,
struct _fx_LN10Ast__pat_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN10Ast__pat_t, FX_COPY_PTR);
}
// Ref cell around an id list (FX_*_REF_IMPL macros capture arg/fx_result).
static void _fx_free_rLR9Ast__id_t(struct _fx_rLR9Ast__id_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rLR9Ast__id_t, fx_free_list_simple);
}
static int _fx_make_rLR9Ast__id_t(struct _fx_LR9Ast__id_t_data_t* arg, struct _fx_rLR9Ast__id_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rLR9Ast__id_t, FX_COPY_PTR);
}
// deffun / defexn / deftyp record helpers plus their ref-cell wrappers.
// Pattern throughout: free releases heap-owned fields only; copy/make write
// every field (POD by assignment, shared pointers via FX_COPY_PTR, nested
// records via their _fx_copy_* helper).
static void _fx_free_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* dst)
{
fx_free_list_simple(&dst->df_templ_args);
_fx_free_LN10Ast__pat_t(&dst->df_args);
_fx_free_N10Ast__typ_t(&dst->df_typ);
_fx_free_N10Ast__exp_t(&dst->df_body);
fx_free_list_simple(&dst->df_scope);
_fx_free_rLR9Ast__id_t(&dst->df_templ_inst);
_fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->df_env);
}
static void _fx_copy_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* src, struct _fx_R13Ast__deffun_t* dst)
{
dst->df_name = src->df_name;
FX_COPY_PTR(src->df_templ_args, &dst->df_templ_args);
FX_COPY_PTR(src->df_args, &dst->df_args);
FX_COPY_PTR(src->df_typ, &dst->df_typ);
FX_COPY_PTR(src->df_body, &dst->df_body);
dst->df_flags = src->df_flags;
FX_COPY_PTR(src->df_scope, &dst->df_scope);
dst->df_loc = src->df_loc;
FX_COPY_PTR(src->df_templ_inst, &dst->df_templ_inst);
_fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->df_env, &dst->df_env);
}
static void _fx_make_R13Ast__deffun_t(
struct _fx_R9Ast__id_t* r_df_name,
struct _fx_LR9Ast__id_t_data_t* r_df_templ_args,
struct _fx_LN10Ast__pat_t_data_t* r_df_args,
struct _fx_N10Ast__typ_t_data_t* r_df_typ,
struct _fx_N10Ast__exp_t_data_t* r_df_body,
struct _fx_R16Ast__fun_flags_t* r_df_flags,
struct _fx_LN12Ast__scope_t_data_t* r_df_scope,
struct _fx_R10Ast__loc_t* r_df_loc,
struct _fx_rLR9Ast__id_t_data_t* r_df_templ_inst,
struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* r_df_env,
struct _fx_R13Ast__deffun_t* fx_result)
{
fx_result->df_name = *r_df_name;
FX_COPY_PTR(r_df_templ_args, &fx_result->df_templ_args);
FX_COPY_PTR(r_df_args, &fx_result->df_args);
FX_COPY_PTR(r_df_typ, &fx_result->df_typ);
FX_COPY_PTR(r_df_body, &fx_result->df_body);
fx_result->df_flags = *r_df_flags;
FX_COPY_PTR(r_df_scope, &fx_result->df_scope);
fx_result->df_loc = *r_df_loc;
FX_COPY_PTR(r_df_templ_inst, &fx_result->df_templ_inst);
_fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(r_df_env, &fx_result->df_env);
}
static void _fx_free_rR13Ast__deffun_t(struct _fx_rR13Ast__deffun_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_free_R13Ast__deffun_t);
}
static int _fx_make_rR13Ast__deffun_t(struct _fx_R13Ast__deffun_t* arg, struct _fx_rR13Ast__deffun_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_copy_R13Ast__deffun_t);
}
// Exception definition record.
static void _fx_free_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* dst)
{
_fx_free_N10Ast__typ_t(&dst->dexn_typ);
fx_free_list_simple(&dst->dexn_scope);
}
static void _fx_copy_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* src, struct _fx_R13Ast__defexn_t* dst)
{
dst->dexn_name = src->dexn_name;
FX_COPY_PTR(src->dexn_typ, &dst->dexn_typ);
FX_COPY_PTR(src->dexn_scope, &dst->dexn_scope);
dst->dexn_loc = src->dexn_loc;
}
static void _fx_make_R13Ast__defexn_t(
struct _fx_R9Ast__id_t* r_dexn_name,
struct _fx_N10Ast__typ_t_data_t* r_dexn_typ,
struct _fx_LN12Ast__scope_t_data_t* r_dexn_scope,
struct _fx_R10Ast__loc_t* r_dexn_loc,
struct _fx_R13Ast__defexn_t* fx_result)
{
fx_result->dexn_name = *r_dexn_name;
FX_COPY_PTR(r_dexn_typ, &fx_result->dexn_typ);
FX_COPY_PTR(r_dexn_scope, &fx_result->dexn_scope);
fx_result->dexn_loc = *r_dexn_loc;
}
static void _fx_free_rR13Ast__defexn_t(struct _fx_rR13Ast__defexn_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_free_R13Ast__defexn_t);
}
static int _fx_make_rR13Ast__defexn_t(struct _fx_R13Ast__defexn_t* arg, struct _fx_rR13Ast__defexn_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_copy_R13Ast__defexn_t);
}
// Type definition record.
static void _fx_free_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* dst)
{
fx_free_list_simple(&dst->dt_templ_args);
_fx_free_N10Ast__typ_t(&dst->dt_typ);
fx_free_list_simple(&dst->dt_scope);
}
static void _fx_copy_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* src, struct _fx_R13Ast__deftyp_t* dst)
{
dst->dt_name = src->dt_name;
FX_COPY_PTR(src->dt_templ_args, &dst->dt_templ_args);
FX_COPY_PTR(src->dt_typ, &dst->dt_typ);
dst->dt_finalized = src->dt_finalized;
FX_COPY_PTR(src->dt_scope, &dst->dt_scope);
dst->dt_loc = src->dt_loc;
}
static void _fx_make_R13Ast__deftyp_t(
struct _fx_R9Ast__id_t* r_dt_name,
struct _fx_LR9Ast__id_t_data_t* r_dt_templ_args,
struct _fx_N10Ast__typ_t_data_t* r_dt_typ,
bool r_dt_finalized,
struct _fx_LN12Ast__scope_t_data_t* r_dt_scope,
struct _fx_R10Ast__loc_t* r_dt_loc,
struct _fx_R13Ast__deftyp_t* fx_result)
{
fx_result->dt_name = *r_dt_name;
FX_COPY_PTR(r_dt_templ_args, &fx_result->dt_templ_args);
FX_COPY_PTR(r_dt_typ, &fx_result->dt_typ);
fx_result->dt_finalized = r_dt_finalized;
FX_COPY_PTR(r_dt_scope, &fx_result->dt_scope);
fx_result->dt_loc = *r_dt_loc;
}
static void _fx_free_rR13Ast__deftyp_t(struct _fx_rR13Ast__deftyp_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_free_R13Ast__deftyp_t);
}
static int _fx_make_rR13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* arg, struct _fx_rR13Ast__deftyp_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_copy_R13Ast__deftyp_t);
}
// Tuple and list helpers for (id, typ) pairs — variant cases — and
// (id, id-pair list) pairs — interface associations.  Only t1 is heap-owned
// in both tuples; t0 (the id) is copied by plain assignment.
static void _fx_free_T2R9Ast__id_tN10Ast__typ_t(struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst)
{
_fx_free_N10Ast__typ_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tN10Ast__typ_t(
struct _fx_T2R9Ast__id_tN10Ast__typ_t* src,
struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tN10Ast__typ_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_N10Ast__typ_t_data_t* t1,
struct _fx_T2R9Ast__id_tN10Ast__typ_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
static void _fx_free_LT2R9Ast__id_tN10Ast__typ_t(struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_free_T2R9Ast__id_tN10Ast__typ_t);
}
static int _fx_cons_LT2R9Ast__id_tN10Ast__typ_t(
struct _fx_T2R9Ast__id_tN10Ast__typ_t* hd,
struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl,
bool addref_tl,
struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_copy_T2R9Ast__id_tN10Ast__typ_t);
}
static int _fx_cons_LTa2R9Ast__id_t(
struct _fx_Ta2R9Ast__id_t* hd,
struct _fx_LTa2R9Ast__id_t_data_t* tl,
bool addref_tl,
struct _fx_LTa2R9Ast__id_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LTa2R9Ast__id_t, FX_COPY_SIMPLE_BY_PTR);
}
static void _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst)
{
fx_free_list_simple(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t(
struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* src,
struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tLTa2R9Ast__id_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_LTa2R9Ast__id_t_data_t* t1,
struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
static void _fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t);
}
static int _fx_cons_LT2R9Ast__id_tLTa2R9Ast__id_t(
struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* hd,
struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl,
bool addref_tl,
struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t);
}
// Variant (algebraic type) definition record helpers and ref-cell wrapper.
static void _fx_free_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* dst)
{
fx_free_list_simple(&dst->dvar_templ_args);
_fx_free_N10Ast__typ_t(&dst->dvar_alias);
_fx_free_LT2R9Ast__id_tN10Ast__typ_t(&dst->dvar_cases);
fx_free_list_simple(&dst->dvar_ctors);
_fx_free_rLR9Ast__id_t(&dst->dvar_templ_inst);
_fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(&dst->dvar_ifaces);
fx_free_list_simple(&dst->dvar_scope);
}
static void _fx_copy_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* src, struct _fx_R17Ast__defvariant_t* dst)
{
dst->dvar_name = src->dvar_name;
FX_COPY_PTR(src->dvar_templ_args, &dst->dvar_templ_args);
FX_COPY_PTR(src->dvar_alias, &dst->dvar_alias);
dst->dvar_flags = src->dvar_flags;
FX_COPY_PTR(src->dvar_cases, &dst->dvar_cases);
FX_COPY_PTR(src->dvar_ctors, &dst->dvar_ctors);
FX_COPY_PTR(src->dvar_templ_inst, &dst->dvar_templ_inst);
FX_COPY_PTR(src->dvar_ifaces, &dst->dvar_ifaces);
FX_COPY_PTR(src->dvar_scope, &dst->dvar_scope);
dst->dvar_loc = src->dvar_loc;
}
static void _fx_make_R17Ast__defvariant_t(
struct _fx_R9Ast__id_t* r_dvar_name,
struct _fx_LR9Ast__id_t_data_t* r_dvar_templ_args,
struct _fx_N10Ast__typ_t_data_t* r_dvar_alias,
struct _fx_R16Ast__var_flags_t* r_dvar_flags,
struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* r_dvar_cases,
struct _fx_LR9Ast__id_t_data_t* r_dvar_ctors,
struct _fx_rLR9Ast__id_t_data_t* r_dvar_templ_inst,
struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* r_dvar_ifaces,
struct _fx_LN12Ast__scope_t_data_t* r_dvar_scope,
struct _fx_R10Ast__loc_t* r_dvar_loc,
struct _fx_R17Ast__defvariant_t* fx_result)
{
fx_result->dvar_name = *r_dvar_name;
FX_COPY_PTR(r_dvar_templ_args, &fx_result->dvar_templ_args);
FX_COPY_PTR(r_dvar_alias, &fx_result->dvar_alias);
fx_result->dvar_flags = *r_dvar_flags;
FX_COPY_PTR(r_dvar_cases, &fx_result->dvar_cases);
FX_COPY_PTR(r_dvar_ctors, &fx_result->dvar_ctors);
FX_COPY_PTR(r_dvar_templ_inst, &fx_result->dvar_templ_inst);
FX_COPY_PTR(r_dvar_ifaces, &fx_result->dvar_ifaces);
FX_COPY_PTR(r_dvar_scope, &fx_result->dvar_scope);
fx_result->dvar_loc = *r_dvar_loc;
}
static void _fx_free_rR17Ast__defvariant_t(struct _fx_rR17Ast__defvariant_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_free_R17Ast__defvariant_t);
}
static int _fx_make_rR17Ast__defvariant_t(
struct _fx_R17Ast__defvariant_t* arg,
struct _fx_rR17Ast__defvariant_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_copy_R17Ast__defvariant_t);
}
// Interface-method tuple (name, type, fun flags), its list, and the
// interface definition record with its ref-cell wrapper.
static void _fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(
struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst)
{
_fx_free_N10Ast__typ_t(&dst->t1);
}
static void _fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(
struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* src,
struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_N10Ast__typ_t_data_t* t1,
struct _fx_R16Ast__fun_flags_t* t2,
struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
static void _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(
struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t,
_fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t);
}
static int _fx_cons_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(
struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* hd,
struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl,
bool addref_tl,
struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t,
_fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t);
}
// Interface definition: two method lists (methods introduced here vs. the
// full flattened set) plus identity/scope/location.
static void _fx_free_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* dst)
{
_fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_new_methods);
_fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_all_methods);
fx_free_list_simple(&dst->di_scope);
}
static void _fx_copy_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* src, struct _fx_R19Ast__definterface_t* dst)
{
dst->di_name = src->di_name;
dst->di_base = src->di_base;
FX_COPY_PTR(src->di_new_methods, &dst->di_new_methods);
FX_COPY_PTR(src->di_all_methods, &dst->di_all_methods);
FX_COPY_PTR(src->di_scope, &dst->di_scope);
dst->di_loc = src->di_loc;
}
static void _fx_make_R19Ast__definterface_t(
struct _fx_R9Ast__id_t* r_di_name,
struct _fx_R9Ast__id_t* r_di_base,
struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_new_methods,
struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_all_methods,
struct _fx_LN12Ast__scope_t_data_t* r_di_scope,
struct _fx_R10Ast__loc_t* r_di_loc,
struct _fx_R19Ast__definterface_t* fx_result)
{
fx_result->di_name = *r_di_name;
fx_result->di_base = *r_di_base;
FX_COPY_PTR(r_di_new_methods, &fx_result->di_new_methods);
FX_COPY_PTR(r_di_all_methods, &fx_result->di_all_methods);
FX_COPY_PTR(r_di_scope, &fx_result->di_scope);
fx_result->di_loc = *r_di_loc;
}
static void _fx_free_rR19Ast__definterface_t(struct _fx_rR19Ast__definterface_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR19Ast__definterface_t, _fx_free_R19Ast__definterface_t);
}
static int _fx_make_rR19Ast__definterface_t(
struct _fx_R19Ast__definterface_t* arg,
struct _fx_rR19Ast__definterface_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR19Ast__definterface_t, _fx_copy_R19Ast__definterface_t);
}
// Generated variant (tagged-union) helpers for Ast.id_info_t and a related
// tuple/Dynvec type. Tag values map to variant cases; cases absent from the
// switch carry no heap-managed payload.

// Frees the payload of an id_info_t variant based on its tag, then resets
// the tag to 0 (the generator's "no value" state).
static void _fx_free_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* dst)
{
switch (dst->tag) {
case 2:
_fx_free_R13Ast__defval_t(&dst->u.IdDVal); break;
case 3:
_fx_free_rR13Ast__deffun_t(&dst->u.IdFun); break;
case 4:
_fx_free_rR13Ast__defexn_t(&dst->u.IdExn); break;
case 5:
_fx_free_rR13Ast__deftyp_t(&dst->u.IdTyp); break;
case 6:
_fx_free_rR17Ast__defvariant_t(&dst->u.IdVariant); break;
case 7:
_fx_free_rR19Ast__definterface_t(&dst->u.IdInterface); break;
default:
;
}
dst->tag = 0;
}
// Copies an id_info_t variant: refcounted payloads are shared via
// FX_COPY_PTR; the default case is a plain union copy (no managed data).
static void _fx_copy_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* src, struct _fx_N14Ast__id_info_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 2:
_fx_copy_R13Ast__defval_t(&src->u.IdDVal, &dst->u.IdDVal); break;
case 3:
FX_COPY_PTR(src->u.IdFun, &dst->u.IdFun); break;
case 4:
FX_COPY_PTR(src->u.IdExn, &dst->u.IdExn); break;
case 5:
FX_COPY_PTR(src->u.IdTyp, &dst->u.IdTyp); break;
case 6:
FX_COPY_PTR(src->u.IdVariant, &dst->u.IdVariant); break;
case 7:
FX_COPY_PTR(src->u.IdInterface, &dst->u.IdInterface); break;
default:
dst->u = src->u;
}
}
// Frees the managed fields of an (int, array, id_info_t) tuple;
// t0 is a plain int and needs no cleanup.
static void _fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst)
{
fx_free_arr(&dst->t1);
_fx_free_N14Ast__id_info_t(&dst->t2);
}
// Field-wise copy of the (int, array, id_info_t) tuple.
static void _fx_copy_T3iA1N14Ast__id_info_tN14Ast__id_info_t(
struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* src,
struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst)
{
dst->t0 = src->t0;
fx_copy_arr(&src->t1, &dst->t1);
_fx_copy_N14Ast__id_info_t(&src->t2, &dst->t2);
}
// Constructs the (int, array, id_info_t) tuple in *fx_result from parts.
static void _fx_make_T3iA1N14Ast__id_info_tN14Ast__id_info_t(
int_ t0,
fx_arr_t* t1,
struct _fx_N14Ast__id_info_t* t2,
struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* fx_result)
{
fx_result->t0 = t0;
fx_copy_arr(t1, &fx_result->t1);
_fx_copy_N14Ast__id_info_t(t2, &fx_result->t2);
}
// Decrements the refcount of a Dynvec of id_info_t; frees the payload and
// node when this was the last reference (FX_DECREF returns the old count).
static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
_fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(&(*dst)->u.t); fx_free(*dst);
}
*dst = 0;
}
// Generated helpers for the env-entry list and for the red-black-tree node
// tuple of Map<Ast.id_t, env_entry list> (color, left subtree, key, value,
// right subtree — judging by the t0..t4 layout; the Map module itself is
// defined elsewhere).

// Frees a list of env_entry_t values via the list-free macro.
static void _fx_free_LN16Ast__env_entry_t(struct _fx_LN16Ast__env_entry_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN16Ast__env_entry_t, _fx_free_N16Ast__env_entry_t);
}
// Cons: prepends `hd` to `tl`; element copy here is just a pointer share.
static int _fx_cons_LN16Ast__env_entry_t(
struct _fx_N16Ast__env_entry_t_data_t* hd,
struct _fx_LN16Ast__env_entry_t_data_t* tl,
bool addref_tl,
struct _fx_LN16Ast__env_entry_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN16Ast__env_entry_t, FX_COPY_PTR);
}
// Frees the managed fields of a map-tree node tuple: t1/t4 are the child
// subtrees, t3 is the value list; t0 (color) and t2 (key) are plain values.
static void
_fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(
struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t*
dst)
{
_fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t1);
_fx_free_LN16Ast__env_entry_t(&dst->t3);
_fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t4);
}
// Field-wise copy of the map-tree node tuple; refcounted fields are shared.
static void
_fx_copy_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(
struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t*
src,
struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t*
dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
FX_COPY_PTR(src->t3, &dst->t3);
FX_COPY_PTR(src->t4, &dst->t4);
}
// Constructs a map-tree node tuple in *fx_result from individual fields.
static void
_fx_make_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(
struct _fx_N12Map__color_t* t0,
struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1,
struct _fx_R9Ast__id_t* t2,
struct _fx_LN16Ast__env_entry_t_data_t* t3,
struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4,
struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t*
fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
FX_COPY_PTR(t3, &fx_result->t3);
FX_COPY_PTR(t4, &fx_result->t4);
}
// Decrements the refcount of a map-tree node; on last release frees the
// Node payload tuple and the node allocation itself, then nulls *dst.
static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(
struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
_fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(
&(*dst)->u.Node);
fx_free(*dst);
}
*dst = 0;
}
// Generated helpers: (loc, string) tuple, the Ast.lit_t literal variant,
// a boxed option<typ_t>, and the list of typ_t.

// Frees the (loc_t, string) tuple; only the string (t1) is heap-managed.
static void _fx_free_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* dst)
{
fx_free_str(&dst->t1);
}
// Field-wise copy: loc by value, string via the runtime string copy.
static void _fx_copy_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* src, struct _fx_T2R10Ast__loc_tS* dst)
{
dst->t0 = src->t0;
fx_copy_str(&src->t1, &dst->t1);
}
// Constructs the (loc_t, string) tuple in *fx_result.
static void _fx_make_T2R10Ast__loc_tS(struct _fx_R10Ast__loc_t* t0, fx_str_t* t1, struct _fx_T2R10Ast__loc_tS* fx_result)
{
fx_result->t0 = *t0;
fx_copy_str(t1, &fx_result->t1);
}
// Frees a literal variant: only the string case (tag 5, LitString) owns
// heap data; all other cases are plain values. Tag is reset to 0.
static void _fx_free_N10Ast__lit_t(struct _fx_N10Ast__lit_t* dst)
{
switch (dst->tag) {
case 5:
fx_free_str(&dst->u.LitString); break;
default:
;
}
dst->tag = 0;
}
// Copies a literal variant; strings are deep/refcounted via fx_copy_str,
// everything else is a raw union copy.
static void _fx_copy_N10Ast__lit_t(struct _fx_N10Ast__lit_t* src, struct _fx_N10Ast__lit_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 5:
fx_copy_str(&src->u.LitString, &dst->u.LitString); break;
default:
dst->u = src->u;
}
}
// Releases a boxed option<typ_t> reference.
static void _fx_free_rNt6option1N10Ast__typ_t(struct _fx_rNt6option1N10Ast__typ_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, _fx_free_Nt6option1N10Ast__typ_t);
}
// Allocates a boxed option<typ_t> sharing the payload pointer.
static int _fx_make_rNt6option1N10Ast__typ_t(
struct _fx_Nt6option1N10Ast__typ_t_data_t* arg,
struct _fx_rNt6option1N10Ast__typ_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, FX_COPY_PTR);
}
// Frees a list of typ_t values.
static void _fx_free_LN10Ast__typ_t(struct _fx_LN10Ast__typ_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN10Ast__typ_t, _fx_free_N10Ast__typ_t);
}
// Cons for the typ_t list; elements are shared by pointer.
static int _fx_cons_LN10Ast__typ_t(
struct _fx_N10Ast__typ_t_data_t* hd,
struct _fx_LN10Ast__typ_t_data_t* tl,
bool addref_tl,
struct _fx_LN10Ast__typ_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN10Ast__typ_t, FX_COPY_PTR);
}
// Generated helpers for type-expression tuples: (typ list, typ) — function
// types; (int, typ) — array types; and the record-field tuple
// (val_flags, id, typ, exp) plus its list and boxed (record) forms.

// Frees the (typ list, typ) tuple; both fields are refcounted.
static void _fx_free_T2LN10Ast__typ_tN10Ast__typ_t(struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst)
{
_fx_free_LN10Ast__typ_t(&dst->t0);
_fx_free_N10Ast__typ_t(&dst->t1);
}
// Copy by pointer-sharing both fields.
static void _fx_copy_T2LN10Ast__typ_tN10Ast__typ_t(
struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* src,
struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
// Constructs the (typ list, typ) tuple in *fx_result.
static void _fx_make_T2LN10Ast__typ_tN10Ast__typ_t(
struct _fx_LN10Ast__typ_t_data_t* t0,
struct _fx_N10Ast__typ_t_data_t* t1,
struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
// Frees the (int, typ) tuple; the int needs no cleanup.
static void _fx_free_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* dst)
{
_fx_free_N10Ast__typ_t(&dst->t1);
}
// Copy of the (int, typ) tuple.
static void _fx_copy_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* src, struct _fx_T2iN10Ast__typ_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
// Constructs the (int, typ) tuple in *fx_result.
static void _fx_make_T2iN10Ast__typ_t(int_ t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2iN10Ast__typ_t* fx_result)
{
fx_result->t0 = t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
// Frees the managed fields of a (val_flags, id, typ, exp) tuple; t1 (id)
// is a plain value.
static void _fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(
struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst)
{
_fx_free_R16Ast__val_flags_t(&dst->t0);
_fx_free_N10Ast__typ_t(&dst->t2);
_fx_free_N10Ast__exp_t(&dst->t3);
}
// Field-wise copy of the (val_flags, id, typ, exp) tuple.
static void _fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(
struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* src,
struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst)
{
_fx_copy_R16Ast__val_flags_t(&src->t0, &dst->t0);
dst->t1 = src->t1;
FX_COPY_PTR(src->t2, &dst->t2);
FX_COPY_PTR(src->t3, &dst->t3);
}
// Constructs the (val_flags, id, typ, exp) tuple in *fx_result.
static void _fx_make_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(
struct _fx_R16Ast__val_flags_t* t0,
struct _fx_R9Ast__id_t* t1,
struct _fx_N10Ast__typ_t_data_t* t2,
struct _fx_N10Ast__exp_t_data_t* t3,
struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* fx_result)
{
_fx_copy_R16Ast__val_flags_t(t0, &fx_result->t0);
fx_result->t1 = *t1;
FX_COPY_PTR(t2, &fx_result->t2);
FX_COPY_PTR(t3, &fx_result->t3);
}
// Frees a list of (val_flags, id, typ, exp) tuples.
static void _fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(
struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t,
_fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t);
}
// Cons for that list; uses the tuple's deep-copy helper for the head.
static int _fx_cons_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(
struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* hd,
struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl,
bool addref_tl,
struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t,
_fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t);
}
// Frees the (field list, bool) pair; the bool (t1) needs no cleanup.
static void _fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(
struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst)
{
_fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(&dst->t0);
}
// Copy of the (field list, bool) pair.
static void _fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(
struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* src,
struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
// Constructs the (field list, bool) pair in *fx_result.
static void _fx_make_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(
struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0,
bool t1,
struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = t1;
}
// Releases a boxed (field list, bool) pair — the TypRecord payload.
static void _fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(
struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB,
_fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB);
}
// Allocates a boxed (field list, bool) pair from *arg.
static int _fx_make_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(
struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* arg,
struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB,
_fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB);
}
// Generated helpers: (typ list, id) tuple (TypApp payload) and the
// destructors for the boxed typ_t and binary_t variants.

// Frees the (typ list, id) tuple; t1 (id) is a plain value.
static void _fx_free_T2LN10Ast__typ_tR9Ast__id_t(struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst)
{
_fx_free_LN10Ast__typ_t(&dst->t0);
}
// Copy of the (typ list, id) tuple.
static void _fx_copy_T2LN10Ast__typ_tR9Ast__id_t(
struct _fx_T2LN10Ast__typ_tR9Ast__id_t* src,
struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
// Constructs the (typ list, id) tuple in *fx_result.
static void _fx_make_T2LN10Ast__typ_tR9Ast__id_t(
struct _fx_LN10Ast__typ_t_data_t* t0,
struct _fx_R9Ast__id_t* t1,
struct _fx_T2LN10Ast__typ_tR9Ast__id_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
// Destructor for the boxed typ_t variant. Decrements the refcount; on last
// release dispatches on the tag to free the case payload (TypVar, TypFun,
// TypTuple, TypRecord, TypApp, ...), frees the node, then nulls *dst.
// Note: recursive — nested types (e.g. TypList of TypList) free recursively.
static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
switch ((*dst)->tag) {
case 1:
_fx_free_rNt6option1N10Ast__typ_t(&(*dst)->u.TypVar); break;
case 2:
_fx_free_Nt6option1N10Ast__typ_t(&(*dst)->u.TypVarTuple); break;
case 3:
_fx_free_N10Ast__typ_t(&(*dst)->u.TypVarArray); break;
case 13:
_fx_free_T2LN10Ast__typ_tN10Ast__typ_t(&(*dst)->u.TypFun); break;
case 14:
_fx_free_N10Ast__typ_t(&(*dst)->u.TypList); break;
case 15:
_fx_free_N10Ast__typ_t(&(*dst)->u.TypVector); break;
case 16:
_fx_free_LN10Ast__typ_t(&(*dst)->u.TypTuple); break;
case 17:
_fx_free_N10Ast__typ_t(&(*dst)->u.TypRef); break;
case 18:
_fx_free_T2iN10Ast__typ_t(&(*dst)->u.TypArray); break;
case 19:
_fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(&(*dst)->u.TypRecord); break;
case 23:
_fx_free_T2LN10Ast__typ_tR9Ast__id_t(&(*dst)->u.TypApp); break;
default:
;
}
fx_free(*dst);
}
*dst = 0;
}
// Destructor for the boxed binary-operator variant; only the OpAugBinary
// case (tag 27) holds a nested binary_t payload to release.
static void _fx_free_N13Ast__binary_t(struct _fx_N13Ast__binary_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
switch ((*dst)->tag) {
case 27:
_fx_free_N13Ast__binary_t(&(*dst)->u.OpAugBinary); break;
default:
;
}
fx_free(*dst);
}
*dst = 0;
}
// Generated helpers for expression-node payload tuples. The (typ, loc)
// pair appears in most of them — presumably the expression "context"
// (inferred type + source location) attached to every exp node.

// Frees the (option<exp>, loc) pair; loc is a plain value.
static void _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst)
{
_fx_free_Nt6option1N10Ast__exp_t(&dst->t0);
}
// Copy of the (option<exp>, loc) pair.
static void _fx_copy_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(
struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* src,
struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
// Constructs the (option<exp>, loc) pair in *fx_result.
static void _fx_make_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(
struct _fx_Nt6option1N10Ast__exp_t_data_t* t0,
struct _fx_R10Ast__loc_t* t1,
struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
// Frees the (typ, loc) context pair; only the typ is refcounted.
static void _fx_free_T2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__typ_t(&dst->t0);
}
// Copy of the (typ, loc) context pair.
static void _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
// Constructs the (typ, loc) context pair in *fx_result.
static void _fx_make_T2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N10Ast__typ_t_data_t* t0,
struct _fx_R10Ast__loc_t* t1,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
// Frees the (option<exp> x3, (typ, loc)) tuple — a range-like payload with
// three optional sub-expressions plus context.
static void _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_Nt6option1N10Ast__exp_t(&dst->t0);
_fx_free_Nt6option1N10Ast__exp_t(&dst->t1);
_fx_free_Nt6option1N10Ast__exp_t(&dst->t2);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}
// Copy of the (option<exp> x3, context) tuple.
static void _fx_copy_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}
// Constructs the (option<exp> x3, context) tuple in *fx_result.
static void _fx_make_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_Nt6option1N10Ast__exp_t_data_t* t0,
struct _fx_Nt6option1N10Ast__exp_t_data_t* t1,
struct _fx_Nt6option1N10Ast__exp_t_data_t* t2,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}
// Frees the (lit, context) pair.
static void _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__lit_t(&dst->t0);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}
// Copy of the (lit, context) pair.
static void _fx_copy_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_copy_N10Ast__lit_t(&src->t0, &dst->t0);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}
// Constructs the (lit, context) pair in *fx_result.
static void _fx_make_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N10Ast__lit_t* t0,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
_fx_copy_N10Ast__lit_t(t0, &fx_result->t0);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}
// Frees the (id, context) pair; the id is a plain value.
static void _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}
// Copy of the (id, context) pair.
static void _fx_copy_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
dst->t0 = src->t0;
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}
// Constructs the (id, context) pair in *fx_result.
static void _fx_make_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
fx_result->t0 = *t0;
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}
// Generated helpers for binary/unary/intrinsic expression payload tuples
// and the list of expressions.

// Frees the (binary-op, exp, exp, context) tuple.
static void _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N13Ast__binary_t(&dst->t0);
_fx_free_N10Ast__exp_t(&dst->t1);
_fx_free_N10Ast__exp_t(&dst->t2);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}
// Copy of the (binary-op, exp, exp, context) tuple.
static void _fx_copy_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}
// Constructs the (binary-op, exp, exp, context) tuple in *fx_result.
static void _fx_make_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N13Ast__binary_t_data_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_N10Ast__exp_t_data_t* t2,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}
// Frees the (unary-op, exp, context) tuple; t0 (unary op) is plain.
static void _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t1);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
// Copy of the (unary-op, exp, context) tuple.
static void _fx_copy_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
// Constructs the (unary-op, exp, context) tuple in *fx_result.
static void _fx_make_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N12Ast__unary_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
// Frees a list of expressions.
static void _fx_free_LN10Ast__exp_t(struct _fx_LN10Ast__exp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN10Ast__exp_t, _fx_free_N10Ast__exp_t);
}
// Cons for the expression list; elements are shared by pointer.
static int _fx_cons_LN10Ast__exp_t(
struct _fx_N10Ast__exp_t_data_t* hd,
struct _fx_LN10Ast__exp_t_data_t* tl,
bool addref_tl,
struct _fx_LN10Ast__exp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN10Ast__exp_t, FX_COPY_PTR);
}
// Frees the (intrinsic, exp list, context) tuple; t0 (intrinsic) is plain.
static void _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_LN10Ast__exp_t(&dst->t1);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
// Copy of the (intrinsic, exp list, context) tuple.
static void _fx_copy_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
// Constructs the (intrinsic, exp list, context) tuple in *fx_result.
static void _fx_make_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N13Ast__intrin_t* t0,
struct _fx_LN10Ast__exp_t_data_t* t1,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
// Generated helpers: (id, exp) pair and its list, (exp list, context) and
// (list-of-exp-lists, context) tuples, and the (exp, (id,exp) list, context)
// tuple (presumably a record-construction/update payload).

// Frees the (id, exp) pair; the id is a plain value.
static void _fx_free_T2R9Ast__id_tN10Ast__exp_t(struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t1);
}
// Copy of the (id, exp) pair.
static void _fx_copy_T2R9Ast__id_tN10Ast__exp_t(
struct _fx_T2R9Ast__id_tN10Ast__exp_t* src,
struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
// Constructs the (id, exp) pair in *fx_result.
static void _fx_make_T2R9Ast__id_tN10Ast__exp_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_T2R9Ast__id_tN10Ast__exp_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
// Frees the (exp list, context) pair.
static void _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_LN10Ast__exp_t(&dst->t0);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}
// Copy of the (exp list, context) pair.
static void _fx_copy_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}
// Constructs the (exp list, context) pair in *fx_result.
static void _fx_make_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_LN10Ast__exp_t_data_t* t0,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}
// Frees a list of expression lists.
static void _fx_free_LLN10Ast__exp_t(struct _fx_LLN10Ast__exp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LLN10Ast__exp_t, _fx_free_LN10Ast__exp_t);
}
// Cons for the list of expression lists; head shared by pointer.
static int _fx_cons_LLN10Ast__exp_t(
struct _fx_LN10Ast__exp_t_data_t* hd,
struct _fx_LLN10Ast__exp_t_data_t* tl,
bool addref_tl,
struct _fx_LLN10Ast__exp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LLN10Ast__exp_t, FX_COPY_PTR);
}
// Frees the (list-of-exp-lists, context) pair.
static void _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_LLN10Ast__exp_t(&dst->t0);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}
// Copy of the (list-of-exp-lists, context) pair.
static void _fx_copy_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}
// Constructs the (list-of-exp-lists, context) pair in *fx_result.
static void _fx_make_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_LLN10Ast__exp_t_data_t* t0,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}
// Frees a list of (id, exp) pairs.
static void _fx_free_LT2R9Ast__id_tN10Ast__exp_t(struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_free_T2R9Ast__id_tN10Ast__exp_t);
}
// Cons for the (id, exp) list; head copied with the pair's copy helper.
static int _fx_cons_LT2R9Ast__id_tN10Ast__exp_t(
struct _fx_T2R9Ast__id_tN10Ast__exp_t* hd,
struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl,
bool addref_tl,
struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_copy_T2R9Ast__id_tN10Ast__exp_t);
}
// Frees the (exp, (id,exp) list, context) tuple.
static void _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t0);
_fx_free_LT2R9Ast__id_tN10Ast__exp_t(&dst->t1);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
// Copy of the (exp, (id,exp) list, context) tuple.
static void _fx_copy_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
// Constructs the (exp, (id,exp) list, context) tuple in *fx_result.
static void _fx_make_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N10Ast__exp_t_data_t* t0,
struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
// Generated helpers for call/index-like payloads: (exp, exp list, context),
// (exp, border, interpolate, exp list, context), and several small
// exp/loc tuples.

// Frees the (exp, exp list, context) tuple.
static void _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t0);
_fx_free_LN10Ast__exp_t(&dst->t1);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
// Copy of the (exp, exp list, context) tuple.
static void _fx_copy_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
// Constructs the (exp, exp list, context) tuple in *fx_result.
static void _fx_make_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N10Ast__exp_t_data_t* t0,
struct _fx_LN10Ast__exp_t_data_t* t1,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
// Frees the (exp, border, interpolate, exp list, context) tuple;
// border/interpolate (t1, t2) are plain enum-like values.
static void _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t0);
_fx_free_LN10Ast__exp_t(&dst->t3);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t4);
}
// Copy of the (exp, border, interpolate, exp list, context) tuple.
static void _fx_copy_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
dst->t2 = src->t2;
FX_COPY_PTR(src->t3, &dst->t3);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t4, &dst->t4);
}
// Constructs the (exp, border, interpolate, exp list, context) tuple.
static void _fx_make_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N10Ast__exp_t_data_t* t0,
struct _fx_N13Ast__border_t* t1,
struct _fx_N18Ast__interpolate_t* t2,
struct _fx_LN10Ast__exp_t_data_t* t3,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t4,
struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
fx_result->t2 = *t2;
FX_COPY_PTR(t3, &fx_result->t3);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t4, &fx_result->t4);
}
// Frees the (exp, exp, loc) tuple; loc (t2) is plain.
static void _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t0);
_fx_free_N10Ast__exp_t(&dst->t1);
}
// Copy of the (exp, exp, loc) tuple.
static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(
struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* src,
struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
// Constructs the (exp, exp, loc) tuple in *fx_result.
static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(
struct _fx_N10Ast__exp_t_data_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
// Frees the (exp, exp, context) tuple.
static void _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t0);
_fx_free_N10Ast__exp_t(&dst->t1);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
// Copy of the (exp, exp, context) tuple.
static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
// Constructs the (exp, exp, context) tuple in *fx_result.
static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N10Ast__exp_t_data_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
// Frees the (exp, loc) pair; loc is plain.
static void _fx_free_T2N10Ast__exp_tR10Ast__loc_t(struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t0);
}
// Copy of the (exp, loc) pair.
static void _fx_copy_T2N10Ast__exp_tR10Ast__loc_t(
struct _fx_T2N10Ast__exp_tR10Ast__loc_t* src,
struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
// Constructs the (exp, loc) pair in *fx_result.
static void _fx_make_T2N10Ast__exp_tR10Ast__loc_t(
struct _fx_N10Ast__exp_t_data_t* t0,
struct _fx_R10Ast__loc_t* t1,
struct _fx_T2N10Ast__exp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
// Generated helpers: (exp, exp, exp, context) tuple, (pat, exp) pair and
// its list, and the for-loop payload tuple
// ((pat,exp) list, pat, exp, for_flags, loc).

// Frees the (exp, exp, exp, context) tuple — e.g. an if/then/else-like node.
static void _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_N10Ast__exp_t(&dst->t0);
_fx_free_N10Ast__exp_t(&dst->t1);
_fx_free_N10Ast__exp_t(&dst->t2);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}
// Copy of the (exp, exp, exp, context) tuple.
static void _fx_copy_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}
// Constructs the (exp, exp, exp, context) tuple in *fx_result.
static void _fx_make_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_N10Ast__exp_t_data_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_N10Ast__exp_t_data_t* t2,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}
// Frees the (pattern, exp) pair.
static void _fx_free_T2N10Ast__pat_tN10Ast__exp_t(struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst)
{
_fx_free_N10Ast__pat_t(&dst->t0);
_fx_free_N10Ast__exp_t(&dst->t1);
}
// Copy of the (pattern, exp) pair.
static void _fx_copy_T2N10Ast__pat_tN10Ast__exp_t(
struct _fx_T2N10Ast__pat_tN10Ast__exp_t* src,
struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
// Constructs the (pattern, exp) pair in *fx_result.
static void _fx_make_T2N10Ast__pat_tN10Ast__exp_t(
struct _fx_N10Ast__pat_t_data_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_T2N10Ast__pat_tN10Ast__exp_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
// Frees a list of (pattern, exp) pairs.
static void _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_free_T2N10Ast__pat_tN10Ast__exp_t);
}
// Cons for the (pattern, exp) list.
static int _fx_cons_LT2N10Ast__pat_tN10Ast__exp_t(
struct _fx_T2N10Ast__pat_tN10Ast__exp_t* hd,
struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl,
bool addref_tl,
struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_copy_T2N10Ast__pat_tN10Ast__exp_t);
}
// Frees the for-loop payload ((pat,exp) list, pat, exp, flags, loc);
// flags (t3) and loc (t4) are plain values.
static void _fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
_fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0);
_fx_free_N10Ast__pat_t(&dst->t1);
_fx_free_N10Ast__exp_t(&dst->t2);
}
// Copy of the for-loop payload tuple.
static void _fx_copy_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* src,
struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
dst->t3 = src->t3;
dst->t4 = src->t4;
}
// Constructs the for-loop payload tuple in *fx_result.
static void _fx_make_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0,
struct _fx_N10Ast__pat_t_data_t* t1,
struct _fx_N10Ast__exp_t_data_t* t2,
struct _fx_R16Ast__for_flags_t* t3,
struct _fx_R10Ast__loc_t* t4,
struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
fx_result->t3 = *t3;
fx_result->t4 = *t4;
}
static void _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst)
{
_fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0);
_fx_free_N10Ast__pat_t(&dst->t1);
}
static void _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* src,
struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0,
struct _fx_N10Ast__pat_t_data_t* t1,
struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
static void _fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t, _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t);
}
static int _fx_cons_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* hd,
struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl,
bool addref_tl,
struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t, _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t);
}
static void
_fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
_fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(&dst->t0);
_fx_free_N10Ast__exp_t(&dst->t1);
_fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}
static void
_fx_copy_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* src,
struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void
_fx_make_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0,
struct _fx_N10Ast__exp_t_data_t* t1,
struct _fx_R16Ast__for_flags_t* t2,
struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t*
fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
_fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* Generated helpers for the remaining expression payload tuples:
 * try/catch & match, cast/typed, inline C code, data, defval, and the
 * import/pragma directives. Same conventions as above: free releases
 * only heap-owned fields, copy shares pointers via FX_COPY_PTR
 * (presumably refcounted -- per Ficus runtime), make builds in place. */

/* Free (exp, (pat, exp) list, (typ, loc)) -- payload of ExpTryCatch/ExpMatch. */
static void _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
/* Copy (exp, (pat, exp) list, (typ, loc)). */
static void _fx_copy_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Make (exp, (pat, exp) list, (typ, loc)) from components. */
static void _fx_make_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Free (exp, typ, (typ, loc)) -- payload of ExpCast/ExpTyped. */
static void _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__typ_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
/* Copy (exp, typ, (typ, loc)). */
static void _fx_copy_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Make (exp, typ, (typ, loc)) from components. */
static void _fx_make_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__typ_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Free (string, (typ, loc)) -- payload of ExpCCode. */
static void _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    fx_free_str(&dst->t0);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}
/* Copy (string, (typ, loc)). */
static void _fx_copy_T2ST2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    fx_copy_str(&src->t0, &dst->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}
/* Make (string, (typ, loc)) from components. */
static void _fx_make_T2ST2N10Ast__typ_tR10Ast__loc_t(
    fx_str_t* t0,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
    struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    fx_copy_str(t0, &fx_result->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Free (string, string, (typ, loc)) -- payload of ExpData. */
static void _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    fx_free_str(&dst->t0);
    fx_free_str(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}
/* Copy (string, string, (typ, loc)). */
static void _fx_copy_T3SST2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    fx_copy_str(&src->t0, &dst->t0);
    fx_copy_str(&src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Make (string, string, (typ, loc)) from components. */
static void _fx_make_T3SST2N10Ast__typ_tR10Ast__loc_t(
    fx_str_t* t0,
    fx_str_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    fx_copy_str(t0, &fx_result->t0);
    fx_copy_str(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Free (pat, exp, val_flags, loc) -- payload of DefVal; t3 (loc) is POD,
 * and val_flags has its own free helper (may own storage). */
static void _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_R16Ast__val_flags_t(&dst->t2);
}
/* Copy (pat, exp, val_flags, loc). */
static void _fx_copy_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* src,
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_R16Ast__val_flags_t(&src->t2, &dst->t2);
    dst->t3 = src->t3;
}
/* Make (pat, exp, val_flags, loc) from components. */
static void _fx_make_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_R16Ast__val_flags_t* t2,
    struct _fx_R10Ast__loc_t* t3,
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_R16Ast__val_flags_t(t2, &fx_result->t2);
    fx_result->t3 = *t3;
}
/* Cons for a list of (int, id) pairs; elements are POD, so the macro is
 * instantiated with FX_COPY_SIMPLE_BY_PTR. */
static int _fx_cons_LT2iR9Ast__id_t(
    struct _fx_T2iR9Ast__id_t* hd,
    struct _fx_LT2iR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2iR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2iR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR);
}
/* Free ((int, id) list, loc) -- payload of DirImport. The element type is
 * POD, so the generic fx_free_list_simple suffices; t1 (loc) is POD. */
static void _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst)
{
    fx_free_list_simple(&dst->t0);
}
/* Copy ((int, id) list, loc). */
static void _fx_copy_T2LT2iR9Ast__id_tR10Ast__loc_t(
    struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* src,
    struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}
/* Make ((int, id) list, loc) from components. */
static void _fx_make_T2LT2iR9Ast__id_tR10Ast__loc_t(
    struct _fx_LT2iR9Ast__id_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* Free (int, id list, loc) -- payload of DirImportFrom; only the list
 * (t1, POD elements) owns storage. */
static void _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst)
{
    fx_free_list_simple(&dst->t1);
}
/* Copy (int, id list, loc). */
static void _fx_copy_T3iLR9Ast__id_tR10Ast__loc_t(
    struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* src,
    struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Make (int, id list, loc) from components. */
static void _fx_make_T3iLR9Ast__id_tR10Ast__loc_t(
    int_ t0,
    struct _fx_LR9Ast__id_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Free (string list, loc) -- payload of DirPragma. */
static void _fx_free_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* dst)
{
    _fx_free_LS(&dst->t0);
}
/* Copy (string list, loc). */
static void _fx_copy_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* src, struct _fx_T2LSR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}
/* Make (string list, loc) from components. */
static void _fx_make_T2LSR10Ast__loc_t(
    struct _fx_LS_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2LSR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* Destructor for the refcounted Ast.exp_t variant. Decrements the
 * reference count and, when this was apparently the last reference
 * (FX_DECREF(...) == 1 -- NOTE(review): assumes FX_DECREF returns the
 * pre-decrement value; confirm against the Ficus runtime), dispatches on
 * the variant tag to free that constructor's payload, then frees the
 * node itself. Tags not listed (and the default) carry no heap-owned
 * payload. Always nulls *dst so the caller's pointer is safe to reuse. */
static void _fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 4:
            _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpReturn); break;
        case 5:
            _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
                &(*dst)->u.ExpRange);
            break;
        case 6:
            _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpLit); break;
        case 7:
            _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIdent); break;
        case 8:
            _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpBinary); break;
        case 9:
            _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUnary); break;
        case 10:
            _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIntrin); break;
        case 11:
            _fx_free_T2R9Ast__id_tN10Ast__exp_t(&(*dst)->u.ExpSync); break;
        case 12:
            _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpSeq); break;
        case 13:
            _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkTuple); break;
        case 14:
            _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkArray); break;
        case 15:
            _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkVector); break;
        case 16:
            _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkRecord); break;
        case 17:
            _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUpdateRecord); break;
        case 18:
            _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCall); break;
        case 19:
            _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
                &(*dst)->u.ExpAt);
            break;
        case 20:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpAssign); break;
        case 21:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMem); break;
        case 22:
            _fx_free_T2N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpThrow); break;
        case 23:
            _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIf); break;
        case 24:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpWhile); break;
        case 25:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpDoWhile); break;
        case 26:
            _fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(&(*dst)->u.ExpFor);
            break;
        case 27:
            _fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
                &(*dst)->u.ExpMap);
            break;
        case 28:
            _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTryCatch); break;
        case 29:
            _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMatch); break;
        case 30:
            _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCast); break;
        case 31:
            _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTyped); break;
        case 32:
            _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCCode); break;
        case 33:
            _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpData); break;
        case 34:
            _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(&(*dst)->u.DefVal); break;
        case 35:
            _fx_free_rR13Ast__deffun_t(&(*dst)->u.DefFun); break;
        case 36:
            _fx_free_rR13Ast__defexn_t(&(*dst)->u.DefExn); break;
        case 37:
            _fx_free_rR13Ast__deftyp_t(&(*dst)->u.DefTyp); break;
        case 38:
            _fx_free_rR17Ast__defvariant_t(&(*dst)->u.DefVariant); break;
        case 39:
            _fx_free_rR19Ast__definterface_t(&(*dst)->u.DefInterface); break;
        case 40:
            _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImport); break;
        case 41:
            _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImportFrom); break;
        case 42:
            _fx_free_T2LSR10Ast__loc_t(&(*dst)->u.DirPragma); break;
        default:
            /* variants with no heap-owned payload: nothing to free */
            ;
        }
        fx_free(*dst);
    }
    *dst = 0;
}
/* Generated helpers for the pattern (Ast.pat_t) payload tuples:
 * PatLit, PatTuple/PatAlt, PatVariant, PatRecord, PatCons, PatAs,
 * PatTyped, PatWhen, PatRef. Conventions as above; R9Ast__id_t and
 * R10Ast__loc_t fields are POD (assigned in copy, skipped in free). */

/* Free (lit, loc) -- payload of PatLit. */
static void _fx_free_T2N10Ast__lit_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__lit_t(&dst->t0);
}
/* Copy (lit, loc); lit is a by-value variant with its own copy helper. */
static void _fx_copy_T2N10Ast__lit_tR10Ast__loc_t(
    struct _fx_T2N10Ast__lit_tR10Ast__loc_t* src,
    struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst)
{
    _fx_copy_N10Ast__lit_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
}
/* Make (lit, loc) from components. */
static void _fx_make_T2N10Ast__lit_tR10Ast__loc_t(
    struct _fx_N10Ast__lit_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2N10Ast__lit_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N10Ast__lit_t(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* Free (pat list, loc) -- payload of PatTuple/PatAlt. */
static void _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_LN10Ast__pat_t(&dst->t0);
}
/* Copy (pat list, loc). */
static void _fx_copy_T2LN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}
/* Make (pat list, loc) from components. */
static void _fx_make_T2LN10Ast__pat_tR10Ast__loc_t(
    struct _fx_LN10Ast__pat_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* Free (id, pat list, loc) -- payload of PatVariant; t0/t2 are POD. */
static void _fx_free_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_LN10Ast__pat_t(&dst->t1);
}
/* Copy (id, pat list, loc). */
static void _fx_copy_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Make (id, pat list, loc) from components. */
static void _fx_make_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LN10Ast__pat_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Free (id, pat) -- one field binding of a record pattern. */
static void _fx_free_T2R9Ast__id_tN10Ast__pat_t(struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t1);
}
/* Copy (id, pat). */
static void _fx_copy_T2R9Ast__id_tN10Ast__pat_t(
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* src,
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
/* Make (id, pat) from components. */
static void _fx_make_T2R9Ast__id_tN10Ast__pat_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* Free a list of (id, pat) bindings. */
static void _fx_free_LT2R9Ast__id_tN10Ast__pat_t(struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_free_T2R9Ast__id_tN10Ast__pat_t);
}
/* Cons hd onto tl for the (id, pat) binding list. */
static int _fx_cons_LT2R9Ast__id_tN10Ast__pat_t(
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* hd,
    struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_copy_T2R9Ast__id_tN10Ast__pat_t);
}
/* Free (id option, (id, pat) list, loc) -- payload of PatRecord;
 * t0 (option of POD id) and t2 (loc) need no cleanup. */
static void _fx_free_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_LT2R9Ast__id_tN10Ast__pat_t(&dst->t1);
}
/* Copy (id option, (id, pat) list, loc). */
static void _fx_copy_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Make (id option, (id, pat) list, loc) from components. */
static void _fx_make_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_Nt6option1R9Ast__id_t* t0,
    struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Free (pat, pat, loc) -- payload of PatCons. */
static void _fx_free_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__pat_t(&dst->t1);
}
/* Copy (pat, pat, loc). */
static void _fx_copy_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Make (pat, pat, loc) from components. */
static void _fx_make_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Free (pat, id, loc) -- payload of PatAs; id/loc are POD. */
static void _fx_free_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
}
/* Copy (pat, id, loc). */
static void _fx_copy_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
    dst->t2 = src->t2;
}
/* Make (pat, id, loc) from components. */
static void _fx_make_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_R9Ast__id_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
    fx_result->t2 = *t2;
}
/* Free (pat, typ, loc) -- payload of PatTyped. */
static void _fx_free_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__typ_t(&dst->t1);
}
/* Copy (pat, typ, loc). */
static void _fx_copy_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Make (pat, typ, loc) from components. */
static void _fx_make_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__typ_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Free (pat, exp, loc) -- payload of PatWhen. */
static void _fx_free_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
}
/* Copy (pat, exp, loc). */
static void _fx_copy_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Make (pat, exp, loc) from components. */
static void _fx_make_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Free (pat, loc) -- payload of PatRef. */
static void _fx_free_T2N10Ast__pat_tR10Ast__loc_t(struct _fx_T2N10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
}
/* Copy (pat, loc). */
static void _fx_copy_T2N10Ast__pat_tR10Ast__loc_t(
    struct _fx_T2N10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T2N10Ast__pat_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}
/* Make (pat, loc) from components. */
static void _fx_make_T2N10Ast__pat_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2N10Ast__pat_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* Destructor for the refcounted Ast.pat_t variant. Mirrors
 * _fx_free_N10Ast__exp_t: decrement refcount, free the tagged payload
 * when apparently dropping the last reference (NOTE(review): assumes
 * FX_DECREF returns the pre-decrement count), free the node, null the
 * caller's pointer. Unlisted tags own no heap payload. */
static void _fx_free_N10Ast__pat_t(struct _fx_N10Ast__pat_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 2:
            _fx_free_T2N10Ast__lit_tR10Ast__loc_t(&(*dst)->u.PatLit); break;
        case 4:
            _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatTuple); break;
        case 5:
            _fx_free_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatVariant); break;
        case 6:
            _fx_free_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatRecord); break;
        case 7:
            _fx_free_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatCons); break;
        case 8:
            _fx_free_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(&(*dst)->u.PatAs); break;
        case 9:
            _fx_free_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(&(*dst)->u.PatTyped); break;
        case 10:
            _fx_free_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.PatWhen); break;
        case 11:
            _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatAlt); break;
        case 12:
            _fx_free_T2N10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatRef); break;
        default:
            /* variants with no heap-owned payload: nothing to free */
            ;
        }
        fx_free(*dst);
    }
    *dst = 0;
}
/* Destructor for the refcounted Ast.env_entry_t variant; only tag 2
 * (EnvTyp) owns a heap payload. */
static void _fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) { case 2: _fx_free_N10Ast__typ_t(&(*dst)->u.EnvTyp); break; default: ; } fx_free(*dst);
    }
    *dst = 0;
}
/* Cons for a plain int list (elements copied by value via FX_COPY_SIMPLE). */
static int _fx_cons_Li(int_ hd, struct _fx_Li_data_t* tl, bool addref_tl, struct _fx_Li_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_Li, FX_COPY_SIMPLE);
}
/* Free the 10-field defmodule payload. Heap-owned fields: t1 (name
 * string), t4 (exp list), t5 (int list), t6 (id->env-entry map),
 * t9 (id-info dynvec); the rest (id, ints, bools) are POD. */
static void _fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t(
    struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst)
{
    fx_free_str(&dst->t1);
    _fx_free_LN10Ast__exp_t(&dst->t4);
    fx_free_list_simple(&dst->t5);
    _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t6);
    _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(&dst->t9);
}
/* Copy the 10-field defmodule payload (POD fields assigned, owned fields
 * copied/shared). */
static void _fx_copy_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t(
    struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* src,
    struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst)
{
    dst->t0 = src->t0;
    fx_copy_str(&src->t1, &dst->t1);
    dst->t2 = src->t2;
    dst->t3 = src->t3;
    FX_COPY_PTR(src->t4, &dst->t4);
    FX_COPY_PTR(src->t5, &dst->t5);
    _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->t6, &dst->t6);
    dst->t7 = src->t7;
    dst->t8 = src->t8;
    FX_COPY_PTR(src->t9, &dst->t9);
}
/* Make the 10-field defmodule payload from components. */
static void _fx_make_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t(
    struct _fx_R9Ast__id_t* t0,
    fx_str_t* t1,
    int_ t2,
    bool t3,
    struct _fx_LN10Ast__exp_t_data_t* t4,
    struct _fx_Li_data_t* t5,
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* t6,
    bool t7,
    int_ t8,
    struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9,
    struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t*
    fx_result)
{
    fx_result->t0 = *t0;
    fx_copy_str(t1, &fx_result->t1);
    fx_result->t2 = t2;
    fx_result->t3 = t3;
    FX_COPY_PTR(t4, &fx_result->t4);
    FX_COPY_PTR(t5, &fx_result->t5);
    _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(t6, &fx_result->t6);
    fx_result->t7 = t7;
    fx_result->t8 = t8;
    FX_COPY_PTR(t9, &fx_result->t9);
}
/* Destructor for the refcounted Ast.defmodule_t node: free the single
 * payload tuple, then the node, then null the caller's pointer. */
static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        _fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t(
            &(*dst)->u.defmodule_t);
        fx_free(*dst);
    }
    *dst = 0;
}
/* Free a list of exceptions (runtime fx_exn_t elements). */
static void _fx_free_LE(struct _fx_LE_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LE, fx_free_exn);
}
/* Cons an exception onto an exception list. */
static int _fx_cons_LE(fx_exn_t* hd, struct _fx_LE_data_t* tl, bool addref_tl, struct _fx_LE_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LE, fx_copy_exn);
}
/* Free (bool, string); only the string owns storage. */
static void _fx_free_T2BS(struct _fx_T2BS* dst)
{
    fx_free_str(&dst->t1);
}
/* Copy (bool, string). */
static void _fx_copy_T2BS(struct _fx_T2BS* src, struct _fx_T2BS* dst)
{
    dst->t0 = src->t0;
    fx_copy_str(&src->t1, &dst->t1);
}
/* Make (bool, string) from components. */
static void _fx_make_T2BS(bool t0, fx_str_t* t1, struct _fx_T2BS* fx_result)
{
    fx_result->t0 = t0;
    fx_copy_str(t1, &fx_result->t1);
}
/* Destructor for the by-value Lexer.token_t variant: free the payload of
 * the variants that own storage (LITERAL, IDENT, TYVAR, DATA, AUG_BINOP,
 * RESERVED), then reset the tag to 0 to mark the value as cleared. */
static void _fx_free_N14Lexer__token_t(struct _fx_N14Lexer__token_t* dst)
{
    switch (dst->tag) {
    case 1:
        _fx_free_N10Ast__lit_t(&dst->u.LITERAL); break;
    case 2:
        _fx_free_T2BS(&dst->u.IDENT); break;
    case 3:
        fx_free_str(&dst->u.TYVAR); break;
    case 13:
        fx_free_str(&dst->u.DATA); break;
    case 94:
        _fx_free_N13Ast__binary_t(&dst->u.AUG_BINOP); break;
    case 100:
        fx_free_str(&dst->u.RESERVED); break;
    default:
        /* payload-free tokens: nothing to release */
        ;
    }
    dst->tag = 0;
}
/* Copy a Lexer.token_t: per-variant deep/shared copy for owning payloads;
 * the default branch bit-copies the whole union for POD payloads. */
static void _fx_copy_N14Lexer__token_t(struct _fx_N14Lexer__token_t* src, struct _fx_N14Lexer__token_t* dst)
{
    dst->tag = src->tag;
    switch (src->tag) {
    case 1:
        _fx_copy_N10Ast__lit_t(&src->u.LITERAL, &dst->u.LITERAL); break;
    case 2:
        _fx_copy_T2BS(&src->u.IDENT, &dst->u.IDENT); break;
    case 3:
        fx_copy_str(&src->u.TYVAR, &dst->u.TYVAR); break;
    case 13:
        fx_copy_str(&src->u.DATA, &dst->u.DATA); break;
    case 94:
        FX_COPY_PTR(src->u.AUG_BINOP, &dst->u.AUG_BINOP); break;
    case 100:
        fx_copy_str(&src->u.RESERVED, &dst->u.RESERVED); break;
    default:
        dst->u = src->u;
    }
}
/* Free a list of tokens. */
static void _fx_free_LN14Lexer__token_t(struct _fx_LN14Lexer__token_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_free_N14Lexer__token_t);
}
/* Cons a token onto a token list. */
static int _fx_cons_LN14Lexer__token_t(
    struct _fx_N14Lexer__token_t* hd,
    struct _fx_LN14Lexer__token_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14Lexer__token_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_copy_N14Lexer__token_t);
}
static void _fx_free_N14K_form__klit_t(struct _fx_N14K_form__klit_t* dst)
{
switch (dst->tag) {
case 5:
fx_free_str(&dst->u.KLitString); break;
case 8:
_fx_free_N14K_form__ktyp_t(&dst->u.KLitNil); break;
default:
;
}
dst->tag = 0;
}
static void _fx_copy_N14K_form__klit_t(struct _fx_N14K_form__klit_t* src, struct _fx_N14K_form__klit_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 5:
fx_copy_str(&src->u.KLitString, &dst->u.KLitString); break;
case 8:
FX_COPY_PTR(src->u.KLitNil, &dst->u.KLitNil); break;
default:
dst->u = src->u;
}
}
static void _fx_free_N14K_form__atom_t(struct _fx_N14K_form__atom_t* dst)
{
switch (dst->tag) {
case 2:
_fx_free_N14K_form__klit_t(&dst->u.AtomLit); break;
default:
;
}
dst->tag = 0;
}
static void _fx_copy_N14K_form__atom_t(struct _fx_N14K_form__atom_t* src, struct _fx_N14K_form__atom_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 2:
_fx_copy_N14K_form__klit_t(&src->u.AtomLit, &dst->u.AtomLit); break;
default:
dst->u = src->u;
}
}
static void _fx_free_Nt6option1N14K_form__atom_t(struct _fx_Nt6option1N14K_form__atom_t* dst)
{
switch (dst->tag) {
case 2:
_fx_free_N14K_form__atom_t(&dst->u.Some); break;
default:
;
}
dst->tag = 0;
}
static void _fx_copy_Nt6option1N14K_form__atom_t(
struct _fx_Nt6option1N14K_form__atom_t* src,
struct _fx_Nt6option1N14K_form__atom_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 2:
_fx_copy_N14K_form__atom_t(&src->u.Some, &dst->u.Some); break;
default:
dst->u = src->u;
}
}
static void _fx_free_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* dst)
{
fx_free_str(&dst->t0);
}
static void _fx_copy_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* src, struct _fx_T2SR10Ast__loc_t* dst)
{
fx_copy_str(&src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2SR10Ast__loc_t(fx_str_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2SR10Ast__loc_t* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
static void _fx_free_LT2SR10Ast__loc_t(struct _fx_LT2SR10Ast__loc_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_free_T2SR10Ast__loc_t);
}
static int _fx_cons_LT2SR10Ast__loc_t(
struct _fx_T2SR10Ast__loc_t* hd,
struct _fx_LT2SR10Ast__loc_t_data_t* tl,
bool addref_tl,
struct _fx_LT2SR10Ast__loc_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_copy_T2SR10Ast__loc_t);
}
static void _fx_free_LN14K_form__ktyp_t(struct _fx_LN14K_form__ktyp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN14K_form__ktyp_t, _fx_free_N14K_form__ktyp_t);
}
static int _fx_cons_LN14K_form__ktyp_t(
struct _fx_N14K_form__ktyp_t_data_t* hd,
struct _fx_LN14K_form__ktyp_t_data_t* tl,
bool addref_tl,
struct _fx_LN14K_form__ktyp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN14K_form__ktyp_t, FX_COPY_PTR);
}
/* Destructor for the (ktyp list, ktyp) tuple — the KTypFun payload below:
   releases the argument-type list and the result type. */
static void _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst)
{
    _fx_free_LN14K_form__ktyp_t(&dst->t0);
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}
/* Shallow copy: both fields are ref-counted pointers, addref'd via FX_COPY_PTR. */
static void _fx_copy_T2LN14K_form__ktyp_tN14K_form__ktyp_t(
    struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* src,
    struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}
/* Tuple constructor: stores addref'd pointers to the given list and ktyp. */
static void _fx_make_T2LN14K_form__ktyp_tN14K_form__ktyp_t(
    struct _fx_LN14K_form__ktyp_t_data_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* Destructor for the (Ast.id, ktyp) tuple: only t1 is ref-counted;
   t0 (Ast.id) is a plain-value struct. */
static void _fx_free_T2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}
/* Copy: id by value, ktyp pointer addref'd via FX_COPY_PTR. */
static void _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* src,
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
/* Tuple constructor: id copied by value, ktyp pointer addref'd. */
static void _fx_make_T2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* List destructor for list<(Ast.id, ktyp)>; macro expands over 'dst'. */
static void _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_free_T2R9Ast__id_tN14K_form__ktyp_t);
}
/* List cons for list<(Ast.id, ktyp)>; head deep-copied with the tuple copy
   helper. Macro uses hd/tl/addref_tl/fx_result; returns an fx status code. */
static int _fx_cons_LT2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* hd,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t);
}
/* Destructor for (Ast.id, list<(Ast.id, ktyp)>) — the KTypRecord payload:
   only the list field is ref-counted. */
static void _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->t1);
}
/* Copy: id by value, list head addref'd via FX_COPY_PTR. */
static void _fx_copy_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* src,
    struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
/* Tuple constructor: id by value, list head addref'd. */
static void _fx_make_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* Destructor for (int, ktyp) — the KTypArray payload (dims, element type):
   only the ktyp pointer is ref-counted. */
static void _fx_free_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* dst)
{
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}
/* Copy: int by value, ktyp pointer addref'd via FX_COPY_PTR. */
static void _fx_copy_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* src, struct _fx_T2iN14K_form__ktyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
/* Tuple constructor: int by value, ktyp pointer addref'd. */
static void _fx_make_T2iN14K_form__ktyp_t(
    int_ t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_T2iN14K_form__ktyp_t* fx_result)
{
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* Releases one reference to a K_form.ktyp variant node and nulls the caller's
   pointer. When this was the last reference (FX_DECREF apparently returns the
   previous count, so 1 means "last"), the payload of heap-owning constructors
   is freed recursively before the node itself. Tags with plain-value payloads
   (e.g. scalar types, and the tags absent from the switch such as 15) need no
   payload cleanup. NOTE(review): payload frees recurse, so extremely deep
   nested types could in principle exhaust the C stack — generator design. */
static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 11:
            /* KTypRawPointer: pointee type */
            _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRawPointer); break;
        case 12:
            /* KTypFun: (arg types, result type) */
            _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(&(*dst)->u.KTypFun); break;
        case 13:
            /* KTypTuple: element type list */
            _fx_free_LN14K_form__ktyp_t(&(*dst)->u.KTypTuple); break;
        case 14:
            /* KTypRecord: (name, field list) */
            _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(&(*dst)->u.KTypRecord); break;
        case 16:
            /* KTypArray: (dims, element type) */
            _fx_free_T2iN14K_form__ktyp_t(&(*dst)->u.KTypArray); break;
        case 17:
            _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypVector); break;
        case 18:
            _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypList); break;
        case 19:
            _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRef); break;
        default:
            /* remaining tags carry no heap-owning payload */
            ;
        }
        fx_free(*dst);
    }
    *dst = 0;
}
/* Destructor for a 3-element atom array/tuple (used as the DomainRange
   payload: start/end/step atoms): each atom is released individually. */
static void _fx_free_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_N14K_form__atom_t(&dst->t1);
    _fx_free_N14K_form__atom_t(&dst->t2);
}
/* Element-wise copy via the atom copy helper. */
static void _fx_copy_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* src, struct _fx_Ta3N14K_form__atom_t* dst)
{
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2);
}
/* Constructor: copies the three atoms into fx_result. */
static void _fx_make_Ta3N14K_form__atom_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_N14K_form__atom_t* t2,
    struct _fx_Ta3N14K_form__atom_t* fx_result)
{
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    _fx_copy_N14K_form__atom_t(t2, &fx_result->t2);
}
/* Destructor for the K_form.dom variant (loop-domain descriptor): frees the
   payload of the active constructor, then resets the tag to 0 ("no value"). */
static void _fx_free_N13K_form__dom_t(struct _fx_N13K_form__dom_t* dst)
{
    switch (dst->tag) {
    case 1:
        _fx_free_N14K_form__atom_t(&dst->u.DomainElem); break;
    case 2:
        _fx_free_N14K_form__atom_t(&dst->u.DomainFast); break;
    case 3:
        _fx_free_Ta3N14K_form__atom_t(&dst->u.DomainRange); break;
    default:
        ;
    }
    dst->tag = 0;
}
/* Copy for K_form.dom: duplicates the tag, then the active payload via the
   matching copy helper; tags without heap payloads are copied by raw union
   assignment. */
static void _fx_copy_N13K_form__dom_t(struct _fx_N13K_form__dom_t* src, struct _fx_N13K_form__dom_t* dst)
{
    dst->tag = src->tag;
    switch (src->tag) {
    case 1:
        _fx_copy_N14K_form__atom_t(&src->u.DomainElem, &dst->u.DomainElem); break;
    case 2:
        _fx_copy_N14K_form__atom_t(&src->u.DomainFast, &dst->u.DomainFast); break;
    case 3:
        _fx_copy_Ta3N14K_form__atom_t(&src->u.DomainRange, &dst->u.DomainRange); break;
    default:
        dst->u = src->u;
    }
}
/* Destructor for (atom option, Ast.loc): only the option owns heap data. */
static void _fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst)
{
    _fx_free_Nt6option1N14K_form__atom_t(&dst->t0);
}
/* Copy: option via its copy helper, loc by value. */
static void _fx_copy_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(
    struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* src,
    struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst)
{
    _fx_copy_Nt6option1N14K_form__atom_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
}
/* Tuple constructor: copies the option, assigns the loc. */
static void _fx_make_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(
    struct _fx_Nt6option1N14K_form__atom_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* fx_result)
{
    _fx_copy_Nt6option1N14K_form__atom_t(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* Destructor for (ktyp, Ast.loc) — the common "typed kexp context" pair used
   by many payloads below: only the ktyp pointer is ref-counted. */
static void _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__ktyp_t(&dst->t0);
}
/* Copy: ktyp pointer addref'd via FX_COPY_PTR, loc by value. */
static void _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}
/* Tuple constructor: ktyp addref'd, loc copied by value. */
static void _fx_make_T2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__ktyp_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* Destructor for (atom, (ktyp, loc)): frees the atom and the nested pair. */
static void _fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
/* Field-wise copy via the two element copy helpers. */
static void _fx_copy_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
/* Tuple constructor: copies both components into fx_result. */
static void _fx_make_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Destructor for (binary op, atom, atom, (ktyp, loc)) — a binary-expression
   payload: all four fields except the loc component own heap data. */
static void _fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N13Ast__binary_t(&dst->t0);
    _fx_free_N14K_form__atom_t(&dst->t1);
    _fx_free_N14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
/* Copy: op pointer addref'd (FX_COPY_PTR), atoms and (ktyp, loc) copied via
   their helpers. */
static void _fx_copy_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
/* Tuple constructor: op addref'd, remaining fields copied in. */
static void _fx_make_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N13Ast__binary_t_data_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_N14K_form__atom_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    _fx_copy_N14K_form__atom_t(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* Destructor for (unary op, atom, (ktyp, loc)) — a unary-expression payload:
   the op (t0) is plain data; atom and (ktyp, loc) own heap data. */
static void _fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
/* Copy: op by value, atom and (ktyp, loc) via their copy helpers. */
static void _fx_copy_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Tuple constructor: op assigned, atom and (ktyp, loc) copied in. */
static void _fx_make_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N12Ast__unary_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* List destructor for list<K_form.atom>; macro expands over 'dst' and frees
   each atom payload. */
static void _fx_free_LN14K_form__atom_t(struct _fx_LN14K_form__atom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_free_N14K_form__atom_t);
}
/* List cons for list<K_form.atom>; head copied via the atom copy helper.
   Macro uses hd/tl/addref_tl/fx_result; returns an fx status code. */
static int _fx_cons_LN14K_form__atom_t(
    struct _fx_N14K_form__atom_t* hd,
    struct _fx_LN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14K_form__atom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_copy_N14K_form__atom_t);
}
/* Destructor for (intrinsic op, atom list, (ktyp, loc)) — an intrinsic-call
   payload: t0 is plain data; the list and the pair own heap data. */
static void _fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
/* Copy: op by value, list head addref'd (FX_COPY_PTR), pair via helper. */
static void _fx_copy_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Tuple constructor: op assigned, list addref'd, pair copied in. */
static void _fx_make_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N13Ast__intrin_t* t0,
    struct _fx_LN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Destructor for (Ast.id, kexp): only the kexp pointer is ref-counted. */
static void _fx_free_T2R9Ast__id_tN14K_form__kexp_t(struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t1);
}
/* Copy: id by value, kexp pointer addref'd via FX_COPY_PTR. */
static void _fx_copy_T2R9Ast__id_tN14K_form__kexp_t(
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* src,
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
/* Tuple constructor: id by value, kexp pointer addref'd. */
static void _fx_make_T2R9Ast__id_tN14K_form__kexp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* List destructor for list<K_form.kexp>; macro expands over 'dst'. */
static void _fx_free_LN14K_form__kexp_t(struct _fx_LN14K_form__kexp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14K_form__kexp_t, _fx_free_N14K_form__kexp_t);
}
/* List cons for list<K_form.kexp>: elements are shared pointers, so the head
   is addref'd with FX_COPY_PTR. Macro uses hd/tl/addref_tl/fx_result;
   returns an fx status code. */
static int _fx_cons_LN14K_form__kexp_t(
    struct _fx_N14K_form__kexp_t_data_t* hd,
    struct _fx_LN14K_form__kexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14K_form__kexp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14K_form__kexp_t, FX_COPY_PTR);
}
/* Destructor for (kexp list, (ktyp, loc)): both fields own heap data. */
static void _fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__kexp_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
/* Copy: list head addref'd (FX_COPY_PTR), pair via its copy helper. */
static void _fx_copy_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
/* Tuple constructor: list addref'd, pair copied in. */
static void _fx_make_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LN14K_form__kexp_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Destructor for (kexp, kexp, kexp, (ktyp, loc)) — e.g. an if/then/else-style
   payload with three sub-expressions: all fields own heap data except the loc
   inside t3. */
static void _fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_N14K_form__kexp_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
/* Copy: three kexp pointers addref'd, pair via its copy helper. */
static void _fx_copy_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
/* Tuple constructor: three kexp pointers addref'd, pair copied in. */
static void _fx_make_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_N14K_form__kexp_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* Destructor for (Ast.id, atom list, (ktyp, loc)) — e.g. a call payload:
   the id is plain data; the list and pair own heap data. */
static void _fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
/* Copy: id by value, list head addref'd, pair via helper. */
static void _fx_copy_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Tuple constructor: id assigned, list addref'd, pair copied in. */
static void _fx_make_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Destructor for (Ast.id, int, atom list, (ktyp, loc)): t0 and t1 are plain
   data; the list and pair own heap data. */
static void _fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
/* Copy: id and int by value, list head addref'd, pair via helper. */
static void _fx_copy_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
/* Tuple constructor: plain fields assigned, list addref'd, pair copied in. */
static void _fx_make_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    int_ t1,
    struct _fx_LN14K_form__atom_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    fx_result->t1 = t1;
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* Destructor for (atom list, (ktyp, loc)): both fields own heap data. */
static void _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
/* Copy: list head addref'd (FX_COPY_PTR), pair via its copy helper. */
static void _fx_copy_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
/* Tuple constructor: list addref'd, pair copied in. */
static void _fx_make_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LN14K_form__atom_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Destructor for (Ast.id, Ast.id, atom list, (ktyp, loc)): the two ids are
   plain data; the list and pair own heap data. */
static void _fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
/* Copy: ids by value, list head addref'd, pair via helper. */
static void _fx_copy_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
/* Tuple constructor: ids assigned, list addref'd, pair copied in. */
static void _fx_make_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_R9Ast__id_t* t1,
    struct _fx_LN14K_form__atom_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    fx_result->t1 = *t1;
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* Destructor for (bool, atom): only the atom may own heap data. */
static void _fx_free_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t1);
}
/* Copy: bool by value, atom via its copy helper. */
static void _fx_copy_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* src, struct _fx_T2BN14K_form__atom_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
}
/* Tuple constructor: bool assigned, atom copied in. */
static void _fx_make_T2BN14K_form__atom_t(bool t0, struct _fx_N14K_form__atom_t* t1, struct _fx_T2BN14K_form__atom_t* fx_result)
{
    fx_result->t0 = t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
}
/* List destructor for list<(bool, atom)>; macro expands over 'dst'. */
static void _fx_free_LT2BN14K_form__atom_t(struct _fx_LT2BN14K_form__atom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_free_T2BN14K_form__atom_t);
}
/* List cons for list<(bool, atom)>; head copied via the tuple copy helper.
   Macro uses hd/tl/addref_tl/fx_result; returns an fx status code. */
static int _fx_cons_LT2BN14K_form__atom_t(
    struct _fx_T2BN14K_form__atom_t* hd,
    struct _fx_LT2BN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2BN14K_form__atom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_copy_T2BN14K_form__atom_t);
}
/* List destructor for list<list<(bool, atom)>> — a list of lists; each
   element is itself a list, released via its list destructor. */
static void _fx_free_LLT2BN14K_form__atom_t(struct _fx_LLT2BN14K_form__atom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, _fx_free_LT2BN14K_form__atom_t);
}
/* List cons for list<list<(bool, atom)>>: the head is a shared list pointer,
   so it is addref'd with FX_COPY_PTR. Macro uses hd/tl/addref_tl/fx_result;
   returns an fx status code. */
static int _fx_cons_LLT2BN14K_form__atom_t(
    struct _fx_LT2BN14K_form__atom_t_data_t* hd,
    struct _fx_LLT2BN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LLT2BN14K_form__atom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, FX_COPY_PTR);
}
/* Destructor for (bool, list<list<(bool, atom)>>, (ktyp, loc)): the bool is
   plain data; the nested list and the pair own heap data. */
static void _fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LLT2BN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
/* Copy: bool by value, list head addref'd, pair via helper. */
static void _fx_copy_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Tuple constructor: bool assigned, list addref'd, pair copied in. */
static void _fx_make_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    bool t0,
    struct _fx_LLT2BN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Destructor for (list<(bool, atom)>, (ktyp, loc)): both fields own heap
   data. */
static void _fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LT2BN14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
/* Copy: list head addref'd (FX_COPY_PTR), pair via its copy helper. */
static void _fx_copy_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
/* Tuple constructor: list addref'd, pair copied in. */
static void _fx_make_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT2BN14K_form__atom_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* List destructor for list<K_form.dom>; macro expands over 'dst'. */
static void _fx_free_LN13K_form__dom_t(struct _fx_LN13K_form__dom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_free_N13K_form__dom_t);
}
/* List cons for list<K_form.dom>; head copied via the dom variant copy
   helper. Macro uses hd/tl/addref_tl/fx_result; returns an fx status code. */
static int _fx_cons_LN13K_form__dom_t(
    struct _fx_N13K_form__dom_t* hd,
    struct _fx_LN13K_form__dom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN13K_form__dom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_copy_N13K_form__dom_t);
}
/* Destructor for (atom, Ast.border, Ast.interpolate, dom list, (ktyp, loc)):
   border/interpolate (t1, t2) are plain data; atom, list, and pair own heap
   data. */
static void _fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_LN13K_form__dom_t(&dst->t3);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t4);
}
/* Copy: atom via helper, plain fields by value, list head addref'd, pair via
   its copy helper. */
static void _fx_copy_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
    dst->t2 = src->t2;
    FX_COPY_PTR(src->t3, &dst->t3);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t4, &dst->t4);
}
/* Tuple constructor: same field-wise policy as the copy above. */
static void _fx_make_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N13Ast__border_t* t1,
    struct _fx_N18Ast__interpolate_t* t2,
    struct _fx_LN13K_form__dom_t_data_t* t3,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t4,
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t*
    fx_result)
{
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    fx_result->t1 = *t1;
    fx_result->t2 = *t2;
    FX_COPY_PTR(t3, &fx_result->t3);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t4, &fx_result->t4);
}
/* Destructor for (Ast.id, int, (ktyp, loc)): only the pair owns heap data. */
static void _fx_free_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
/* Copy: id and int by value, pair via its copy helper. */
static void _fx_copy_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Tuple constructor: plain fields assigned, pair copied in. */
static void _fx_make_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    int_ t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    fx_result->t1 = t1;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Destructor for (Ast.id, atom, Ast.loc): only the atom may own heap data;
   id and loc are plain-value structs. */
static void _fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t1);
}
/* Copy: id and loc by value, atom via its copy helper. */
static void _fx_copy_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Tuple constructor: id and loc assigned, atom copied in. */
static void _fx_make_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Destructor for (kexp list, kexp) — e.g. a match-case payload
   (pattern checks, case body): both fields own heap data. */
static void _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst)
{
    _fx_free_LN14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
}
/* Shallow copy: both fields are ref-counted pointers, addref'd via
   FX_COPY_PTR. */
static void _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* src,
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}
/* Tuple constructor: stores addref'd pointers to the given list and kexp. */
static void _fx_make_T2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_LN14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* List destructor for list<(kexp list, kexp)>; macro expands over 'dst'. */
static void _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t);
}
/* List cons for list<(kexp list, kexp)>; head copied via the tuple copy
   helper. Macro uses hd/tl/addref_tl/fx_result; returns an fx status code. */
static int _fx_cons_LT2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* hd,
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t);
}
/* Destructor for (list<(kexp list, kexp)>, (ktyp, loc)) — e.g. a full match
   payload: both fields own heap data. */
static void _fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
/* Copy: list head addref'd (FX_COPY_PTR), pair via its copy helper. */
static void _fx_copy_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
/* Tuple constructor: list addref'd, pair copied in. */
static void _fx_make_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Destructor for (kexp, kexp, (ktyp, loc)): all fields own heap data except
   the loc inside t2. */
static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
/* Copy: two kexp pointers addref'd, pair via its copy helper. */
static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
/* Tuple constructor: two kexp pointers addref'd, pair copied in. */
static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Destructor for (atom, ktyp, Ast.loc): atom and ktyp own heap data; the
   loc (t2) is plain data. */
static void _fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}
/* Copy: atom via helper, ktyp pointer addref'd, loc by value. */
static void _fx_copy_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
/* Tuple constructor: atom copied, ktyp addref'd, loc assigned. */
static void _fx_make_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Destructor for (Ast.id, dom) — a loop binding (variable, domain): only
   the dom variant owns heap data. */
static void _fx_free_T2R9Ast__id_tN13K_form__dom_t(struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst)
{
    _fx_free_N13K_form__dom_t(&dst->t1);
}
/* Copy: id by value, dom via its variant copy helper. */
static void _fx_copy_T2R9Ast__id_tN13K_form__dom_t(
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* src,
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_N13K_form__dom_t(&src->t1, &dst->t1);
}
/* Tuple constructor: id assigned, dom copied in. */
static void _fx_make_T2R9Ast__id_tN13K_form__dom_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N13K_form__dom_t* t1,
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_N13K_form__dom_t(t1, &fx_result->t1);
}
/* List destructor for list<(Ast.id, dom)>; macro expands over 'dst'. */
static void _fx_free_LT2R9Ast__id_tN13K_form__dom_t(struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t, _fx_free_T2R9Ast__id_tN13K_form__dom_t);
}
/* List cons for list<(Ast.id, dom)>; head copied via the tuple copy helper.
   Macro uses hd/tl/addref_tl/fx_result; returns an fx status code. */
static int _fx_cons_LT2R9Ast__id_tN13K_form__dom_t(
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* hd,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t, _fx_copy_T2R9Ast__id_tN13K_form__dom_t);
}
/* Destructor for (kexp, list<(Ast.id, dom)>, list<Ast.id>) — a for-clause
   payload (pre-exp, loop bindings, auxiliary ids). The id list holds no
   per-element heap data, so it is released with fx_free_list_simple. */
static void _fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t1);
    fx_free_list_simple(&dst->t2);
}
/* Shallow copy: all three fields are shared pointers, addref'd via
   FX_COPY_PTR. */
static void _fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* src,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
}
/* Tuple constructor: stores addref'd pointers to all three components. */
static void _fx_make_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1,
    struct _fx_LR9Ast__id_t_data_t* t2,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
}
static void _fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t,
_fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}
static int _fx_cons_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* hd,
struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl,
bool addref_tl,
struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t,
_fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}
/* Helpers for the 4-tuple payload used by the KExpMap variant (see the case-26
   branch of _fx_free_N14K_form__kexp_t below):
   (for-clause list, body kexp, for_flags, (ktyp, loc)). */

/* Free: only t0, t1 and t3 own heap data; t2 (for_flags) is plain data and
   has no destructor call. */
static void
_fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t*
dst)
{
_fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(&dst->t0);
_fx_free_N14K_form__kexp_t(&dst->t1);
_fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
/* Copy: pointer fields shared, flags copied by value, (ktyp, loc) pair via
   its own copy helper. */
static void
_fx_copy_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t*
src,
struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t*
dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
_fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
/* Constructor from the four individual fields. */
static void
_fx_make_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0,
struct _fx_N14K_form__kexp_t_data_t* t1,
struct _fx_R16Ast__for_flags_t* t2,
struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t*
fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
_fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* Helpers for the 5-tuple payload used by the KExpFor variant (case 27 below):
   ((id, dom) list, id list, body kexp, for_flags, loc). */

/* Free: t0/t1/t2 own heap data; t3 (flags) and t4 (loc) are plain values. */
static void _fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
_fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t0);
fx_free_list_simple(&dst->t1);
_fx_free_N14K_form__kexp_t(&dst->t2);
}
/* Copy: pointer fields shared via FX_COPY_PTR, value fields assigned. */
static void _fx_copy_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* src,
struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
dst->t3 = src->t3;
dst->t4 = src->t4;
}
/* Constructor from the five individual fields. */
static void _fx_make_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0,
struct _fx_LR9Ast__id_t_data_t* t1,
struct _fx_N14K_form__kexp_t_data_t* t2,
struct _fx_R16Ast__for_flags_t* t3,
struct _fx_R10Ast__loc_t* t4,
struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
fx_result->t3 = *t3;
fx_result->t4 = *t4;
}
/* Helpers for three small tuple payloads:
   (kexp, kexp, loc)  — used by KExpWhile / KExpDoWhile (cases 28/29 below);
   (string, (ktyp, loc)) — used by KExpCCode (case 30);
   (id, kexp, loc)    — used by KDefVal (case 31). */

/* (kexp, kexp, loc): the two kexp fields are heap nodes; loc is plain data. */
static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
_fx_free_N14K_form__kexp_t(&dst->t0);
_fx_free_N14K_form__kexp_t(&dst->t1);
}
static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* src,
struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
struct _fx_N14K_form__kexp_t_data_t* t0,
struct _fx_N14K_form__kexp_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
/* (string, (ktyp, loc)): strings go through the fx_str_t runtime helpers. */
static void _fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
fx_free_str(&dst->t0);
_fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2ST2N14K_form__ktyp_tR10Ast__loc_t(
struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* src,
struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
fx_copy_str(&src->t0, &dst->t0);
_fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2ST2N14K_form__ktyp_tR10Ast__loc_t(
fx_str_t* t0,
struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
_fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* (id, kexp, loc): only the middle kexp field owns heap data. */
static void _fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
_fx_free_N14K_form__kexp_t(&dst->t1);
}
static void _fx_copy_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(
struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* src,
struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_N14K_form__kexp_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
/* Helpers for the K-form function-definition record (kdeffun) and its
   refcounted box (used by the KDefFun variant, case 32 below). */

/* Free: releases the heap-owned fields (cname string, param/scope lists,
   return type, body); name/flags/closure/loc are plain values. */
static void _fx_free_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* dst)
{
fx_free_str(&dst->kf_cname);
fx_free_list_simple(&dst->kf_params);
_fx_free_N14K_form__ktyp_t(&dst->kf_rt);
_fx_free_N14K_form__kexp_t(&dst->kf_body);
fx_free_list_simple(&dst->kf_scope);
}
/* Field-wise copy: value fields assigned, string via fx_copy_str,
   pointer fields shared via FX_COPY_PTR. */
static void _fx_copy_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* src, struct _fx_R17K_form__kdeffun_t* dst)
{
dst->kf_name = src->kf_name;
fx_copy_str(&src->kf_cname, &dst->kf_cname);
FX_COPY_PTR(src->kf_params, &dst->kf_params);
FX_COPY_PTR(src->kf_rt, &dst->kf_rt);
FX_COPY_PTR(src->kf_body, &dst->kf_body);
dst->kf_flags = src->kf_flags;
dst->kf_closure = src->kf_closure;
FX_COPY_PTR(src->kf_scope, &dst->kf_scope);
dst->kf_loc = src->kf_loc;
}
/* Constructor from the nine individual record fields. */
static void _fx_make_R17K_form__kdeffun_t(
struct _fx_R9Ast__id_t* r_kf_name,
fx_str_t* r_kf_cname,
struct _fx_LR9Ast__id_t_data_t* r_kf_params,
struct _fx_N14K_form__ktyp_t_data_t* r_kf_rt,
struct _fx_N14K_form__kexp_t_data_t* r_kf_body,
struct _fx_R16Ast__fun_flags_t* r_kf_flags,
struct _fx_R25K_form__kdefclosureinfo_t* r_kf_closure,
struct _fx_LN12Ast__scope_t_data_t* r_kf_scope,
struct _fx_R10Ast__loc_t* r_kf_loc,
struct _fx_R17K_form__kdeffun_t* fx_result)
{
fx_result->kf_name = *r_kf_name;
fx_copy_str(r_kf_cname, &fx_result->kf_cname);
FX_COPY_PTR(r_kf_params, &fx_result->kf_params);
FX_COPY_PTR(r_kf_rt, &fx_result->kf_rt);
FX_COPY_PTR(r_kf_body, &fx_result->kf_body);
fx_result->kf_flags = *r_kf_flags;
fx_result->kf_closure = *r_kf_closure;
FX_COPY_PTR(r_kf_scope, &fx_result->kf_scope);
fx_result->kf_loc = *r_kf_loc;
}
/* Ref-box destructor / constructor (macro bodies; presumably refcounted). */
static void _fx_free_rR17K_form__kdeffun_t(struct _fx_rR17K_form__kdeffun_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_free_R17K_form__kdeffun_t);
}
static int _fx_make_rR17K_form__kdeffun_t(
struct _fx_R17K_form__kdeffun_t* arg,
struct _fx_rR17K_form__kdeffun_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_copy_R17K_form__kdeffun_t);
}
/* Helpers for the K-form exception-definition record (kdefexn) and its
   refcounted box (used by the KDefExn variant, case 33 below). */

/* Free: releases the two name strings, the exception type and scope list. */
static void _fx_free_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* dst)
{
fx_free_str(&dst->ke_cname);
fx_free_str(&dst->ke_base_cname);
_fx_free_N14K_form__ktyp_t(&dst->ke_typ);
fx_free_list_simple(&dst->ke_scope);
}
static void _fx_copy_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* src, struct _fx_R17K_form__kdefexn_t* dst)
{
dst->ke_name = src->ke_name;
fx_copy_str(&src->ke_cname, &dst->ke_cname);
fx_copy_str(&src->ke_base_cname, &dst->ke_base_cname);
FX_COPY_PTR(src->ke_typ, &dst->ke_typ);
dst->ke_std = src->ke_std;
dst->ke_tag = src->ke_tag;
dst->ke_make = src->ke_make;
FX_COPY_PTR(src->ke_scope, &dst->ke_scope);
dst->ke_loc = src->ke_loc;
}
/* Constructor from the nine individual record fields. */
static void _fx_make_R17K_form__kdefexn_t(
struct _fx_R9Ast__id_t* r_ke_name,
fx_str_t* r_ke_cname,
fx_str_t* r_ke_base_cname,
struct _fx_N14K_form__ktyp_t_data_t* r_ke_typ,
bool r_ke_std,
struct _fx_R9Ast__id_t* r_ke_tag,
struct _fx_R9Ast__id_t* r_ke_make,
struct _fx_LN12Ast__scope_t_data_t* r_ke_scope,
struct _fx_R10Ast__loc_t* r_ke_loc,
struct _fx_R17K_form__kdefexn_t* fx_result)
{
fx_result->ke_name = *r_ke_name;
fx_copy_str(r_ke_cname, &fx_result->ke_cname);
fx_copy_str(r_ke_base_cname, &fx_result->ke_base_cname);
FX_COPY_PTR(r_ke_typ, &fx_result->ke_typ);
fx_result->ke_std = r_ke_std;
fx_result->ke_tag = *r_ke_tag;
fx_result->ke_make = *r_ke_make;
FX_COPY_PTR(r_ke_scope, &fx_result->ke_scope);
fx_result->ke_loc = *r_ke_loc;
}
/* Ref-box destructor / constructor (macro bodies). */
static void _fx_free_rR17K_form__kdefexn_t(struct _fx_rR17K_form__kdefexn_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_free_R17K_form__kdefexn_t);
}
static int _fx_make_rR17K_form__kdefexn_t(
struct _fx_R17K_form__kdefexn_t* arg,
struct _fx_rR17K_form__kdefexn_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_copy_R17K_form__kdefexn_t);
}
/* Helpers for the tuple (id, id list) — used below as the variant's
   interface table (kvar_ifaces) element — plus lists of that tuple. */

/* Only t1 (the id list) owns heap data. */
static void _fx_free_T2R9Ast__id_tLR9Ast__id_t(struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst)
{
fx_free_list_simple(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tLR9Ast__id_t(
struct _fx_T2R9Ast__id_tLR9Ast__id_t* src,
struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tLR9Ast__id_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_LR9Ast__id_t_data_t* t1,
struct _fx_T2R9Ast__id_tLR9Ast__id_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
/* List destructor / cons for lists of the tuple (macro bodies). */
static void _fx_free_LT2R9Ast__id_tLR9Ast__id_t(struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_free_T2R9Ast__id_tLR9Ast__id_t);
}
static int _fx_cons_LT2R9Ast__id_tLR9Ast__id_t(
struct _fx_T2R9Ast__id_tLR9Ast__id_t* hd,
struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl,
bool addref_tl,
struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_copy_T2R9Ast__id_tLR9Ast__id_t);
}
/* Helpers for the K-form variant-type definition record (kdefvariant) and its
   refcounted box (used by the KDefVariant variant, case 34 below). */

/* Free: releases cname, type args, cases, ctor list, interface list, scope. */
static void _fx_free_R21K_form__kdefvariant_t(struct _fx_R21K_form__kdefvariant_t* dst)
{
fx_free_str(&dst->kvar_cname);
_fx_free_LN14K_form__ktyp_t(&dst->kvar_targs);
_fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kvar_cases);
fx_free_list_simple(&dst->kvar_ctors);
_fx_free_LT2R9Ast__id_tLR9Ast__id_t(&dst->kvar_ifaces);
fx_free_list_simple(&dst->kvar_scope);
}
static void _fx_copy_R21K_form__kdefvariant_t(
struct _fx_R21K_form__kdefvariant_t* src,
struct _fx_R21K_form__kdefvariant_t* dst)
{
dst->kvar_name = src->kvar_name;
fx_copy_str(&src->kvar_cname, &dst->kvar_cname);
dst->kvar_proto = src->kvar_proto;
dst->kvar_props = src->kvar_props;
FX_COPY_PTR(src->kvar_targs, &dst->kvar_targs);
FX_COPY_PTR(src->kvar_cases, &dst->kvar_cases);
FX_COPY_PTR(src->kvar_ctors, &dst->kvar_ctors);
dst->kvar_flags = src->kvar_flags;
FX_COPY_PTR(src->kvar_ifaces, &dst->kvar_ifaces);
FX_COPY_PTR(src->kvar_scope, &dst->kvar_scope);
dst->kvar_loc = src->kvar_loc;
}
/* Constructor from the eleven individual record fields. */
static void _fx_make_R21K_form__kdefvariant_t(
struct _fx_R9Ast__id_t* r_kvar_name,
fx_str_t* r_kvar_cname,
struct _fx_R9Ast__id_t* r_kvar_proto,
struct _fx_Nt6option1R17K_form__ktprops_t* r_kvar_props,
struct _fx_LN14K_form__ktyp_t_data_t* r_kvar_targs,
struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kvar_cases,
struct _fx_LR9Ast__id_t_data_t* r_kvar_ctors,
struct _fx_R16Ast__var_flags_t* r_kvar_flags,
struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* r_kvar_ifaces,
struct _fx_LN12Ast__scope_t_data_t* r_kvar_scope,
struct _fx_R10Ast__loc_t* r_kvar_loc,
struct _fx_R21K_form__kdefvariant_t* fx_result)
{
fx_result->kvar_name = *r_kvar_name;
fx_copy_str(r_kvar_cname, &fx_result->kvar_cname);
fx_result->kvar_proto = *r_kvar_proto;
fx_result->kvar_props = *r_kvar_props;
FX_COPY_PTR(r_kvar_targs, &fx_result->kvar_targs);
FX_COPY_PTR(r_kvar_cases, &fx_result->kvar_cases);
FX_COPY_PTR(r_kvar_ctors, &fx_result->kvar_ctors);
fx_result->kvar_flags = *r_kvar_flags;
FX_COPY_PTR(r_kvar_ifaces, &fx_result->kvar_ifaces);
FX_COPY_PTR(r_kvar_scope, &fx_result->kvar_scope);
fx_result->kvar_loc = *r_kvar_loc;
}
/* Ref-box destructor / constructor (macro bodies). */
static void _fx_free_rR21K_form__kdefvariant_t(struct _fx_rR21K_form__kdefvariant_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_free_R21K_form__kdefvariant_t);
}
static int _fx_make_rR21K_form__kdefvariant_t(
struct _fx_R21K_form__kdefvariant_t* arg,
struct _fx_rR21K_form__kdefvariant_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_copy_R21K_form__kdefvariant_t);
}
/* Helpers for the K-form interface-definition record (kdefinterface) and its
   refcounted box (used by the KDefInterface variant, case 35 below). */

static void _fx_free_R23K_form__kdefinterface_t(struct _fx_R23K_form__kdefinterface_t* dst)
{
fx_free_str(&dst->ki_cname);
_fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->ki_all_methods);
fx_free_list_simple(&dst->ki_scope);
}
static void _fx_copy_R23K_form__kdefinterface_t(
struct _fx_R23K_form__kdefinterface_t* src,
struct _fx_R23K_form__kdefinterface_t* dst)
{
dst->ki_name = src->ki_name;
dst->ki_base = src->ki_base;
fx_copy_str(&src->ki_cname, &dst->ki_cname);
dst->ki_id = src->ki_id;
FX_COPY_PTR(src->ki_all_methods, &dst->ki_all_methods);
FX_COPY_PTR(src->ki_scope, &dst->ki_scope);
dst->ki_loc = src->ki_loc;
}
/* Constructor from the seven individual record fields. */
static void _fx_make_R23K_form__kdefinterface_t(
struct _fx_R9Ast__id_t* r_ki_name,
struct _fx_R9Ast__id_t* r_ki_base,
fx_str_t* r_ki_cname,
struct _fx_R9Ast__id_t* r_ki_id,
struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_ki_all_methods,
struct _fx_LN12Ast__scope_t_data_t* r_ki_scope,
struct _fx_R10Ast__loc_t* r_ki_loc,
struct _fx_R23K_form__kdefinterface_t* fx_result)
{
fx_result->ki_name = *r_ki_name;
fx_result->ki_base = *r_ki_base;
fx_copy_str(r_ki_cname, &fx_result->ki_cname);
fx_result->ki_id = *r_ki_id;
FX_COPY_PTR(r_ki_all_methods, &fx_result->ki_all_methods);
FX_COPY_PTR(r_ki_scope, &fx_result->ki_scope);
fx_result->ki_loc = *r_ki_loc;
}
/* Ref-box destructor / constructor (macro bodies). */
static void _fx_free_rR23K_form__kdefinterface_t(struct _fx_rR23K_form__kdefinterface_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_free_R23K_form__kdefinterface_t);
}
static int _fx_make_rR23K_form__kdefinterface_t(
struct _fx_R23K_form__kdefinterface_t* arg,
struct _fx_rR23K_form__kdefinterface_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_copy_R23K_form__kdefinterface_t);
}
/* Helpers for the K-form type-definition record (kdeftyp) and its refcounted
   box (used by the KDefTyp variant, case 36 below). */

static void _fx_free_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* dst)
{
fx_free_str(&dst->kt_cname);
_fx_free_LN14K_form__ktyp_t(&dst->kt_targs);
_fx_free_N14K_form__ktyp_t(&dst->kt_typ);
fx_free_list_simple(&dst->kt_scope);
}
static void _fx_copy_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* src, struct _fx_R17K_form__kdeftyp_t* dst)
{
dst->kt_name = src->kt_name;
fx_copy_str(&src->kt_cname, &dst->kt_cname);
dst->kt_proto = src->kt_proto;
dst->kt_props = src->kt_props;
FX_COPY_PTR(src->kt_targs, &dst->kt_targs);
FX_COPY_PTR(src->kt_typ, &dst->kt_typ);
FX_COPY_PTR(src->kt_scope, &dst->kt_scope);
dst->kt_loc = src->kt_loc;
}
/* Constructor from the eight individual record fields. */
static void _fx_make_R17K_form__kdeftyp_t(
struct _fx_R9Ast__id_t* r_kt_name,
fx_str_t* r_kt_cname,
struct _fx_R9Ast__id_t* r_kt_proto,
struct _fx_Nt6option1R17K_form__ktprops_t* r_kt_props,
struct _fx_LN14K_form__ktyp_t_data_t* r_kt_targs,
struct _fx_N14K_form__ktyp_t_data_t* r_kt_typ,
struct _fx_LN12Ast__scope_t_data_t* r_kt_scope,
struct _fx_R10Ast__loc_t* r_kt_loc,
struct _fx_R17K_form__kdeftyp_t* fx_result)
{
fx_result->kt_name = *r_kt_name;
fx_copy_str(r_kt_cname, &fx_result->kt_cname);
fx_result->kt_proto = *r_kt_proto;
fx_result->kt_props = *r_kt_props;
FX_COPY_PTR(r_kt_targs, &fx_result->kt_targs);
FX_COPY_PTR(r_kt_typ, &fx_result->kt_typ);
FX_COPY_PTR(r_kt_scope, &fx_result->kt_scope);
fx_result->kt_loc = *r_kt_loc;
}
/* Ref-box destructor / constructor (macro bodies). */
static void _fx_free_rR17K_form__kdeftyp_t(struct _fx_rR17K_form__kdeftyp_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_free_R17K_form__kdeftyp_t);
}
static int _fx_make_rR17K_form__kdeftyp_t(
struct _fx_R17K_form__kdeftyp_t* arg,
struct _fx_rR17K_form__kdeftyp_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_copy_R17K_form__kdeftyp_t);
}
/* Helpers for the K-form closure-variables record (kdefclosurevars) and its
   refcounted box (used by the KDefClosureVars variant, case 37 below). */

static void _fx_free_R25K_form__kdefclosurevars_t(struct _fx_R25K_form__kdefclosurevars_t* dst)
{
fx_free_str(&dst->kcv_cname);
_fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kcv_freevars);
fx_free_list_simple(&dst->kcv_orig_freevars);
fx_free_list_simple(&dst->kcv_scope);
}
static void _fx_copy_R25K_form__kdefclosurevars_t(
struct _fx_R25K_form__kdefclosurevars_t* src,
struct _fx_R25K_form__kdefclosurevars_t* dst)
{
dst->kcv_name = src->kcv_name;
fx_copy_str(&src->kcv_cname, &dst->kcv_cname);
FX_COPY_PTR(src->kcv_freevars, &dst->kcv_freevars);
FX_COPY_PTR(src->kcv_orig_freevars, &dst->kcv_orig_freevars);
FX_COPY_PTR(src->kcv_scope, &dst->kcv_scope);
dst->kcv_loc = src->kcv_loc;
}
/* Constructor from the six individual record fields. */
static void _fx_make_R25K_form__kdefclosurevars_t(
struct _fx_R9Ast__id_t* r_kcv_name,
fx_str_t* r_kcv_cname,
struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kcv_freevars,
struct _fx_LR9Ast__id_t_data_t* r_kcv_orig_freevars,
struct _fx_LN12Ast__scope_t_data_t* r_kcv_scope,
struct _fx_R10Ast__loc_t* r_kcv_loc,
struct _fx_R25K_form__kdefclosurevars_t* fx_result)
{
fx_result->kcv_name = *r_kcv_name;
fx_copy_str(r_kcv_cname, &fx_result->kcv_cname);
FX_COPY_PTR(r_kcv_freevars, &fx_result->kcv_freevars);
FX_COPY_PTR(r_kcv_orig_freevars, &fx_result->kcv_orig_freevars);
FX_COPY_PTR(r_kcv_scope, &fx_result->kcv_scope);
fx_result->kcv_loc = *r_kcv_loc;
}
/* Ref-box destructor / constructor (macro bodies). */
static void _fx_free_rR25K_form__kdefclosurevars_t(struct _fx_rR25K_form__kdefclosurevars_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_free_R25K_form__kdefclosurevars_t);
}
static int _fx_make_rR25K_form__kdefclosurevars_t(
struct _fx_R25K_form__kdefclosurevars_t* arg,
struct _fx_rR25K_form__kdefclosurevars_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_copy_R25K_form__kdefclosurevars_t);
}
/* Destructor for the refcounted K-form expression node (a tagged union).
   Decrements the reference count; only when this was the last reference
   (FX_DECREF returning 1 — presumably the pre-decrement value; confirm
   against the runtime macro) does it dispatch on the variant tag to free
   the payload, then free the node itself. The caller's pointer is always
   nulled, whether or not the node was actually destroyed. */
static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
/* One case per heap-carrying variant; tags not listed (e.g. 0-3, 24) fall
   through to the empty default — presumably payload-free or POD variants. */
switch ((*dst)->tag) {
case 4:
_fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpReturn); break;
case 5:
_fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpAtom); break;
case 6:
_fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpBinary);
break;
case 7:
_fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpUnary); break;
case 8:
_fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIntrin); break;
case 9:
_fx_free_T2R9Ast__id_tN14K_form__kexp_t(&(*dst)->u.KExpSync); break;
case 10:
_fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpSeq); break;
case 11:
_fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIf);
break;
case 12:
_fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCall); break;
case 13:
_fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpICall); break;
case 14:
_fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkTuple); break;
case 15:
_fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkRecord); break;
case 16:
_fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkClosure); break;
case 17:
_fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkArray); break;
case 18:
_fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkVector); break;
case 19:
_fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
&(*dst)->u.KExpAt);
break;
case 20:
_fx_free_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMem); break;
case 21:
_fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpAssign); break;
case 22:
_fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMatch); break;
case 23:
_fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpTryCatch); break;
case 25:
_fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCast); break;
case 26:
_fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
&(*dst)->u.KExpMap);
break;
case 27:
_fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
&(*dst)->u.KExpFor);
break;
case 28:
_fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpWhile); break;
case 29:
_fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpDoWhile); break;
case 30:
_fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCCode); break;
case 31:
_fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KDefVal); break;
case 32:
_fx_free_rR17K_form__kdeffun_t(&(*dst)->u.KDefFun); break;
case 33:
_fx_free_rR17K_form__kdefexn_t(&(*dst)->u.KDefExn); break;
case 34:
_fx_free_rR21K_form__kdefvariant_t(&(*dst)->u.KDefVariant); break;
case 35:
_fx_free_rR23K_form__kdefinterface_t(&(*dst)->u.KDefInterface); break;
case 36:
_fx_free_rR17K_form__kdeftyp_t(&(*dst)->u.KDefTyp); break;
case 37:
_fx_free_rR25K_form__kdefclosurevars_t(&(*dst)->u.KDefClosureVars); break;
default:
;
}
fx_free(*dst);
}
/* Unconditionally clear the caller's pointer to prevent reuse. */
*dst = 0;
}
/* Helpers for the pragmas record, the K-form module record (kmodule), and
   lists of modules. */

/* pragmas: only the clibs list owns heap data; pragma_cpp is a plain bool. */
static void _fx_free_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* dst)
{
_fx_free_LT2SR10Ast__loc_t(&dst->pragma_clibs);
}
static void _fx_copy_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* src, struct _fx_R14Ast__pragmas_t* dst)
{
dst->pragma_cpp = src->pragma_cpp;
FX_COPY_PTR(src->pragma_clibs, &dst->pragma_clibs);
}
static void _fx_make_R14Ast__pragmas_t(
bool r_pragma_cpp,
struct _fx_LT2SR10Ast__loc_t_data_t* r_pragma_clibs,
struct _fx_R14Ast__pragmas_t* fx_result)
{
fx_result->pragma_cpp = r_pragma_cpp;
FX_COPY_PTR(r_pragma_clibs, &fx_result->pragma_clibs);
}
/* kmodule: releases the cname string, top-level kexp list, dependency list,
   and the embedded pragmas record. */
static void _fx_free_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* dst)
{
fx_free_str(&dst->km_cname);
_fx_free_LN14K_form__kexp_t(&dst->km_top);
fx_free_list_simple(&dst->km_deps);
_fx_free_R14Ast__pragmas_t(&dst->km_pragmas);
}
static void _fx_copy_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* src, struct _fx_R17K_form__kmodule_t* dst)
{
dst->km_name = src->km_name;
dst->km_idx = src->km_idx;
dst->km_toposort_idx = src->km_toposort_idx;
fx_copy_str(&src->km_cname, &dst->km_cname);
FX_COPY_PTR(src->km_top, &dst->km_top);
FX_COPY_PTR(src->km_deps, &dst->km_deps);
dst->km_skip = src->km_skip;
dst->km_main = src->km_main;
_fx_copy_R14Ast__pragmas_t(&src->km_pragmas, &dst->km_pragmas);
}
/* Constructor from the nine individual record fields. */
static void _fx_make_R17K_form__kmodule_t(
struct _fx_R9Ast__id_t* r_km_name,
int_ r_km_idx,
int_ r_km_toposort_idx,
fx_str_t* r_km_cname,
struct _fx_LN14K_form__kexp_t_data_t* r_km_top,
struct _fx_Li_data_t* r_km_deps,
bool r_km_skip,
bool r_km_main,
struct _fx_R14Ast__pragmas_t* r_km_pragmas,
struct _fx_R17K_form__kmodule_t* fx_result)
{
fx_result->km_name = *r_km_name;
fx_result->km_idx = r_km_idx;
fx_result->km_toposort_idx = r_km_toposort_idx;
fx_copy_str(r_km_cname, &fx_result->km_cname);
FX_COPY_PTR(r_km_top, &fx_result->km_top);
FX_COPY_PTR(r_km_deps, &fx_result->km_deps);
fx_result->km_skip = r_km_skip;
fx_result->km_main = r_km_main;
_fx_copy_R14Ast__pragmas_t(r_km_pragmas, &fx_result->km_pragmas);
}
/* Module-list destructor / cons (macro bodies). */
static void _fx_free_LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_free_R17K_form__kmodule_t);
}
static int _fx_cons_LR17K_form__kmodule_t(
struct _fx_R17K_form__kmodule_t* hd,
struct _fx_LR17K_form__kmodule_t_data_t* tl,
bool addref_tl,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_copy_R17K_form__kmodule_t);
}
/* From here on: the same style of helpers, but for C-form (C backend) AST
   types. This group covers the tuple (id, ctyp) and lists of it. */

/* Only t1 (the refcounted ctyp node) owns heap data. */
static void _fx_free_T2R9Ast__id_tN14C_form__ctyp_t(struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst)
{
_fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t(
struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* src,
struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tN14C_form__ctyp_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_N14C_form__ctyp_t_data_t* t1,
struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
/* List destructor / cons for lists of the tuple (macro bodies). */
static void _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_free_T2R9Ast__id_tN14C_form__ctyp_t);
}
static int _fx_cons_LT2R9Ast__id_tN14C_form__ctyp_t(
struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* hd,
struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl,
bool addref_tl,
struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t);
}
/* Helpers for the C-form interface-definition record (cdefinterface) and its
   refcounted box. Mirrors the K-form kdefinterface helpers above, with extra
   ci_vtbl/ci_base value fields. */

static void _fx_free_R23C_form__cdefinterface_t(struct _fx_R23C_form__cdefinterface_t* dst)
{
fx_free_str(&dst->ci_cname);
_fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->ci_all_methods);
fx_free_list_simple(&dst->ci_scope);
}
static void _fx_copy_R23C_form__cdefinterface_t(
struct _fx_R23C_form__cdefinterface_t* src,
struct _fx_R23C_form__cdefinterface_t* dst)
{
dst->ci_name = src->ci_name;
fx_copy_str(&src->ci_cname, &dst->ci_cname);
dst->ci_id = src->ci_id;
dst->ci_vtbl = src->ci_vtbl;
dst->ci_base = src->ci_base;
FX_COPY_PTR(src->ci_all_methods, &dst->ci_all_methods);
FX_COPY_PTR(src->ci_scope, &dst->ci_scope);
dst->ci_loc = src->ci_loc;
}
/* Constructor from the eight individual record fields. */
static void _fx_make_R23C_form__cdefinterface_t(
struct _fx_R9Ast__id_t* r_ci_name,
fx_str_t* r_ci_cname,
struct _fx_R9Ast__id_t* r_ci_id,
struct _fx_R9Ast__id_t* r_ci_vtbl,
struct _fx_R9Ast__id_t* r_ci_base,
struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* r_ci_all_methods,
struct _fx_LN12Ast__scope_t_data_t* r_ci_scope,
struct _fx_R10Ast__loc_t* r_ci_loc,
struct _fx_R23C_form__cdefinterface_t* fx_result)
{
fx_result->ci_name = *r_ci_name;
fx_copy_str(r_ci_cname, &fx_result->ci_cname);
fx_result->ci_id = *r_ci_id;
fx_result->ci_vtbl = *r_ci_vtbl;
fx_result->ci_base = *r_ci_base;
FX_COPY_PTR(r_ci_all_methods, &fx_result->ci_all_methods);
FX_COPY_PTR(r_ci_scope, &fx_result->ci_scope);
fx_result->ci_loc = *r_ci_loc;
}
/* Ref-box destructor / constructor (macro bodies). */
static void _fx_free_rR23C_form__cdefinterface_t(struct _fx_rR23C_form__cdefinterface_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_free_R23C_form__cdefinterface_t);
}
static int _fx_make_rR23C_form__cdefinterface_t(
struct _fx_R23C_form__cdefinterface_t* arg,
struct _fx_rR23C_form__cdefinterface_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_copy_R23C_form__cdefinterface_t);
}
/* Helpers for two option types over refcounted C-form nodes:
   ctyp option and cexp option. Tag 2 is the Some case carrying a payload;
   any other tag (None) has no payload to release. */

static void _fx_free_Nt6option1N14C_form__ctyp_t(struct _fx_Nt6option1N14C_form__ctyp_t* dst)
{
switch (dst->tag) {
case 2:
_fx_free_N14C_form__ctyp_t(&dst->u.Some); break;
default:
;
}
/* Reset the tag so the option reads as empty after the free. */
dst->tag = 0;
}
static void _fx_copy_Nt6option1N14C_form__ctyp_t(
struct _fx_Nt6option1N14C_form__ctyp_t* src,
struct _fx_Nt6option1N14C_form__ctyp_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 2:
FX_COPY_PTR(src->u.Some, &dst->u.Some); break;
default:
/* Non-Some: copy the raw union bytes (no pointer inside to refcount). */
dst->u = src->u;
}
}
static void _fx_free_Nt6option1N14C_form__cexp_t(struct _fx_Nt6option1N14C_form__cexp_t* dst)
{
switch (dst->tag) {
case 2:
_fx_free_N14C_form__cexp_t(&dst->u.Some); break;
default:
;
}
dst->tag = 0;
}
static void _fx_copy_Nt6option1N14C_form__cexp_t(
struct _fx_Nt6option1N14C_form__cexp_t* src,
struct _fx_Nt6option1N14C_form__cexp_t* dst)
{
dst->tag = src->tag;
switch (src->tag) {
case 2:
FX_COPY_PTR(src->u.Some, &dst->u.Some); break;
default:
dst->u = src->u;
}
}
/* Helpers for the C-form type-properties record (ctprops). Only ctp_make
   (an id list) owns heap data; the remaining fields are plain values. */

static void _fx_free_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* dst)
{
fx_free_list_simple(&dst->ctp_make);
}
static void _fx_copy_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* src, struct _fx_R17C_form__ctprops_t* dst)
{
dst->ctp_scalar = src->ctp_scalar;
dst->ctp_complex = src->ctp_complex;
dst->ctp_ptr = src->ctp_ptr;
dst->ctp_pass_by_ref = src->ctp_pass_by_ref;
FX_COPY_PTR(src->ctp_make, &dst->ctp_make);
dst->ctp_free = src->ctp_free;
dst->ctp_copy = src->ctp_copy;
}
/* Constructor from the seven individual record fields. */
static void _fx_make_R17C_form__ctprops_t(
bool r_ctp_scalar,
bool r_ctp_complex,
bool r_ctp_ptr,
bool r_ctp_pass_by_ref,
struct _fx_LR9Ast__id_t_data_t* r_ctp_make,
struct _fx_Ta2R9Ast__id_t* r_ctp_free,
struct _fx_Ta2R9Ast__id_t* r_ctp_copy,
struct _fx_R17C_form__ctprops_t* fx_result)
{
fx_result->ctp_scalar = r_ctp_scalar;
fx_result->ctp_complex = r_ctp_complex;
fx_result->ctp_ptr = r_ctp_ptr;
fx_result->ctp_pass_by_ref = r_ctp_pass_by_ref;
FX_COPY_PTR(r_ctp_make, &fx_result->ctp_make);
fx_result->ctp_free = *r_ctp_free;
fx_result->ctp_copy = *r_ctp_copy;
}
/* Helpers for the tuple (id option, (id, ctyp) list) — the payload of the
   CTypStruct/CTypUnion variants (cases 13/14 of the ctyp destructor below) —
   plus the plain ctyp list. */

/* t0 (id option) is plain data here; only the t1 member list is released. */
static void _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(
struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst)
{
_fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(
struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* src,
struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(
struct _fx_Nt6option1R9Ast__id_t* t0,
struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1,
struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
/* ctyp-list destructor / cons; elements are refcounted pointers, so the cons
   macro can use FX_COPY_PTR directly as the element copy. */
static void _fx_free_LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN14C_form__ctyp_t, _fx_free_N14C_form__ctyp_t);
}
static int _fx_cons_LN14C_form__ctyp_t(
struct _fx_N14C_form__ctyp_t_data_t* hd,
struct _fx_LN14C_form__ctyp_t_data_t* tl,
bool addref_tl,
struct _fx_LN14C_form__ctyp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN14C_form__ctyp_t, FX_COPY_PTR);
}
/* Helpers for the remaining C-form ctyp variant payloads:
   (ctyp list, ctyp)      — CTypFunRawPtr (case 15 of the ctyp destructor);
   (ctyp_attr list, ctyp) — CTypRawPtr / CTypRawArray (cases 16/17);
   (int, ctyp)            — CTypArray (case 18). */

static void _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst)
{
_fx_free_LN14C_form__ctyp_t(&dst->t0);
_fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2LN14C_form__ctyp_tN14C_form__ctyp_t(
struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* src,
struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2LN14C_form__ctyp_tN14C_form__ctyp_t(
struct _fx_LN14C_form__ctyp_t_data_t* t0,
struct _fx_N14C_form__ctyp_t_data_t* t1,
struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
/* ctyp_attr list cons; elements are plain values, copied by the simple
   by-pointer copy macro. */
static int _fx_cons_LN19C_form__ctyp_attr_t(
struct _fx_N19C_form__ctyp_attr_t* hd,
struct _fx_LN19C_form__ctyp_attr_t_data_t* tl,
bool addref_tl,
struct _fx_LN19C_form__ctyp_attr_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN19C_form__ctyp_attr_t, FX_COPY_SIMPLE_BY_PTR);
}
static void _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst)
{
fx_free_list_simple(&dst->t0);
_fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(
struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* src,
struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(
struct _fx_LN19C_form__ctyp_attr_t_data_t* t0,
struct _fx_N14C_form__ctyp_t_data_t* t1,
struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
/* (int, ctyp): t0 is a plain integer dimension/index, t1 a refcounted node. */
static void _fx_free_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* dst)
{
_fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* src, struct _fx_T2iN14C_form__ctyp_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2iN14C_form__ctyp_t(
int_ t0,
struct _fx_N14C_form__ctyp_t_data_t* t1,
struct _fx_T2iN14C_form__ctyp_t* fx_result)
{
fx_result->t0 = t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
/* Destructor for the C_form.ctyp_t variant (tagged union).
   Drops one reference; when this was the last reference (FX_DECREF returns
   the previous count, so == 1 means "now zero"), frees the tag-specific
   payload and then the node itself. Tags 13..19 carry heap-owning payloads
   (struct/union/function-pointer/raw-pointer/raw-array/array/vector forms);
   all other tags have no owned data. Always nulls *dst on exit. */
static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
switch ((*dst)->tag) {
case 13:
_fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypStruct); break;
case 14:
_fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypUnion); break;
case 15:
_fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(&(*dst)->u.CTypFunRawPtr); break;
case 16:
_fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawPtr); break;
case 17:
_fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawArray); break;
case 18:
_fx_free_T2iN14C_form__ctyp_t(&(*dst)->u.CTypArray); break;
case 19:
_fx_free_N14C_form__ctyp_t(&(*dst)->u.CTypVector); break;
default:
;
}
fx_free(*dst);
}
*dst = 0;
}
/* Generated helpers for (ctyp_t, Ast.loc_t) — the "type + source location"
   pair attached to C-form expressions — and for tuples that embed it:
   (Ast.id_t, (ctyp, loc)) and (K_form.klit_t, (ctyp, loc)).
   loc_t is a plain record (copied by assignment, nothing to free). */
static void _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__ctyp_t(&dst->t0);
}
static void _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N14C_form__ctyp_t_data_t* t0,
struct _fx_R10Ast__loc_t* t1,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
static void _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
dst->t0 = src->t0;
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1,
struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
fx_result->t0 = *t0;
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
static void _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14K_form__klit_t(&dst->t0);
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_copy_N14K_form__klit_t(&src->t0, &dst->t0);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N14K_form__klit_t* t0,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1,
struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
_fx_copy_N14K_form__klit_t(t0, &fx_result->t0);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Generated helpers for expression-node payloads (each ends with the
   (ctyp, loc) pair): binary op (op, lhs, rhs, typ/loc), unary op
   (op, operand, typ/loc), and member access "." / "->"
   (base-exp, member id, typ/loc). Op tags and id_t are plain values. */
static void _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t1);
_fx_free_N14C_form__cexp_t(&dst->t2);
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3);
}
static void _fx_copy_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void _fx_make_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N17C_form__cbinary_t* t0,
struct _fx_N14C_form__cexp_t_data_t* t1,
struct _fx_N14C_form__cexp_t_data_t* t2,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3,
struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
static void _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t1);
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N16C_form__cunary_t* t0,
struct _fx_N14C_form__cexp_t_data_t* t1,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2,
struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Shared payload of CExpMem / CExpArrow: (exp, member id, (ctyp, loc)). */
static void _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_R9Ast__id_t* t1,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2,
struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* Generated helpers for cast payload (exp, target ctyp, loc), ternary
   payload (cond, then-exp, else-exp, (ctyp, loc)), plus the cexp list
   destructor/cons used by call and initializer expressions below. */
static void _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_N14C_form__ctyp_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
static void _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_N14C_form__cexp_t(&dst->t1);
_fx_free_N14C_form__cexp_t(&dst->t2);
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3);
}
static void _fx_copy_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void _fx_make_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_N14C_form__cexp_t_data_t* t1,
struct _fx_N14C_form__cexp_t_data_t* t2,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3,
struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
static void _fx_free_LN14C_form__cexp_t(struct _fx_LN14C_form__cexp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN14C_form__cexp_t, _fx_free_N14C_form__cexp_t);
}
static int _fx_cons_LN14C_form__cexp_t(
struct _fx_N14C_form__cexp_t_data_t* hd,
struct _fx_LN14C_form__cexp_t_data_t* tl,
bool addref_tl,
struct _fx_LN14C_form__cexp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN14C_form__cexp_t, FX_COPY_PTR);
}
/* Generated helpers for call payload (callee, argument list, (ctyp, loc))
   and initializer-list payload (element list, (ctyp, loc)). */
static void _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_LN14C_form__cexp_t(&dst->t1);
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_LN14C_form__cexp_t_data_t* t1,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2,
struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
static void _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
_fx_free_LN14C_form__cexp_t(&dst->t0);
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
struct _fx_LN14C_form__cexp_t_data_t* t0,
struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1,
struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Destructor for the C_form.cexp_t expression variant. Same discipline as
   _fx_free_N14C_form__ctyp_t: drop one reference, and on the last one
   (FX_DECREF previous count == 1) free the payload selected by the tag
   (ident, literal, binary, unary, member, arrow, cast, ternary, call,
   init-list, typ, inline-C-code), then the node; always nulls *dst. */
static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
switch ((*dst)->tag) {
case 1:
_fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpIdent); break;
case 2:
_fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpLit); break;
case 3:
_fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
&(*dst)->u.CExpBinary);
break;
case 4:
_fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpUnary); break;
case 5:
_fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpMem); break;
case 6:
_fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpArrow); break;
case 7:
_fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCast); break;
case 8:
_fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTernary);
break;
case 9:
_fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCall); break;
case 10:
_fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpInit); break;
case 11:
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTyp); break;
case 12:
_fx_free_T2SR10Ast__loc_t(&(*dst)->u.CExpCCode); break;
default:
;
}
fx_free(*dst);
}
*dst = 0;
}
/* Generated helpers for statement-related aggregates: (cexp option, loc)
   pair, the cstmt list destructor/cons, and the (cstmt list, loc) pair
   used for statement blocks. */
static void _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst)
{
_fx_free_Nt6option1N14C_form__cexp_t(&dst->t0);
}
static void _fx_copy_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(
struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* src,
struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst)
{
_fx_copy_Nt6option1N14C_form__cexp_t(&src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(
struct _fx_Nt6option1N14C_form__cexp_t* t0,
struct _fx_R10Ast__loc_t* t1,
struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result)
{
_fx_copy_Nt6option1N14C_form__cexp_t(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
static void _fx_free_LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LN15C_form__cstmt_t, _fx_free_N15C_form__cstmt_t);
}
static int _fx_cons_LN15C_form__cstmt_t(
struct _fx_N15C_form__cstmt_t_data_t* hd,
struct _fx_LN15C_form__cstmt_t_data_t* tl,
bool addref_tl,
struct _fx_LN15C_form__cstmt_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN15C_form__cstmt_t, FX_COPY_PTR);
}
static void _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst)
{
_fx_free_LN15C_form__cstmt_t(&dst->t0);
}
static void _fx_copy_T2LN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* src,
struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2LN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_LN15C_form__cstmt_t_data_t* t0,
struct _fx_R10Ast__loc_t* t1,
struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
}
/* Generated helpers for (id_t, cstmt) pairs and for the 4-tuple
   (condition exp, then-stmt, else-stmt, loc) — an if-statement payload. */
static void _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst)
{
_fx_free_N15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tN15C_form__cstmt_t(
struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* src,
struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tN15C_form__cstmt_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_N15C_form__cstmt_t_data_t* t1,
struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
static void _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_N15C_form__cstmt_t(&dst->t1);
_fx_free_N15C_form__cstmt_t(&dst->t2);
}
static void _fx_copy_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* src,
struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
dst->t3 = src->t3;
}
static void _fx_make_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_N15C_form__cstmt_t_data_t* t1,
struct _fx_N15C_form__cstmt_t_data_t* t2,
struct _fx_R10Ast__loc_t* t3,
struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
fx_result->t3 = *t3;
}
/* Generated helpers for the 6-tuple
   (ctyp option, cexp list, cexp option, cexp list, cstmt, loc) — by its
   shape this looks like a C for-statement payload (decl/init, condition,
   increment, body); the last member is a plain loc record. */
static void
_fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t*
dst)
{
_fx_free_Nt6option1N14C_form__ctyp_t(&dst->t0);
_fx_free_LN14C_form__cexp_t(&dst->t1);
_fx_free_Nt6option1N14C_form__cexp_t(&dst->t2);
_fx_free_LN14C_form__cexp_t(&dst->t3);
_fx_free_N15C_form__cstmt_t(&dst->t4);
}
static void
_fx_copy_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t*
src,
struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t*
dst)
{
_fx_copy_Nt6option1N14C_form__ctyp_t(&src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
_fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2);
FX_COPY_PTR(src->t3, &dst->t3);
FX_COPY_PTR(src->t4, &dst->t4);
dst->t5 = src->t5;
}
static void
_fx_make_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_Nt6option1N14C_form__ctyp_t* t0,
struct _fx_LN14C_form__cexp_t_data_t* t1,
struct _fx_Nt6option1N14C_form__cexp_t* t2,
struct _fx_LN14C_form__cexp_t_data_t* t3,
struct _fx_N15C_form__cstmt_t_data_t* t4,
struct _fx_R10Ast__loc_t* t5,
struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t*
fx_result)
{
_fx_copy_Nt6option1N14C_form__ctyp_t(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
_fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2);
FX_COPY_PTR(t3, &fx_result->t3);
FX_COPY_PTR(t4, &fx_result->t4);
fx_result->t5 = *t5;
}
/* Generated helpers for (cexp, cstmt, loc) and (cstmt, cexp, loc) tuples
   (while / do-while style payloads), the (cexp list, cstmt list) pair
   (a switch-case arm: labels + body), and the list of such pairs. */
static void _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_N15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src,
struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_N15C_form__cstmt_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
static void _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(
struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst)
{
_fx_free_N15C_form__cstmt_t(&dst->t0);
_fx_free_N14C_form__cexp_t(&dst->t1);
}
static void _fx_copy_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(
struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* src,
struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(
struct _fx_N15C_form__cstmt_t_data_t* t0,
struct _fx_N14C_form__cexp_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
static void _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst)
{
_fx_free_LN14C_form__cexp_t(&dst->t0);
_fx_free_LN15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t(
struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* src,
struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2LN14C_form__cexp_tLN15C_form__cstmt_t(
struct _fx_LN14C_form__cexp_t_data_t* t0,
struct _fx_LN15C_form__cstmt_t_data_t* t1,
struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
static void _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t);
}
static int _fx_cons_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(
struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* hd,
struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl,
bool addref_tl,
struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t);
}
/* Generated helpers for the switch payload (selector exp, case-arm list,
   loc), the local-definition 4-tuple (ctyp, id, init-exp option, loc),
   and the carg_attr list cons (simple-valued elements). */
static void _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* src,
struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
static void _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(
struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst)
{
_fx_free_N14C_form__ctyp_t(&dst->t0);
_fx_free_Nt6option1N14C_form__cexp_t(&dst->t2);
}
static void _fx_copy_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(
struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* src,
struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
_fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2);
dst->t3 = src->t3;
}
static void _fx_make_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(
struct _fx_N14C_form__ctyp_t_data_t* t0,
struct _fx_R9Ast__id_t* t1,
struct _fx_Nt6option1N14C_form__cexp_t* t2,
struct _fx_R10Ast__loc_t* t3,
struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1;
_fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2);
fx_result->t3 = *t3;
}
static int _fx_cons_LN19C_form__carg_attr_t(
struct _fx_N19C_form__carg_attr_t* hd,
struct _fx_LN19C_form__carg_attr_t_data_t* tl,
bool addref_tl,
struct _fx_LN19C_form__carg_attr_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LN19C_form__carg_attr_t, FX_COPY_SIMPLE_BY_PTR);
}
/* Generated helpers for a function-argument triple (name id, ctyp,
   carg_attr list) and the list of such triples (the cf_args payload of
   cdeffun_t below). */
static void _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst)
{
_fx_free_N14C_form__ctyp_t(&dst->t1);
fx_free_list_simple(&dst->t2);
}
static void _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* src,
struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
FX_COPY_PTR(src->t2, &dst->t2);
}
static void _fx_make_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_N14C_form__ctyp_t_data_t* t1,
struct _fx_LN19C_form__carg_attr_t_data_t* t2,
struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* fx_result)
{
fx_result->t0 = *t0;
FX_COPY_PTR(t1, &fx_result->t1);
FX_COPY_PTR(t2, &fx_result->t2);
}
static void _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t,
_fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t);
}
static int _fx_cons_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* hd,
struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl,
bool addref_tl,
struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t,
_fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t);
}
/* Generated helpers for the C-form function-definition record cdeffun_t
   (name, C name string, args, return type, body, flags, scope, loc), plus
   the refcounted ref-cell (rR...) wrapper around it. free releases only
   the heap-owning fields; copy/make addref pointers and assign values. */
static void _fx_free_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* dst)
{
fx_free_str(&dst->cf_cname);
_fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&dst->cf_args);
_fx_free_N14C_form__ctyp_t(&dst->cf_rt);
_fx_free_LN15C_form__cstmt_t(&dst->cf_body);
fx_free_list_simple(&dst->cf_scope);
}
static void _fx_copy_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* src, struct _fx_R17C_form__cdeffun_t* dst)
{
dst->cf_name = src->cf_name;
fx_copy_str(&src->cf_cname, &dst->cf_cname);
FX_COPY_PTR(src->cf_args, &dst->cf_args);
FX_COPY_PTR(src->cf_rt, &dst->cf_rt);
FX_COPY_PTR(src->cf_body, &dst->cf_body);
dst->cf_flags = src->cf_flags;
FX_COPY_PTR(src->cf_scope, &dst->cf_scope);
dst->cf_loc = src->cf_loc;
}
static void _fx_make_R17C_form__cdeffun_t(
struct _fx_R9Ast__id_t* r_cf_name,
fx_str_t* r_cf_cname,
struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* r_cf_args,
struct _fx_N14C_form__ctyp_t_data_t* r_cf_rt,
struct _fx_LN15C_form__cstmt_t_data_t* r_cf_body,
struct _fx_R16Ast__fun_flags_t* r_cf_flags,
struct _fx_LN12Ast__scope_t_data_t* r_cf_scope,
struct _fx_R10Ast__loc_t* r_cf_loc,
struct _fx_R17C_form__cdeffun_t* fx_result)
{
fx_result->cf_name = *r_cf_name;
fx_copy_str(r_cf_cname, &fx_result->cf_cname);
FX_COPY_PTR(r_cf_args, &fx_result->cf_args);
FX_COPY_PTR(r_cf_rt, &fx_result->cf_rt);
FX_COPY_PTR(r_cf_body, &fx_result->cf_body);
fx_result->cf_flags = *r_cf_flags;
FX_COPY_PTR(r_cf_scope, &fx_result->cf_scope);
fx_result->cf_loc = *r_cf_loc;
}
static void _fx_free_rR17C_form__cdeffun_t(struct _fx_rR17C_form__cdeffun_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_free_R17C_form__cdeffun_t);
}
static int _fx_make_rR17C_form__cdeffun_t(
struct _fx_R17C_form__cdeffun_t* arg,
struct _fx_rR17C_form__cdeffun_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_copy_R17C_form__cdeffun_t);
}
/* Generated helpers for the C-form type-definition record cdeftyp_t
   (name, typ, C name, props, data-start offset, enum id, interface list,
   ifaces id, scope, loc) and its refcounted ref-cell wrapper. */
static void _fx_free_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* dst)
{
_fx_free_N14C_form__ctyp_t(&dst->ct_typ);
fx_free_str(&dst->ct_cname);
_fx_free_R17C_form__ctprops_t(&dst->ct_props);
fx_free_list_simple(&dst->ct_ifaces);
fx_free_list_simple(&dst->ct_scope);
}
static void _fx_copy_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* src, struct _fx_R17C_form__cdeftyp_t* dst)
{
dst->ct_name = src->ct_name;
FX_COPY_PTR(src->ct_typ, &dst->ct_typ);
fx_copy_str(&src->ct_cname, &dst->ct_cname);
_fx_copy_R17C_form__ctprops_t(&src->ct_props, &dst->ct_props);
dst->ct_data_start = src->ct_data_start;
dst->ct_enum = src->ct_enum;
FX_COPY_PTR(src->ct_ifaces, &dst->ct_ifaces);
dst->ct_ifaces_id = src->ct_ifaces_id;
FX_COPY_PTR(src->ct_scope, &dst->ct_scope);
dst->ct_loc = src->ct_loc;
}
static void _fx_make_R17C_form__cdeftyp_t(
struct _fx_R9Ast__id_t* r_ct_name,
struct _fx_N14C_form__ctyp_t_data_t* r_ct_typ,
fx_str_t* r_ct_cname,
struct _fx_R17C_form__ctprops_t* r_ct_props,
int_ r_ct_data_start,
struct _fx_R9Ast__id_t* r_ct_enum,
struct _fx_LR9Ast__id_t_data_t* r_ct_ifaces,
struct _fx_R9Ast__id_t* r_ct_ifaces_id,
struct _fx_LN12Ast__scope_t_data_t* r_ct_scope,
struct _fx_R10Ast__loc_t* r_ct_loc,
struct _fx_R17C_form__cdeftyp_t* fx_result)
{
fx_result->ct_name = *r_ct_name;
FX_COPY_PTR(r_ct_typ, &fx_result->ct_typ);
fx_copy_str(r_ct_cname, &fx_result->ct_cname);
_fx_copy_R17C_form__ctprops_t(r_ct_props, &fx_result->ct_props);
fx_result->ct_data_start = r_ct_data_start;
fx_result->ct_enum = *r_ct_enum;
FX_COPY_PTR(r_ct_ifaces, &fx_result->ct_ifaces);
fx_result->ct_ifaces_id = *r_ct_ifaces_id;
FX_COPY_PTR(r_ct_scope, &fx_result->ct_scope);
fx_result->ct_loc = *r_ct_loc;
}
static void _fx_free_rR17C_form__cdeftyp_t(struct _fx_rR17C_form__cdeftyp_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_free_R17C_form__cdeftyp_t);
}
static int _fx_make_rR17C_form__cdeftyp_t(
struct _fx_R17C_form__cdeftyp_t* arg,
struct _fx_rR17C_form__cdeftyp_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_copy_R17C_form__cdeftyp_t);
}
/* Generated destructor/copy/constructor/list support for the tuple
   (Ast.id_t, C_form.cexp_t option). NOTE(review): FX_COPY_PTR appears to
   share the pointee and bump its refcount (cf. FX_DECREF in the variant
   destructor below) — confirm against the runtime macros. */
static void _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t(struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst)
{
/* t0 (id_t) is released nowhere here, so it holds no owned heap data */
_fx_free_Nt6option1N14C_form__cexp_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t(
struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* src,
struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst)
{
dst->t0 = src->t0;
_fx_copy_Nt6option1N14C_form__cexp_t(&src->t1, &dst->t1);
}
/* Builds the tuple in-place from its components (deep/shared copies as above). */
static void _fx_make_T2R9Ast__id_tNt6option1N14C_form__cexp_t(
struct _fx_R9Ast__id_t* t0,
struct _fx_Nt6option1N14C_form__cexp_t* t1,
struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* fx_result)
{
fx_result->t0 = *t0;
_fx_copy_Nt6option1N14C_form__cexp_t(t1, &fx_result->t1);
}
/* List-of-tuple destructor (walks nodes, freeing each head via the tuple dtor). */
static void _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(
struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t);
}
/* Cons cell constructor; `addref_tl` controls whether the tail's refcount is
   bumped (macro-implemented). Returns a status code. */
static int _fx_cons_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(
struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* hd,
struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl,
bool addref_tl,
struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t);
}
/* Generated lifecycle functions for the `C_form.cdefenum_t` record
   (C enum definition: name, member list, C name, scope, location). */
static void _fx_free_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* dst)
{
_fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&dst->cenum_members);
fx_free_str(&dst->cenum_cname);
/* scope list elements need no per-element dtor, hence the "simple" free */
fx_free_list_simple(&dst->cenum_scope);
}
static void _fx_copy_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* src, struct _fx_R18C_form__cdefenum_t* dst)
{
/* name and loc are plain (no owned heap data); lists/strings are refcounted */
dst->cenum_name = src->cenum_name;
FX_COPY_PTR(src->cenum_members, &dst->cenum_members);
fx_copy_str(&src->cenum_cname, &dst->cenum_cname);
FX_COPY_PTR(src->cenum_scope, &dst->cenum_scope);
dst->cenum_loc = src->cenum_loc;
}
/* Field-by-field constructor; mirrors the copy function above. */
static void _fx_make_R18C_form__cdefenum_t(
struct _fx_R9Ast__id_t* r_cenum_name,
struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* r_cenum_members,
fx_str_t* r_cenum_cname,
struct _fx_LN12Ast__scope_t_data_t* r_cenum_scope,
struct _fx_R10Ast__loc_t* r_cenum_loc,
struct _fx_R18C_form__cdefenum_t* fx_result)
{
fx_result->cenum_name = *r_cenum_name;
FX_COPY_PTR(r_cenum_members, &fx_result->cenum_members);
fx_copy_str(r_cenum_cname, &fx_result->cenum_cname);
FX_COPY_PTR(r_cenum_scope, &fx_result->cenum_scope);
fx_result->cenum_loc = *r_cenum_loc;
}
/* Refcounted-box release / creation for cdefenum_t (macro-generated). */
static void _fx_free_rR18C_form__cdefenum_t(struct _fx_rR18C_form__cdefenum_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_free_R18C_form__cdefenum_t);
}
static int _fx_make_rR18C_form__cdefenum_t(
struct _fx_R18C_form__cdefenum_t* arg,
struct _fx_rR18C_form__cdefenum_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_copy_R18C_form__cdefenum_t);
}
/* Generated lifecycle functions for `C_form.cdefmacro_t`
   (C macro definition: name, C name, args, body statements, scope, loc). */
static void _fx_free_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* dst)
{
fx_free_str(&dst->cm_cname);
fx_free_list_simple(&dst->cm_args);
_fx_free_LN15C_form__cstmt_t(&dst->cm_body);
fx_free_list_simple(&dst->cm_scope);
}
static void _fx_copy_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* src, struct _fx_R19C_form__cdefmacro_t* dst)
{
/* cm_name/cm_loc are plain values; string and lists are refcounted shares */
dst->cm_name = src->cm_name;
fx_copy_str(&src->cm_cname, &dst->cm_cname);
FX_COPY_PTR(src->cm_args, &dst->cm_args);
FX_COPY_PTR(src->cm_body, &dst->cm_body);
FX_COPY_PTR(src->cm_scope, &dst->cm_scope);
dst->cm_loc = src->cm_loc;
}
/* Field-by-field constructor; mirrors the copy function above. */
static void _fx_make_R19C_form__cdefmacro_t(
struct _fx_R9Ast__id_t* r_cm_name,
fx_str_t* r_cm_cname,
struct _fx_LR9Ast__id_t_data_t* r_cm_args,
struct _fx_LN15C_form__cstmt_t_data_t* r_cm_body,
struct _fx_LN12Ast__scope_t_data_t* r_cm_scope,
struct _fx_R10Ast__loc_t* r_cm_loc,
struct _fx_R19C_form__cdefmacro_t* fx_result)
{
fx_result->cm_name = *r_cm_name;
fx_copy_str(r_cm_cname, &fx_result->cm_cname);
FX_COPY_PTR(r_cm_args, &fx_result->cm_args);
FX_COPY_PTR(r_cm_body, &fx_result->cm_body);
FX_COPY_PTR(r_cm_scope, &fx_result->cm_scope);
fx_result->cm_loc = *r_cm_loc;
}
/* Refcounted-box release / creation for cdefmacro_t (macro-generated). */
static void _fx_free_rR19C_form__cdefmacro_t(struct _fx_rR19C_form__cdefmacro_t_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_free_R19C_form__cdefmacro_t);
}
static int _fx_make_rR19C_form__cdefmacro_t(
struct _fx_R19C_form__cdefmacro_t* arg,
struct _fx_rR19C_form__cdefmacro_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_copy_R19C_form__cdefmacro_t);
}
/* Generated support for the tuple (C_form.cexp_t, C_form.cstmt_t list) —
   e.g. one (case-label, body) arm — plus its list type. */
static void _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst)
{
_fx_free_N14C_form__cexp_t(&dst->t0);
_fx_free_LN15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t(
struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* src,
struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst)
{
/* both fields are pointer-shared (refcount bump), not deep-copied */
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2N14C_form__cexp_tLN15C_form__cstmt_t(
struct _fx_N14C_form__cexp_t_data_t* t0,
struct _fx_LN15C_form__cstmt_t_data_t* t1,
struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
/* List destructor and cons constructor for the tuple list (macro-generated). */
static void _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t);
}
static int _fx_cons_LT2N14C_form__cexp_tLN15C_form__cstmt_t(
struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* hd,
struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl,
bool addref_tl,
struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t);
}
/* Generated support for the triple used by the CMacroIf payload (see the
   variant destructor below): ((cond, body) list, else-body, loc). */
static void _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst)
{
/* t2 (loc) is a plain value, nothing to free */
_fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&dst->t0);
_fx_free_LN15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* src,
struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
dst->t2 = src->t2;
}
static void _fx_make_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(
struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0,
struct _fx_LN15C_form__cstmt_t_data_t* t1,
struct _fx_R10Ast__loc_t* t2,
struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
/* Releases one reference to a `C_form.cstmt_t` variant node.
   FX_DECREF(...) == 1 means this caller held the last reference (the count
   was 1 before decrement), so the payload selected by `tag` is destroyed and
   the node memory freed. Tags not listed (1, 4, 5, 10, 11, 19, 20, 24, ...)
   carry no heap-owning payload and fall through to the empty default.
   The caller's pointer is always nulled, even when other references remain. */
static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst)
{
if (*dst && FX_DECREF((*dst)->rc) == 1) {
switch ((*dst)->tag) {
case 2:
_fx_free_T2SR10Ast__loc_t(&(*dst)->u.CComment); break;
case 3:
_fx_free_N14C_form__cexp_t(&(*dst)->u.CExp); break;
case 6:
_fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtReturn); break;
case 7:
_fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtBlock); break;
case 8:
_fx_free_T2R9Ast__id_tN15C_form__cstmt_t(&(*dst)->u.CStmtSync); break;
case 9:
_fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtIf); break;
case 12:
_fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
&(*dst)->u.CStmtFor);
break;
case 13:
_fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtWhile); break;
case 14:
_fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtDoWhile); break;
case 15:
_fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtSwitch); break;
case 16:
_fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CDefVal); break;
case 17:
_fx_free_rR17C_form__cdeffun_t(&(*dst)->u.CDefFun); break;
case 18:
_fx_free_rR17C_form__cdeftyp_t(&(*dst)->u.CDefTyp); break;
case 21:
_fx_free_rR18C_form__cdefenum_t(&(*dst)->u.CDefEnum); break;
case 22:
_fx_free_rR23C_form__cdefinterface_t(&(*dst)->u.CDefInterface); break;
case 23:
_fx_free_rR19C_form__cdefmacro_t(&(*dst)->u.CMacroDef); break;
case 25:
_fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CMacroIf); break;
case 26:
_fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroInclude); break;
case 27:
_fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroPragma); break;
default:
;
}
fx_free(*dst);
}
*dst = 0;
}
/* Generated lifecycle functions for `C_form.cmodule_t` — one generated C
   translation unit: module name, C file name, statement list, flags
   (main/recompile/skip) and pragmas. */
static void _fx_free_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* dst)
{
fx_free_str(&dst->cmod_cname);
_fx_free_LN15C_form__cstmt_t(&dst->cmod_ccode);
_fx_free_R14Ast__pragmas_t(&dst->cmod_pragmas);
}
static void _fx_copy_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* src, struct _fx_R17C_form__cmodule_t* dst)
{
/* name and bool flags are plain copies; string/list/pragmas are refcounted */
dst->cmod_name = src->cmod_name;
fx_copy_str(&src->cmod_cname, &dst->cmod_cname);
FX_COPY_PTR(src->cmod_ccode, &dst->cmod_ccode);
dst->cmod_main = src->cmod_main;
dst->cmod_recompile = src->cmod_recompile;
dst->cmod_skip = src->cmod_skip;
_fx_copy_R14Ast__pragmas_t(&src->cmod_pragmas, &dst->cmod_pragmas);
}
/* Field-by-field constructor; mirrors the copy function above. */
static void _fx_make_R17C_form__cmodule_t(
struct _fx_R9Ast__id_t* r_cmod_name,
fx_str_t* r_cmod_cname,
struct _fx_LN15C_form__cstmt_t_data_t* r_cmod_ccode,
bool r_cmod_main,
bool r_cmod_recompile,
bool r_cmod_skip,
struct _fx_R14Ast__pragmas_t* r_cmod_pragmas,
struct _fx_R17C_form__cmodule_t* fx_result)
{
fx_result->cmod_name = *r_cmod_name;
fx_copy_str(r_cmod_cname, &fx_result->cmod_cname);
FX_COPY_PTR(r_cmod_ccode, &fx_result->cmod_ccode);
fx_result->cmod_main = r_cmod_main;
fx_result->cmod_recompile = r_cmod_recompile;
fx_result->cmod_skip = r_cmod_skip;
_fx_copy_R14Ast__pragmas_t(r_cmod_pragmas, &fx_result->cmod_pragmas);
}
/* List destructor and cons constructor for cmodule_t lists (macro-generated). */
static void _fx_free_LR17C_form__cmodule_t(struct _fx_LR17C_form__cmodule_t_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_free_R17C_form__cmodule_t);
}
static int _fx_cons_LR17C_form__cmodule_t(
struct _fx_R17C_form__cmodule_t* hd,
struct _fx_LR17C_form__cmodule_t_data_t* tl,
bool addref_tl,
struct _fx_LR17C_form__cmodule_t_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_copy_R17C_form__cmodule_t);
}
/* Generated support for the tuple (Lexer.token_t list, bool). */
static void _fx_free_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* dst)
{
/* only the token list owns heap data; t1 (bool) needs no cleanup */
_fx_free_LN14Lexer__token_t(&dst->t0);
}
static void _fx_copy_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* src, struct _fx_T2LN14Lexer__token_tB* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2LN14Lexer__token_tB(
struct _fx_LN14Lexer__token_t_data_t* t0,
bool t1,
struct _fx_T2LN14Lexer__token_tB* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = t1;
}
/* Generated support for the tuple (string, bool) and its list type. */
static void _fx_free_T2SB(struct _fx_T2SB* dst)
{
fx_free_str(&dst->t0);
}
static void _fx_copy_T2SB(struct _fx_T2SB* src, struct _fx_T2SB* dst)
{
fx_copy_str(&src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2SB(fx_str_t* t0, bool t1, struct _fx_T2SB* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
fx_result->t1 = t1;
}
/* List-of-(string, bool) destructor and cons constructor (macro-generated). */
static void _fx_free_LT2SB(struct _fx_LT2SB_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2SB, _fx_free_T2SB);
}
static int _fx_cons_LT2SB(struct _fx_T2SB* hd, struct _fx_LT2SB_data_t* tl, bool addref_tl, struct _fx_LT2SB_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2SB, _fx_copy_T2SB);
}
/* Generated support for the tuple (string, string list). */
static void _fx_free_T2SLS(struct _fx_T2SLS* dst)
{
fx_free_str(&dst->t0);
_fx_free_LS(&dst->t1);
}
static void _fx_copy_T2SLS(struct _fx_T2SLS* src, struct _fx_T2SLS* dst)
{
fx_copy_str(&src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2SLS(fx_str_t* t0, struct _fx_LS_data_t* t1, struct _fx_T2SLS* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
/* Generated support for a homogeneous pair of string lists
   ("Ta2LS" = 2-element array-like tuple of LS). */
static void _fx_free_Ta2LS(struct _fx_Ta2LS* dst)
{
_fx_free_LS(&dst->t0);
_fx_free_LS(&dst->t1);
}
static void _fx_copy_Ta2LS(struct _fx_Ta2LS* src, struct _fx_Ta2LS* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_Ta2LS(struct _fx_LS_data_t* t0, struct _fx_LS_data_t* t1, struct _fx_Ta2LS* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
FX_COPY_PTR(t1, &fx_result->t1);
}
/* Generated support for the tuple (int, int list) and its list type. */
static void _fx_free_T2iLi(struct _fx_T2iLi* dst)
{
/* t0 (int) is a plain value; only the int-list needs releasing */
fx_free_list_simple(&dst->t1);
}
static void _fx_copy_T2iLi(struct _fx_T2iLi* src, struct _fx_T2iLi* dst)
{
dst->t0 = src->t0;
FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2iLi(int_ t0, struct _fx_Li_data_t* t1, struct _fx_T2iLi* fx_result)
{
fx_result->t0 = t0;
FX_COPY_PTR(t1, &fx_result->t1);
}
/* List-of-(int, int list) destructor and cons constructor (macro-generated). */
static void _fx_free_LT2iLi(struct _fx_LT2iLi_data_t** dst)
{
FX_FREE_LIST_IMPL(_fx_LT2iLi, _fx_free_T2iLi);
}
static int _fx_cons_LT2iLi(
struct _fx_T2iLi* hd,
struct _fx_LT2iLi_data_t* tl,
bool addref_tl,
struct _fx_LT2iLi_data_t** fx_result)
{
FX_MAKE_LIST_IMPL(_fx_LT2iLi, _fx_copy_T2iLi);
}
/* Refcounted-box release / creation for an int-list reference (`int list ref`
   in Ficus terms); used below as the `result_ref_0` accumulator of the
   module-sorting dfs. Payload copy is just a pointer share (FX_COPY_PTR). */
static void _fx_free_rLi(struct _fx_rLi_data_t** dst)
{
FX_FREE_REF_IMPL(_fx_rLi, fx_free_list_simple);
}
static int _fx_make_rLi(struct _fx_Li_data_t* arg, struct _fx_rLi_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rLi, FX_COPY_PTR);
}
/* Generated support for the tuple (bool, bool, string). */
static void _fx_free_T3BBS(struct _fx_T3BBS* dst)
{
fx_free_str(&dst->t2);
}
static void _fx_copy_T3BBS(struct _fx_T3BBS* src, struct _fx_T3BBS* dst)
{
dst->t0 = src->t0;
dst->t1 = src->t1;
fx_copy_str(&src->t2, &dst->t2);
}
static void _fx_make_T3BBS(bool t0, bool t1, fx_str_t* t2, struct _fx_T3BBS* fx_result)
{
fx_result->t0 = t0;
fx_result->t1 = t1;
fx_copy_str(t2, &fx_result->t2);
}
/* Generated support for the tuple (K_form.kmodule_t list, bool). */
static void _fx_free_T2LR17K_form__kmodule_tB(struct _fx_T2LR17K_form__kmodule_tB* dst)
{
_fx_free_LR17K_form__kmodule_t(&dst->t0);
}
static void _fx_copy_T2LR17K_form__kmodule_tB(
struct _fx_T2LR17K_form__kmodule_tB* src,
struct _fx_T2LR17K_form__kmodule_tB* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2LR17K_form__kmodule_tB(
struct _fx_LR17K_form__kmodule_t_data_t* t0,
bool t1,
struct _fx_T2LR17K_form__kmodule_tB* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = t1;
}
/* Generated support for a homogeneous 9-tuple of strings: every field is an
   fx_str_t, freed/copied with the string runtime helpers. */
static void _fx_free_Ta9S(struct _fx_Ta9S* dst)
{
fx_free_str(&dst->t0);
fx_free_str(&dst->t1);
fx_free_str(&dst->t2);
fx_free_str(&dst->t3);
fx_free_str(&dst->t4);
fx_free_str(&dst->t5);
fx_free_str(&dst->t6);
fx_free_str(&dst->t7);
fx_free_str(&dst->t8);
}
static void _fx_copy_Ta9S(struct _fx_Ta9S* src, struct _fx_Ta9S* dst)
{
fx_copy_str(&src->t0, &dst->t0);
fx_copy_str(&src->t1, &dst->t1);
fx_copy_str(&src->t2, &dst->t2);
fx_copy_str(&src->t3, &dst->t3);
fx_copy_str(&src->t4, &dst->t4);
fx_copy_str(&src->t5, &dst->t5);
fx_copy_str(&src->t6, &dst->t6);
fx_copy_str(&src->t7, &dst->t7);
fx_copy_str(&src->t8, &dst->t8);
}
static void _fx_make_Ta9S(
fx_str_t* t0,
fx_str_t* t1,
fx_str_t* t2,
fx_str_t* t3,
fx_str_t* t4,
fx_str_t* t5,
fx_str_t* t6,
fx_str_t* t7,
fx_str_t* t8,
struct _fx_Ta9S* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
fx_copy_str(t1, &fx_result->t1);
fx_copy_str(t2, &fx_result->t2);
fx_copy_str(t3, &fx_result->t3);
fx_copy_str(t4, &fx_result->t4);
fx_copy_str(t5, &fx_result->t5);
fx_copy_str(t6, &fx_result->t6);
fx_copy_str(t7, &fx_result->t7);
fx_copy_str(t8, &fx_result->t8);
}
/* Generated support for homogeneous string 2-, 3- and 4-tuples. */
static void _fx_free_Ta2S(struct _fx_Ta2S* dst)
{
fx_free_str(&dst->t0);
fx_free_str(&dst->t1);
}
static void _fx_copy_Ta2S(struct _fx_Ta2S* src, struct _fx_Ta2S* dst)
{
fx_copy_str(&src->t0, &dst->t0);
fx_copy_str(&src->t1, &dst->t1);
}
static void _fx_make_Ta2S(fx_str_t* t0, fx_str_t* t1, struct _fx_Ta2S* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
fx_copy_str(t1, &fx_result->t1);
}
/* (string, string, string) */
static void _fx_free_Ta3S(struct _fx_Ta3S* dst)
{
fx_free_str(&dst->t0);
fx_free_str(&dst->t1);
fx_free_str(&dst->t2);
}
static void _fx_copy_Ta3S(struct _fx_Ta3S* src, struct _fx_Ta3S* dst)
{
fx_copy_str(&src->t0, &dst->t0);
fx_copy_str(&src->t1, &dst->t1);
fx_copy_str(&src->t2, &dst->t2);
}
static void _fx_make_Ta3S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, struct _fx_Ta3S* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
fx_copy_str(t1, &fx_result->t1);
fx_copy_str(t2, &fx_result->t2);
}
/* (string, string, string, string) */
static void _fx_free_Ta4S(struct _fx_Ta4S* dst)
{
fx_free_str(&dst->t0);
fx_free_str(&dst->t1);
fx_free_str(&dst->t2);
fx_free_str(&dst->t3);
}
static void _fx_copy_Ta4S(struct _fx_Ta4S* src, struct _fx_Ta4S* dst)
{
fx_copy_str(&src->t0, &dst->t0);
fx_copy_str(&src->t1, &dst->t1);
fx_copy_str(&src->t2, &dst->t2);
fx_copy_str(&src->t3, &dst->t3);
}
static void _fx_make_Ta4S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, fx_str_t* t3, struct _fx_Ta4S* fx_result)
{
fx_copy_str(t0, &fx_result->t0);
fx_copy_str(t1, &fx_result->t1);
fx_copy_str(t2, &fx_result->t2);
fx_copy_str(t3, &fx_result->t3);
}
/* Generated support for the tuples (bool, bool, string list, bool, string)
   and (bool, bool, string list, bool, string list); only the list/string
   fields own heap data. */
static void _fx_free_T5BBLSBS(struct _fx_T5BBLSBS* dst)
{
_fx_free_LS(&dst->t2);
fx_free_str(&dst->t4);
}
static void _fx_copy_T5BBLSBS(struct _fx_T5BBLSBS* src, struct _fx_T5BBLSBS* dst)
{
dst->t0 = src->t0;
dst->t1 = src->t1;
FX_COPY_PTR(src->t2, &dst->t2);
dst->t3 = src->t3;
fx_copy_str(&src->t4, &dst->t4);
}
static void _fx_make_T5BBLSBS(bool t0, bool t1, struct _fx_LS_data_t* t2, bool t3, fx_str_t* t4, struct _fx_T5BBLSBS* fx_result)
{
fx_result->t0 = t0;
fx_result->t1 = t1;
FX_COPY_PTR(t2, &fx_result->t2);
fx_result->t3 = t3;
fx_copy_str(t4, &fx_result->t4);
}
/* Same shape, but the last element is a string list instead of a string. */
static void _fx_free_T5BBLSBLS(struct _fx_T5BBLSBLS* dst)
{
_fx_free_LS(&dst->t2);
_fx_free_LS(&dst->t4);
}
static void _fx_copy_T5BBLSBLS(struct _fx_T5BBLSBLS* src, struct _fx_T5BBLSBLS* dst)
{
dst->t0 = src->t0;
dst->t1 = src->t1;
FX_COPY_PTR(src->t2, &dst->t2);
dst->t3 = src->t3;
FX_COPY_PTR(src->t4, &dst->t4);
}
static void _fx_make_T5BBLSBLS(
bool t0,
bool t1,
struct _fx_LS_data_t* t2,
bool t3,
struct _fx_LS_data_t* t4,
struct _fx_T5BBLSBLS* fx_result)
{
fx_result->t0 = t0;
fx_result->t1 = t1;
FX_COPY_PTR(t2, &fx_result->t2);
fx_result->t3 = t3;
FX_COPY_PTR(t4, &fx_result->t4);
}
/* Generated support for the tuple (C_form.cmodule_t list, bool). */
static void _fx_free_T2LR17C_form__cmodule_tB(struct _fx_T2LR17C_form__cmodule_tB* dst)
{
_fx_free_LR17C_form__cmodule_t(&dst->t0);
}
static void _fx_copy_T2LR17C_form__cmodule_tB(
struct _fx_T2LR17C_form__cmodule_tB* src,
struct _fx_T2LR17C_form__cmodule_tB* dst)
{
FX_COPY_PTR(src->t0, &dst->t0);
dst->t1 = src->t1;
}
static void _fx_make_T2LR17C_form__cmodule_tB(
struct _fx_LR17C_form__cmodule_t_data_t* t0,
bool t1,
struct _fx_T2LR17C_form__cmodule_tB* fx_result)
{
FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = t1;
}
/* Module-level globals of the generated Compiler module.
   Token/variant singletons are initialized with their variant tag only
   (no payload). NOTE(review): the specific tag numbers (20/59/107, 1/2/3)
   come from the Ficus-generated variant layouts — do not edit by hand. */
_fx_N14Lexer__token_t _fx_g14Compiler__FROM = { 20 };
_fx_N14Lexer__token_t _fx_g19Compiler__SEMICOLON = { 59 };
_fx_N14Lexer__token_t _fx_g19Compiler__PP_DEFINE = { 107 };
/* Exception id for Compiler.CumulativeParseError; 0 until registered. */
int _FX_EXN_E30Compiler__CumulativeParseError = 0;
/* Message-color variants; tags 1/2/3 select the red/green/blue ANSI escapes
   emitted by the clrmsg function further below. */
_fx_N20Compiler__msgcolor_t _fx_g16Compiler__MsgRed = { 1 };
_fx_N20Compiler__msgcolor_t _fx_g18Compiler__MsgGreen = { 2 };
_fx_N20Compiler__msgcolor_t _fx_g17Compiler__MsgBlue = { 3 };
/* True when the terminal supports color output (consulted by clrmsg). */
bool _fx_g21Compiler__iscolorterm;
/* Shared string global; presumably the colorized "error" prefix — confirm
   against its initialization elsewhere in the module. */
fx_str_t _fx_g15Compiler__error = {0};
FX_EXTERN_C void _fx_F12print_stringv1S(fx_str_t* a, void* fx_fv);
FX_EXTERN_C int _fx_F4joinS2SLS(fx_str_t* sep, struct _fx_LS_data_t* strs, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C int_ _fx_M6StringFM4findi3SSi(fx_str_t* s, fx_str_t* part, int_ from_pos, void* fx_fv);
FX_EXTERN_C int _fx_M3SysFM9colortermB0(bool* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(struct _fx_R18Options__options_t _fx_g12Options__opt)
FX_EXTERN_C int _fx_M8FilenameFM8basenameS1S(fx_str_t* path, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M8FilenameFM16remove_extensionS1S(fx_str_t* path, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C bool _fx_F6__eq__B2SS(fx_str_t* a, fx_str_t* b, void* fx_fv);
FX_EXTERN_C void _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(bool arg0, fx_str_t* arg1, struct _fx_N14Lexer__token_t* fx_result);
FX_EXTERN_C void _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result);
FX_EXTERN_C void _fx_M5LexerFM4STARN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result);
FX_EXTERN_C void _fx_M3AstFM7LitBoolN10Ast__lit_t1B(bool arg0, struct _fx_N10Ast__lit_t* fx_result);
FX_EXTERN_C void _fx_M3AstFM6LitIntN10Ast__lit_t1l(int64_t arg0, struct _fx_N10Ast__lit_t* fx_result);
FX_EXTERN_C void _fx_M3AstFM9LitStringN10Ast__lit_t1S(fx_str_t* arg0, struct _fx_N10Ast__lit_t* fx_result);
FX_EXTERN_C void _fx_M5LexerFM7LITERALN14Lexer__token_t1N10Ast__lit_t(
struct _fx_N10Ast__lit_t* arg0,
struct _fx_N14Lexer__token_t* fx_result);
FX_EXTERN_C int _fx_M3SysFM7getpathLS1S(fx_str_t* name, struct _fx_LS_data_t** fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M8FilenameFM6getcwdS0(fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(struct _fx_LS_data_t* _fx_g9Sys__argv)
FX_EXTERN_C int _fx_M8FilenameFM9normalizeS2SS(fx_str_t* dir, fx_str_t* fname, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M8FilenameFM7dirnameS1S(fx_str_t* path, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(int_ _fx_g15__ficus_major__)
FX_EXTERN_C int _fx_F6stringS1i(int_ a, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(int_ _fx_g15__ficus_minor__)
FX_EXTERN_C int _fx_M8FilenameFM6existsB1S(fx_str_t* name, bool* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM6get_idRM4id_t1S(fx_str_t* s, struct _fx_R9Ast__id_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM11find_modulei2RM4id_tS(
struct _fx_R9Ast__id_t* mname,
fx_str_t* mfname,
int_* fx_result,
void* fx_fv);
FX_EXTERN_C_VAL(fx_arr_t _fx_g16Ast__all_modules)
FX_EXTERN_C int _fx_M6ParserFM5parseB3iLN14Lexer__token_tLS(
int_ m_idx,
struct _fx_LN14Lexer__token_t_data_t* preamble,
struct _fx_LS_data_t* inc_dirs,
bool* fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(
int_ m,
struct _fx_N16Ast__defmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C_VAL(int _FX_EXN_E22LexerUtils__LexerError)
FX_EXTERN_C_VAL(int _FX_EXN_E18Parser__ParseError)
FX_EXTERN_C int _fx_M3AstFM6stringS1RM5loc_t(struct _fx_R10Ast__loc_t* loc, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_F6stringS1E(fx_exn_t* a, fx_str_t* fx_result, void* fx_fv);
static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(
int_ i_0,
struct _fx_Li_data_t* visited_0,
fx_arr_t* graph_0,
fx_arr_t* processed_0,
struct _fx_rLi_data_t* result_ref_0,
void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM15get_module_nameRM4id_t1i(int_ m, struct _fx_R9Ast__id_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM2ppS1RM4id_t(struct _fx_R9Ast__id_t* i, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_F9make_FailE1S(fx_str_t* arg0, fx_exn_t* fx_result);
FX_EXTERN_C int _fx_M3SysFM5mkdirB2Si(fx_str_t* name, int_ permissions, bool* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(bool _fx_g10Sys__win32)
FX_EXTERN_C int _fx_M8K_mangleFM12mangle_mnameS1S(fx_str_t* m, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t(
struct _fx_LN14K_form__kexp_t_data_t* code,
fx_str_t* fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M4FileFM9read_utf8S1S(fx_str_t* fname, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(int FX_EXN_IOError)
FX_EXTERN_C_VAL(int FX_EXN_FileOpenError)
FX_EXTERN_C int _fx_M4FileFM10write_utf8v2SS(fx_str_t* fname, fx_str_t* text, void* fx_fv);
FX_EXTERN_C int _fx_M3SysFM6removev1S(fx_str_t* name, void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM10pr_verbosev1S(fx_str_t* str, void* fx_fv);
FX_EXTERN_C int _fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t(
fx_str_t* arg0,
struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* arg1,
struct _fx_N14K_form__kexp_t_data_t** fx_result);
FX_EXTERN_C_VAL(struct _fx_LE_data_t* _fx_g21Ast__all_compile_errs)
FX_EXTERN_C int _fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
bool initial,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
bool final_mode,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M13K_optim_matopFM13optimize_gemmLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M10K_fast_idxFM23linearize_arrays_accessLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C_VAL(struct _fx_FPS1B _fx_g11Sys__osname)
FX_EXTERN_C int _fx_M3SysFM6getenvS2SS(fx_str_t* name, fx_str_t* defval, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(bool _fx_g9Sys__unix)
FX_EXTERN_C int _fx_M3SysFM6getenvS1S(fx_str_t* name, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C_VAL(struct _fx_R9Ast__id_t _fx_g9Ast__noid)
FX_EXTERN_C int _fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t(
struct _fx_LN15C_form__cstmt_t_data_t* code_0,
fx_str_t* fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M4FileFM5popenRM1t2SS(fx_str_t* cmdname, fx_str_t* mode, struct _fx_R7File__t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M4FileFM6readlnS1RM1t(struct _fx_R7File__t* f, fx_str_t* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M4FileFM18pclose_exit_statusi1RM1t(struct _fx_R7File__t* f, int_* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M3SysFM7commandi1S(fx_str_t* cmd, int_* fx_result, void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM8init_allv0(void* fx_fv);
FX_EXTERN_C_VAL(fx_exn_t _fx_E30Compiler__CumulativeParseErrorv)
FX_EXTERN_C_VAL(struct _fx_Li_data_t* _fx_g23Ast__all_modules_sorted)
FX_EXTERN_C int _fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t* dm, void* fx_fv);
FX_EXTERN_C int _fx_M13Ast_typecheckFM9check_modv1i(int_ m_idx, void* fx_fv);
FX_EXTERN_C int _fx_M6K_formFM13init_all_idksv0(void* fx_fv);
FX_EXTERN_C int _fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li(
struct _fx_Li_data_t* modules,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t* kmods, void* fx_fv);
FX_EXTERN_C int _fx_M6C_formFM13init_all_idcsv0(void* fx_fv);
FX_EXTERN_C int _fx_M9C_gen_stdFM14init_std_namesv0(void* fx_fv);
FX_EXTERN_C int _fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods_0,
struct _fx_LR17C_form__cmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t(
struct _fx_LR17C_form__cmodule_t_data_t* cmods,
struct _fx_LR17C_form__cmodule_t_data_t** fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t(
struct _fx_R17C_form__cmodule_t* cmod,
struct _fx_R17C_form__cmodule_t* fx_result,
void* fx_fv);
FX_EXTERN_C int _fx_M3AstFM17print_compile_errv1E(fx_exn_t* err, void* fx_fv);
FX_EXTERN_C_VAL(int _FX_EXN_E4Fail)
FX_EXTERN_C_VAL(int _FX_EXN_E17Ast__CompileError)
/* Runtime descriptor and pre-built value for Compiler.CumulativeParseError;
   zero-initialized here, presumably filled in during module init — confirm. */
fx_exn_info_t _fx_E30Compiler__CumulativeParseError_info = {0};
fx_exn_t _fx_E30Compiler__CumulativeParseErrorv = {0};
/* Compiler.length overloads: thin wrappers over the runtime list-length
   helper for exception lists (LE) and string lists (LS). */
FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LE(struct _fx_LE_data_t* l, void* fx_fv)
{
return fx_list_length(l);
}
FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LS(struct _fx_LS_data_t* l, void* fx_fv)
{
return fx_list_length(l);
}
/* Compiler.link2 overloads: delegate to the runtime fx_link_lists, which
   presumably concatenates l1 and l2 into *fx_result — confirm against the
   runtime (note __add__ below relies on it after copying l1). */
FX_EXTERN_C void _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(
struct _fx_LN14Lexer__token_t_data_t* l1,
struct _fx_LN14Lexer__token_t_data_t* l2,
struct _fx_LN14Lexer__token_t_data_t** fx_result,
void* fx_fv)
{
fx_link_lists(l1, l2, fx_result);
}
/* Same operation for string lists. */
FX_EXTERN_C void _fx_M8CompilerFM5link2LS2LSLS(
struct _fx_LS_data_t* l1,
struct _fx_LS_data_t* l2,
struct _fx_LS_data_t** fx_result,
void* fx_fv)
{
fx_link_lists(l1, l2, fx_result);
}
/* Compiler.__add__ for string lists: l1 + l2.
   If either operand is empty the other is returned by reference share
   (FX_COPY_PTR). Otherwise l1's nodes are duplicated into a fresh list v_0
   (FX_LIST_APPEND maintains the tail pointer lstend_0 to keep order), which
   is then joined to l2 via link2 so the original l1 is left untouched.
   Errors from the per-node cons propagate through the generated
   _fx_catch_* labels; v_0 is released on exit in the non-trivial branch. */
FX_EXTERN_C int _fx_M8CompilerFM7__add__LS2LSLS(
struct _fx_LS_data_t* l1_0,
struct _fx_LS_data_t* l2_0,
struct _fx_LS_data_t** fx_result,
void* fx_fv)
{
int fx_status = 0;
if (l1_0 == 0) {
FX_COPY_PTR(l2_0, fx_result);
}
else if (l2_0 == 0) {
FX_COPY_PTR(l1_0, fx_result);
}
else {
_fx_LS v_0 = 0;
_fx_LS lstend_0 = 0;
_fx_LS lst_0 = l1_0;
for (; lst_0; lst_0 = lst_0->tl) {
fx_str_t* x_0 = &lst_0->hd;
_fx_LS node_0 = 0;
FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0);
FX_LIST_APPEND(v_0, lstend_0, node_0);
_fx_catch_0: ;
FX_CHECK_EXN(_fx_catch_1);
}
_fx_M8CompilerFM5link2LS2LSLS(v_0, l2_0, fx_result, 0);
_fx_catch_1: ;
if (v_0) {
_fx_free_LS(&v_0);
}
}
return fx_status;
}
/* Compiler.string(s: string): identity conversion — returns a refcounted
   copy of the input string. Always succeeds (status 0). */
FX_EXTERN_C int _fx_M8CompilerFM6stringS1S(fx_str_t* a_0, fx_str_t* fx_result, void* fx_fv)
{
int fx_status = 0;
fx_copy_str(a_0, fx_result);
return fx_status;
}
/* Compiler.print(s: string): forwards to the runtime print_string. */
FX_EXTERN_C int _fx_M8CompilerFM5printv1S(fx_str_t* a_0, void* fx_fv)
{
int fx_status = 0;
_fx_F12print_stringv1S(a_0, 0);
return fx_status;
}
/* Compiler.array: converts a cmodule_t list into a 1-D array of the same
   length, copying each element in list order. Allocation errors from
   fx_make_arr propagate via fx_status; the per-element dtor/copier are
   registered with the array so it owns its elements. */
FX_EXTERN_C int _fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t(
struct _fx_LR17C_form__cmodule_t_data_t* l_0,
fx_arr_t* fx_result,
void* fx_fv)
{
int fx_status = 0;
_fx_R17C_form__cmodule_t* dstptr_0 = 0;
_fx_LR17C_form__cmodule_t lst_0 = l_0;
int_ len_0 = fx_list_length(lst_0);
{
const int_ shape_0[] = { len_0 };
FX_CALL(
fx_make_arr(1, shape_0, sizeof(_fx_R17C_form__cmodule_t), (fx_free_t)_fx_free_R17C_form__cmodule_t,
(fx_copy_t)_fx_copy_R17C_form__cmodule_t, 0, fx_result), _fx_cleanup);
}
dstptr_0 = (_fx_R17C_form__cmodule_t*)fx_result->data;
for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) {
_fx_R17C_form__cmodule_t* x_0 = &lst_0->hd; _fx_copy_R17C_form__cmodule_t(x_0, dstptr_0);
}
_fx_cleanup: ;
return fx_status;
}
/* Compiler.rev for string lists: returns a reversed copy of `l_0`.
 * Implemented as a fold that conses each head onto the accumulator, which
 * naturally reverses the order; string payloads are shared (cons with the
 * `false` flag), only the spine is new. Returns 0 on success or a negative
 * FX status if a cons allocation fails. */
FX_EXTERN_C int _fx_M8CompilerFM3revLS1LS(struct _fx_LS_data_t* l_0, struct _fx_LS_data_t** fx_result, void* fx_fv)
{
_fx_LS __fold_result___0 = 0;
int fx_status = 0;
_fx_LS lst_0 = l_0;
for (; lst_0; lst_0 = lst_0->tl) {
_fx_LS r_0 = 0;
fx_str_t* a_0 = &lst_0->hd;
/* accumulator := cons(head, accumulator) */
FX_COPY_PTR(__fold_result___0, &r_0);
FX_CALL(_fx_cons_LS(a_0, r_0, false, &r_0), _fx_catch_0);
_fx_free_LS(&__fold_result___0);
FX_COPY_PTR(r_0, &__fold_result___0);
_fx_catch_0: ;
if (r_0) {
_fx_free_LS(&r_0);
}
FX_CHECK_EXN(_fx_cleanup);
}
FX_COPY_PTR(__fold_result___0, fx_result);
_fx_cleanup: ;
if (__fold_result___0) {
_fx_free_LS(&__fold_result___0);
}
return fx_status;
}
/* Compiler.join(sep, strs): joins the string list `strs_0` with separator
 * `sep_0`; thin wrapper around the builtin join. Propagates its status. */
FX_EXTERN_C int _fx_M8CompilerFM4joinS2SLS(fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv)
{
int fx_status = 0;
FX_CALL(_fx_F4joinS2SLS(sep_0, strs_0, fx_result, 0), _fx_cleanup);
_fx_cleanup: ;
return fx_status;
}
/* Compiler.contains(s, substr): true iff `substr_0` occurs in `s_0`.
   String.find returns a non-negative index on a hit, negative otherwise. */
FX_EXTERN_C int _fx_M8CompilerFM8containsB2SS(fx_str_t* s_0, fx_str_t* substr_0, bool* fx_result, void* fx_fv)
{
    const int_ pos = _fx_M6StringFM4findi3SSi(s_0, substr_0, 0, 0);
    *fx_result = (pos >= 0);
    return 0;
}
/* Compiler.clrmsg(clr, msg): wraps `msg_0` in terminal color codes when the
 * terminal supports color (global iscolorterm flag); otherwise returns the
 * message unchanged. Tags 1/2/3 select red/green/blue bold sequences, any
 * other tag selects the empty prefix; the reset sequence is always appended
 * in the colored path.
 * NOTE(review): the color literals look like ANSI SGR sequences; the leading
 * ESC (\x1b) byte may have been lost in transcription of this file — verify
 * the actual literals start with the escape character. */
FX_EXTERN_C int _fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(
struct _fx_N20Compiler__msgcolor_t* clr_0,
fx_str_t* msg_0,
fx_str_t* fx_result,
void* fx_fv)
{
fx_str_t esc_0 = {0};
int fx_status = 0;
if (_fx_g21Compiler__iscolorterm) {
int tag_0 = clr_0->tag;
if (tag_0 == 1) {
fx_str_t slit_0 = FX_MAKE_STR("[31;1m"); fx_copy_str(&slit_0, &esc_0);
}
else if (tag_0 == 2) {
fx_str_t slit_1 = FX_MAKE_STR("[32;1m"); fx_copy_str(&slit_1, &esc_0);
}
else if (tag_0 == 3) {
fx_str_t slit_2 = FX_MAKE_STR("[34;1m"); fx_copy_str(&slit_2, &esc_0);
}
else {
fx_str_t slit_3 = FX_MAKE_STR(""); fx_copy_str(&slit_3, &esc_0);
}
FX_CHECK_EXN(_fx_cleanup);
fx_str_t slit_4 = FX_MAKE_STR("[0m");
{
/* result = esc + msg + reset */
const fx_str_t strs_0[] = { esc_0, *msg_0, slit_4 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, fx_result), _fx_cleanup);
}
}
else {
fx_copy_str(msg_0, fx_result);
}
_fx_cleanup: ;
FX_FREE_STR(&esc_0);
return fx_status;
}
/* Compiler.get_preamble(mfname): builds the token list implicitly prepended to
 * module `mfname_0` before parsing.
 * Part 1 (only when Options.opt.use_preamble is set): folds over the fixed
 * module list ("Builtins", "Math", "Array", "List", "Vector", "Char", "String");
 * once the module's bare file name matches an entry, the `found` flag stops
 * further additions (a module never imports itself or later entries). Entries
 * paired with `true` ("Builtins", "Math", "Array") produce
 * FROM <ident> IMPORT STAR SEMICOLON tokens; `false` entries produce
 * IMPORT <ident> SEMICOLON tokens.
 * Part 2: for every (name, value) in Options.opt.defines, prepends
 * PP_DEFINE IDENT LITERAL tokens; bool/int/string option values map to the
 * corresponding AST literal, any other tag raises NoMatchError.
 * Returns the final token list via fx_result; 0 on success. */
FX_EXTERN_C int _fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S(
fx_str_t* mfname_0,
struct _fx_LN14Lexer__token_t_data_t** fx_result,
void* fx_fv)
{
_fx_LN14Lexer__token_t preamble_0 = 0;
fx_str_t v_0 = {0};
fx_str_t bare_name_0 = {0};
_fx_T2LN14Lexer__token_tB __fold_result___0 = {0};
_fx_T2SB v_1 = {0};
_fx_T2SB v_2 = {0};
_fx_T2SB v_3 = {0};
_fx_T2SB v_4 = {0};
_fx_T2SB v_5 = {0};
_fx_T2SB v_6 = {0};
_fx_T2SB v_7 = {0};
_fx_LT2SB v_8 = 0;
_fx_T2LN14Lexer__token_tB v_9 = {0};
_fx_LN14Lexer__token_t __fold_result___1 = 0;
_fx_LT2SN17Options__optval_t v_10 = 0;
int fx_status = 0;
if (_fx_g12Options__opt.use_preamble) {
/* bare_name = remove_extension(basename(mfname)) */
FX_CALL(_fx_M8FilenameFM8basenameS1S(mfname_0, &v_0, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_0, &bare_name_0, 0), _fx_cleanup);
_fx_make_T2LN14Lexer__token_tB(0, false, &__fold_result___0);
/* Standard module list: (name, import-all?) pairs, consed in reverse. */
fx_str_t slit_0 = FX_MAKE_STR("Builtins");
_fx_make_T2SB(&slit_0, true, &v_1);
fx_str_t slit_1 = FX_MAKE_STR("Math");
_fx_make_T2SB(&slit_1, true, &v_2);
fx_str_t slit_2 = FX_MAKE_STR("Array");
_fx_make_T2SB(&slit_2, true, &v_3);
fx_str_t slit_3 = FX_MAKE_STR("List");
_fx_make_T2SB(&slit_3, false, &v_4);
fx_str_t slit_4 = FX_MAKE_STR("Vector");
_fx_make_T2SB(&slit_4, false, &v_5);
fx_str_t slit_5 = FX_MAKE_STR("Char");
_fx_make_T2SB(&slit_5, false, &v_6);
fx_str_t slit_6 = FX_MAKE_STR("String");
_fx_make_T2SB(&slit_6, false, &v_7);
FX_CALL(_fx_cons_LT2SB(&v_7, 0, true, &v_8), _fx_cleanup);
FX_CALL(_fx_cons_LT2SB(&v_6, v_8, false, &v_8), _fx_cleanup);
FX_CALL(_fx_cons_LT2SB(&v_5, v_8, false, &v_8), _fx_cleanup);
FX_CALL(_fx_cons_LT2SB(&v_4, v_8, false, &v_8), _fx_cleanup);
FX_CALL(_fx_cons_LT2SB(&v_3, v_8, false, &v_8), _fx_cleanup);
FX_CALL(_fx_cons_LT2SB(&v_2, v_8, false, &v_8), _fx_cleanup);
FX_CALL(_fx_cons_LT2SB(&v_1, v_8, false, &v_8), _fx_cleanup);
/* Fold over the module list, accumulating (preamble, found) in __fold_result___0. */
_fx_LT2SB lst_0 = v_8;
for (; lst_0; lst_0 = lst_0->tl) {
fx_str_t mname_0 = {0};
_fx_T2LN14Lexer__token_tB v_11 = {0};
_fx_LN14Lexer__token_t preamble_1 = 0;
_fx_T2LN14Lexer__token_tB v_12 = {0};
_fx_N14Lexer__token_t v_13 = {0};
_fx_N14Lexer__token_t v_14 = {0};
_fx_N14Lexer__token_t v_15 = {0};
_fx_LN14Lexer__token_t v_16 = 0;
_fx_LN14Lexer__token_t v_17 = 0;
_fx_N14Lexer__token_t v_18 = {0};
_fx_N14Lexer__token_t v_19 = {0};
_fx_LN14Lexer__token_t v_20 = 0;
_fx_LN14Lexer__token_t v_21 = 0;
_fx_T2SB* __pat___0 = &lst_0->hd;
fx_copy_str(&__pat___0->t0, &mname_0);
_fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_11);
FX_COPY_PTR(v_11.t0, &preamble_1);
bool found_0 = v_11.t1;
if (found_0) {
/* Already reached the compiled module — keep the preamble as-is. */
_fx_make_T2LN14Lexer__token_tB(preamble_1, found_0, &v_12);
}
else {
bool v_22 = _fx_F6__eq__B2SS(&bare_name_0, &mname_0, 0);
if (v_22) {
/* This entry IS the compiled module: set found, add nothing. */
_fx_make_T2LN14Lexer__token_tB(preamble_1, true, &v_12);
}
else if (__pat___0->t1) {
/* import-all entry: append "from <mname> import *;" tokens. */
_fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_13);
_fx_M5LexerFM6IMPORTN14Lexer__token_t1B(false, &v_14);
_fx_M5LexerFM4STARN14Lexer__token_t1B(true, &v_15);
FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_16), _fx_catch_4);
FX_CALL(_fx_cons_LN14Lexer__token_t(&v_15, v_16, false, &v_16), _fx_catch_4);
FX_CALL(_fx_cons_LN14Lexer__token_t(&v_14, v_16, false, &v_16), _fx_catch_4);
FX_CALL(_fx_cons_LN14Lexer__token_t(&v_13, v_16, false, &v_16), _fx_catch_4);
FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g14Compiler__FROM, v_16, false, &v_16), _fx_catch_4);
/* v_17 = preamble_1 ++ v_16 (inlined list concatenation) */
if (preamble_1 == 0) {
FX_COPY_PTR(v_16, &v_17);
}
else if (v_16 == 0) {
FX_COPY_PTR(preamble_1, &v_17);
}
else {
_fx_LN14Lexer__token_t v_23 = 0;
_fx_LN14Lexer__token_t lstend_0 = 0;
_fx_LN14Lexer__token_t lst_1 = preamble_1;
for (; lst_1; lst_1 = lst_1->tl) {
_fx_N14Lexer__token_t* x_0 = &lst_1->hd;
_fx_LN14Lexer__token_t node_0 = 0;
FX_CALL(_fx_cons_LN14Lexer__token_t(x_0, 0, false, &node_0), _fx_catch_0);
FX_LIST_APPEND(v_23, lstend_0, node_0);
_fx_catch_0: ;
FX_CHECK_EXN(_fx_catch_1);
}
_fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_23, v_16, &v_17, 0);
_fx_catch_1: ;
if (v_23) {
_fx_free_LN14Lexer__token_t(&v_23);
}
}
FX_CHECK_EXN(_fx_catch_4);
_fx_make_T2LN14Lexer__token_tB(v_17, false, &v_12);
}
else {
/* plain entry: append "import <mname>;" tokens. */
_fx_M5LexerFM6IMPORTN14Lexer__token_t1B(true, &v_18);
_fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_19);
FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_20), _fx_catch_4);
FX_CALL(_fx_cons_LN14Lexer__token_t(&v_19, v_20, false, &v_20), _fx_catch_4);
FX_CALL(_fx_cons_LN14Lexer__token_t(&v_18, v_20, false, &v_20), _fx_catch_4);
/* v_21 = preamble_1 ++ v_20 (inlined list concatenation) */
if (preamble_1 == 0) {
FX_COPY_PTR(v_20, &v_21);
}
else if (v_20 == 0) {
FX_COPY_PTR(preamble_1, &v_21);
}
else {
_fx_LN14Lexer__token_t v_24 = 0;
_fx_LN14Lexer__token_t lstend_1 = 0;
_fx_LN14Lexer__token_t lst_2 = preamble_1;
for (; lst_2; lst_2 = lst_2->tl) {
_fx_N14Lexer__token_t* x_1 = &lst_2->hd;
_fx_LN14Lexer__token_t node_1 = 0;
FX_CALL(_fx_cons_LN14Lexer__token_t(x_1, 0, false, &node_1), _fx_catch_2);
FX_LIST_APPEND(v_24, lstend_1, node_1);
_fx_catch_2: ;
FX_CHECK_EXN(_fx_catch_3);
}
_fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_24, v_20, &v_21, 0);
_fx_catch_3: ;
if (v_24) {
_fx_free_LN14Lexer__token_t(&v_24);
}
}
FX_CHECK_EXN(_fx_catch_4);
_fx_make_T2LN14Lexer__token_tB(v_21, false, &v_12);
}
}
_fx_free_T2LN14Lexer__token_tB(&__fold_result___0);
_fx_copy_T2LN14Lexer__token_tB(&v_12, &__fold_result___0);
_fx_catch_4: ;
if (v_21) {
_fx_free_LN14Lexer__token_t(&v_21);
}
if (v_20) {
_fx_free_LN14Lexer__token_t(&v_20);
}
_fx_free_N14Lexer__token_t(&v_19);
_fx_free_N14Lexer__token_t(&v_18);
if (v_17) {
_fx_free_LN14Lexer__token_t(&v_17);
}
if (v_16) {
_fx_free_LN14Lexer__token_t(&v_16);
}
_fx_free_N14Lexer__token_t(&v_15);
_fx_free_N14Lexer__token_t(&v_14);
_fx_free_N14Lexer__token_t(&v_13);
_fx_free_T2LN14Lexer__token_tB(&v_12);
if (preamble_1) {
_fx_free_LN14Lexer__token_t(&preamble_1);
}
_fx_free_T2LN14Lexer__token_tB(&v_11);
FX_FREE_STR(&mname_0);
FX_CHECK_EXN(_fx_cleanup);
}
_fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_9);
FX_COPY_PTR(v_9.t0, &preamble_0);
}
/* Part 2: prepend PP_DEFINE tokens for every -D option. */
FX_COPY_PTR(preamble_0, &__fold_result___1);
FX_COPY_PTR(_fx_g12Options__opt.defines, &v_10);
_fx_LT2SN17Options__optval_t lst_3 = v_10;
for (; lst_3; lst_3 = lst_3->tl) {
fx_str_t n_0 = {0};
_fx_N17Options__optval_t v_25 = {0};
_fx_LN14Lexer__token_t p_0 = 0;
_fx_N10Ast__lit_t v_26 = {0};
_fx_N14Lexer__token_t v_27 = {0};
_fx_N14Lexer__token_t v_28 = {0};
_fx_T2SN17Options__optval_t* __pat___1 = &lst_3->hd;
fx_copy_str(&__pat___1->t0, &n_0);
_fx_copy_N17Options__optval_t(&__pat___1->t1, &v_25);
FX_COPY_PTR(__fold_result___1, &p_0);
/* Map the option value to an AST literal by tag: 1=bool, 2=int, 3=string. */
int tag_0 = v_25.tag;
if (tag_0 == 1) {
_fx_M3AstFM7LitBoolN10Ast__lit_t1B(v_25.u.OptBool, &v_26);
}
else if (tag_0 == 2) {
_fx_M3AstFM6LitIntN10Ast__lit_t1l((int64_t)v_25.u.OptInt, &v_26);
}
else if (tag_0 == 3) {
_fx_M3AstFM9LitStringN10Ast__lit_t1S(&v_25.u.OptString, &v_26);
}
else {
FX_FAST_THROW(FX_EXN_NoMatchError, _fx_catch_5);
}
FX_CHECK_EXN(_fx_catch_5);
_fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &n_0, &v_27);
_fx_M5LexerFM7LITERALN14Lexer__token_t1N10Ast__lit_t(&v_26, &v_28);
FX_CALL(_fx_cons_LN14Lexer__token_t(&v_28, p_0, false, &p_0), _fx_catch_5);
FX_CALL(_fx_cons_LN14Lexer__token_t(&v_27, p_0, false, &p_0), _fx_catch_5);
FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__PP_DEFINE, p_0, false, &p_0), _fx_catch_5);
_fx_free_LN14Lexer__token_t(&__fold_result___1);
FX_COPY_PTR(p_0, &__fold_result___1);
_fx_catch_5: ;
_fx_free_N14Lexer__token_t(&v_28);
_fx_free_N14Lexer__token_t(&v_27);
_fx_free_N10Ast__lit_t(&v_26);
if (p_0) {
_fx_free_LN14Lexer__token_t(&p_0);
}
_fx_free_N17Options__optval_t(&v_25);
FX_FREE_STR(&n_0);
FX_CHECK_EXN(_fx_cleanup);
}
FX_COPY_PTR(__fold_result___1, fx_result);
_fx_cleanup: ;
if (preamble_0) {
_fx_free_LN14Lexer__token_t(&preamble_0);
}
FX_FREE_STR(&v_0);
FX_FREE_STR(&bare_name_0);
_fx_free_T2LN14Lexer__token_tB(&__fold_result___0);
_fx_free_T2SB(&v_1);
_fx_free_T2SB(&v_2);
_fx_free_T2SB(&v_3);
_fx_free_T2SB(&v_4);
_fx_free_T2SB(&v_5);
_fx_free_T2SB(&v_6);
_fx_free_T2SB(&v_7);
if (v_8) {
_fx_free_LT2SB(&v_8);
}
_fx_free_T2LN14Lexer__token_tB(&v_9);
if (__fold_result___1) {
_fx_free_LN14Lexer__token_t(&__fold_result___1);
}
if (v_10) {
_fx_free_LT2SN17Options__optval_t(&v_10);
}
return fx_status;
}
/* Compiler.find_ficus_dirs(): locates the ficus root and the module search
 * path. Returns the tuple (found_root, ficus_path) via fx_result.
 * Candidate standard locations are derived from the running executable's
 * normalized path (cwd + argv[0]):
 *   - dirname(app)/../lib
 *   - grandparent-of-app/lib ("pp" layout)
 *   - dirname(app)/../lib/ficus-<major>.<minor>/lib (installed layout)
 * The search path is std locations first, then FICUS_PATH entries. The first
 * directory containing both Builtins.fx and ../runtime/ficus/ficus.h wins:
 * found_root = dirname(dir); if the hit was a standard location (index below
 * the std list length), the winning dir is also appended to ficus_path.
 * found_root stays "" if nothing qualifies. */
FX_EXTERN_C int _fx_M8CompilerFM15find_ficus_dirsT2SLS0(struct _fx_T2SLS* fx_result, void* fx_fv)
{
_fx_LS ficus_path_0 = 0;
fx_str_t v_0 = {0};
fx_str_t v_1 = {0};
fx_str_t v_2 = {0};
fx_str_t ficus_app_path_0 = {0};
fx_str_t v_3 = {0};
fx_str_t ficus_pp_path_0 = {0};
fx_str_t v_4 = {0};
fx_str_t v_5 = {0};
fx_str_t v_6 = {0};
fx_str_t v_7 = {0};
fx_str_t ficus_inst_path_0 = {0};
fx_str_t v_8 = {0};
fx_str_t v_9 = {0};
fx_str_t v_10 = {0};
fx_str_t v_11 = {0};
_fx_LS v_12 = 0;
_fx_LS std_ficus_path_0 = 0;
_fx_Ta2LS v_13 = {0};
_fx_LS search_path_0 = 0;
fx_str_t found_0 = {0};
int fx_status = 0;
fx_str_t slit_0 = FX_MAKE_STR("FICUS_PATH");
FX_CALL(_fx_M3SysFM7getpathLS1S(&slit_0, &ficus_path_0, 0), _fx_cleanup);
/* Resolve the executable's directory from argv[0] relative to cwd. */
FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_0, 0), _fx_cleanup);
if (_fx_g9Sys__argv != 0) {
fx_copy_str(&_fx_g9Sys__argv->hd, &v_1);
}
else {
FX_FAST_THROW(FX_EXN_NullListError, _fx_cleanup);
}
FX_CHECK_EXN(_fx_cleanup);
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_0, &v_1, &v_2, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_2, &ficus_app_path_0, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_3, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_3, &ficus_pp_path_0, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_4, 0), _fx_cleanup);
/* Installed layout: <parent>/lib/ficus-<major>.<minor> */
FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_5, 0), _fx_cleanup);
FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_6, 0), _fx_cleanup);
fx_str_t slit_1 = FX_MAKE_STR("lib/ficus-");
fx_str_t slit_2 = FX_MAKE_STR(".");
{
const fx_str_t strs_0[] = { slit_1, v_5, slit_2, v_6 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 4, &v_7), _fx_cleanup);
}
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_4, &v_7, &ficus_inst_path_0, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_8, 0), _fx_cleanup);
fx_str_t slit_3 = FX_MAKE_STR("lib");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_8, &slit_3, &v_9, 0), _fx_cleanup);
fx_str_t slit_4 = FX_MAKE_STR("lib");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_pp_path_0, &slit_4, &v_10, 0), _fx_cleanup);
fx_str_t slit_5 = FX_MAKE_STR("lib");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_inst_path_0, &slit_5, &v_11, 0), _fx_cleanup);
/* std_ficus_path = [<app>/../lib; <pp>/lib; <inst>/lib] */
FX_CALL(_fx_cons_LS(&v_11, 0, true, &v_12), _fx_cleanup);
FX_CALL(_fx_cons_LS(&v_10, v_12, false, &v_12), _fx_cleanup);
FX_CALL(_fx_cons_LS(&v_9, v_12, true, &std_ficus_path_0), _fx_cleanup);
int_ std_ficus_path_len_0 = _fx_M8CompilerFM6lengthi1LS(std_ficus_path_0, 0);
/* search_path = std_ficus_path ++ ficus_path (inlined concatenation) */
_fx_make_Ta2LS(std_ficus_path_0, ficus_path_0, &v_13);
if (v_13.t0 == 0) {
FX_COPY_PTR(ficus_path_0, &search_path_0);
}
else if (v_13.t1 == 0) {
FX_COPY_PTR(std_ficus_path_0, &search_path_0);
}
else {
_fx_LS v_14 = 0;
_fx_LS lstend_0 = 0;
_fx_LS lst_0 = std_ficus_path_0;
for (; lst_0; lst_0 = lst_0->tl) {
fx_str_t* x_0 = &lst_0->hd;
_fx_LS node_0 = 0;
FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0);
FX_LIST_APPEND(v_14, lstend_0, node_0);
_fx_catch_0: ;
FX_CHECK_EXN(_fx_catch_1);
}
_fx_M8CompilerFM5link2LS2LSLS(v_14, ficus_path_0, &search_path_0, 0);
_fx_catch_1: ;
if (v_14) {
_fx_free_LS(&v_14);
}
}
FX_CHECK_EXN(_fx_cleanup);
fx_str_t slit_6 = FX_MAKE_STR("");
fx_copy_str(&slit_6, &found_0);
/* Scan search_path for the first directory that holds both Builtins.fx
 * and ../runtime/ficus/ficus.h. */
int_ i_0 = 0;
_fx_LS lst_1 = search_path_0;
for (; lst_1; lst_1 = lst_1->tl, i_0 += 1) {
fx_str_t builtins_fx_0 = {0};
fx_str_t ficus_h_0 = {0};
fx_str_t v_15 = {0};
_fx_LS v_16 = 0;
_fx_Ta2LS v_17 = {0};
_fx_LS v_18 = 0;
fx_str_t* d_0 = &lst_1->hd;
fx_str_t slit_7 = FX_MAKE_STR("Builtins.fx");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_7, &builtins_fx_0, 0), _fx_catch_4);
fx_str_t slit_8 = FX_MAKE_STR("../runtime/ficus/ficus.h");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_8, &ficus_h_0, 0), _fx_catch_4);
bool v_19;
bool res_0;
FX_CALL(_fx_M8FilenameFM6existsB1S(&builtins_fx_0, &res_0, 0), _fx_catch_4);
if (res_0) {
FX_CALL(_fx_M8FilenameFM6existsB1S(&ficus_h_0, &v_19, 0), _fx_catch_4);
}
else {
v_19 = false;
}
if (v_19) {
FX_CALL(_fx_M8FilenameFM7dirnameS1S(d_0, &v_15, 0), _fx_catch_4);
FX_FREE_STR(&found_0);
fx_copy_str(&v_15, &found_0);
/* Hit in a standard location: ficus_path := ficus_path ++ [d]. */
if (i_0 < std_ficus_path_len_0) {
FX_CALL(_fx_cons_LS(d_0, 0, true, &v_16), _fx_catch_4);
_fx_make_Ta2LS(ficus_path_0, v_16, &v_17);
if (v_17.t0 == 0) {
FX_COPY_PTR(v_16, &v_18);
}
else if (v_17.t1 == 0) {
FX_COPY_PTR(ficus_path_0, &v_18);
}
else {
_fx_LS v_20 = 0;
_fx_LS lstend_1 = 0;
_fx_LS lst_2 = ficus_path_0;
for (; lst_2; lst_2 = lst_2->tl) {
fx_str_t* x_1 = &lst_2->hd;
_fx_LS node_1 = 0;
FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1), _fx_catch_2);
FX_LIST_APPEND(v_20, lstend_1, node_1);
_fx_catch_2: ;
FX_CHECK_EXN(_fx_catch_3);
}
_fx_M8CompilerFM5link2LS2LSLS(v_20, v_16, &v_18, 0);
_fx_catch_3: ;
if (v_20) {
_fx_free_LS(&v_20);
}
}
FX_CHECK_EXN(_fx_catch_4);
_fx_free_LS(&ficus_path_0);
FX_COPY_PTR(v_18, &ficus_path_0);
}
FX_BREAK(_fx_catch_4);
}
_fx_catch_4: ;
if (v_18) {
_fx_free_LS(&v_18);
}
_fx_free_Ta2LS(&v_17);
if (v_16) {
_fx_free_LS(&v_16);
}
FX_FREE_STR(&v_15);
FX_FREE_STR(&ficus_h_0);
FX_FREE_STR(&builtins_fx_0);
FX_CHECK_BREAK();
FX_CHECK_EXN(_fx_cleanup);
}
_fx_make_T2SLS(&found_0, ficus_path_0, fx_result);
_fx_cleanup: ;
if (ficus_path_0) {
_fx_free_LS(&ficus_path_0);
}
FX_FREE_STR(&v_0);
FX_FREE_STR(&v_1);
FX_FREE_STR(&v_2);
FX_FREE_STR(&ficus_app_path_0);
FX_FREE_STR(&v_3);
FX_FREE_STR(&ficus_pp_path_0);
FX_FREE_STR(&v_4);
FX_FREE_STR(&v_5);
FX_FREE_STR(&v_6);
FX_FREE_STR(&v_7);
FX_FREE_STR(&ficus_inst_path_0);
FX_FREE_STR(&v_8);
FX_FREE_STR(&v_9);
FX_FREE_STR(&v_10);
FX_FREE_STR(&v_11);
if (v_12) {
_fx_free_LS(&v_12);
}
if (std_ficus_path_0) {
_fx_free_LS(&std_ficus_path_0);
}
_fx_free_Ta2LS(&v_13);
if (search_path_0) {
_fx_free_LS(&search_path_0);
}
FX_FREE_STR(&found_0);
return fx_status;
}
/* Compiler.parse_all(fname0, ficus_path): parses the root module `fname0_0`
 * and, transitively, every module it imports.
 * - Builds the include-dir list: [dirname(fname0); cwd] (deduped when equal)
 *   ++ Options.opt.include_path ++ ficus_path, each normalized against cwd.
 * - Registers the root module and processes a work-list (queue of module
 *   indices): each unparsed module is marked parsed, gets its preamble from
 *   get_preamble, is handed to Parser.parse, and its not-yet-parsed
 *   dependencies are pushed onto the queue.
 * - Lexer/parse errors are caught per-module and printed (location, message);
 *   any other exception is reported generically. Each failure clears ok.
 * Returns ok (all modules parsed cleanly) via fx_result; 0 as fx_status
 * unless an unrecoverable FX error occurred.
 * Fix vs. previous revision: the generic-exception message spelled
 * "occured"; corrected to "occurred". */
FX_EXTERN_C int _fx_M8CompilerFM9parse_allB2SLS(
fx_str_t* fname0_0,
struct _fx_LS_data_t* ficus_path_0,
bool* fx_result,
void* fx_fv)
{
fx_str_t cwd_0 = {0};
fx_str_t fname0_1 = {0};
fx_str_t dir0_0 = {0};
_fx_LS inc_dirs0_0 = 0;
_fx_LS v_0 = 0;
_fx_LS v_1 = 0;
_fx_LS inc_dirs0_1 = 0;
_fx_LS inc_dirs0_2 = 0;
_fx_LS inc_dirs0_3 = 0;
fx_str_t v_2 = {0};
fx_str_t v_3 = {0};
_fx_Li queue_0 = 0;
int fx_status = 0;
FX_CALL(_fx_M8FilenameFM6getcwdS0(&cwd_0, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, fname0_0, &fname0_1, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM7dirnameS1S(&fname0_1, &dir0_0, 0), _fx_cleanup);
/* Seed include dirs with the root module's dir and cwd (once if equal). */
bool v_4 = _fx_F6__eq__B2SS(&dir0_0, &cwd_0, 0);
if (v_4) {
FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &inc_dirs0_0), _fx_cleanup);
}
else {
FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &v_0), _fx_cleanup);
FX_CALL(_fx_cons_LS(&dir0_0, v_0, true, &inc_dirs0_0), _fx_cleanup);
}
/* inc_dirs0_1 = inc_dirs0_0 ++ Options.opt.include_path */
FX_COPY_PTR(_fx_g12Options__opt.include_path, &v_1);
if (inc_dirs0_0 == 0) {
FX_COPY_PTR(v_1, &inc_dirs0_1);
}
else if (v_1 == 0) {
FX_COPY_PTR(inc_dirs0_0, &inc_dirs0_1);
}
else {
_fx_LS v_5 = 0;
_fx_LS lstend_0 = 0;
_fx_LS lst_0 = inc_dirs0_0;
for (; lst_0; lst_0 = lst_0->tl) {
fx_str_t* x_0 = &lst_0->hd;
_fx_LS node_0 = 0;
FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0);
FX_LIST_APPEND(v_5, lstend_0, node_0);
_fx_catch_0: ;
FX_CHECK_EXN(_fx_catch_1);
}
_fx_M8CompilerFM5link2LS2LSLS(v_5, v_1, &inc_dirs0_1, 0);
_fx_catch_1: ;
if (v_5) {
_fx_free_LS(&v_5);
}
}
FX_CHECK_EXN(_fx_cleanup);
/* inc_dirs0_2 = inc_dirs0_1 ++ ficus_path */
if (inc_dirs0_1 == 0) {
FX_COPY_PTR(ficus_path_0, &inc_dirs0_2);
}
else if (ficus_path_0 == 0) {
FX_COPY_PTR(inc_dirs0_1, &inc_dirs0_2);
}
else {
_fx_LS v_6 = 0;
_fx_LS lstend_1 = 0;
_fx_LS lst_1 = inc_dirs0_1;
for (; lst_1; lst_1 = lst_1->tl) {
fx_str_t* x_1 = &lst_1->hd;
_fx_LS node_1 = 0;
FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1), _fx_catch_2);
FX_LIST_APPEND(v_6, lstend_1, node_1);
_fx_catch_2: ;
FX_CHECK_EXN(_fx_catch_3);
}
_fx_M8CompilerFM5link2LS2LSLS(v_6, ficus_path_0, &inc_dirs0_2, 0);
_fx_catch_3: ;
if (v_6) {
_fx_free_LS(&v_6);
}
}
FX_CHECK_EXN(_fx_cleanup);
/* Normalize every include dir against cwd. */
_fx_LS lstend_2 = 0;
_fx_LS lst_2 = inc_dirs0_2;
for (; lst_2; lst_2 = lst_2->tl) {
fx_str_t res_0 = {0};
fx_str_t* d_0 = &lst_2->hd;
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, d_0, &res_0, 0), _fx_catch_4);
_fx_LS node_2 = 0;
FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_2), _fx_catch_4);
FX_LIST_APPEND(inc_dirs0_3, lstend_2, node_2);
_fx_catch_4: ;
FX_FREE_STR(&res_0);
FX_CHECK_EXN(_fx_cleanup);
}
/* Register the root module and seed the work queue with its index. */
FX_CALL(_fx_M8FilenameFM8basenameS1S(&fname0_1, &v_2, 0), _fx_cleanup);
FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_2, &v_3, 0), _fx_cleanup);
_fx_R9Ast__id_t name0_id_0;
FX_CALL(_fx_M3AstFM6get_idRM4id_t1S(&v_3, &name0_id_0, 0), _fx_cleanup);
int_ m_idx_0;
FX_CALL(_fx_M3AstFM11find_modulei2RM4id_tS(&name0_id_0, &fname0_1, &m_idx_0, 0), _fx_cleanup);
FX_CALL(_fx_cons_Li(m_idx_0, 0, true, &queue_0), _fx_cleanup);
bool ok_0 = true;
while (queue_0 != 0) {
_fx_Li v_7 = 0;
_fx_N16Ast__defmodule_t minfo_0 = 0;
fx_str_t mfname_0 = {0};
fx_exn_t exn_0 = {0};
/* Pop the head module index off the queue. */
int_ m_idx_1;
if (queue_0 != 0) {
m_idx_1 = queue_0->hd;
}
else {
FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13);
}
FX_CHECK_EXN(_fx_catch_13);
if (queue_0 != 0) {
FX_COPY_PTR(queue_0->tl, &v_7);
}
else {
FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13);
}
FX_CHECK_EXN(_fx_catch_13);
FX_FREE_LIST_SIMPLE(&queue_0);
FX_COPY_PTR(v_7, &queue_0);
FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_13);
FX_COPY_PTR(*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1), &minfo_0);
fx_copy_str(&minfo_0->u.defmodule_t.t1, &mfname_0);
/* t7 == "already parsed"; skip parsed modules. */
if (!minfo_0->u.defmodule_t.t7) {
fx_str_t dir1_0 = {0};
_fx_LS v_8 = 0;
_fx_LS inc_dirs_0 = 0;
_fx_LN14Lexer__token_t preamble_0 = 0;
_fx_Li v_9 = 0;
_fx_Li __fold_result___0 = 0;
_fx_Li v_10 = 0;
FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9);
(*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t7 = true;
/* Prepend this module's own dir to the include list (unless it is dir0). */
FX_CALL(_fx_M8FilenameFM7dirnameS1S(&mfname_0, &dir1_0, 0), _fx_catch_9);
bool v_11 = _fx_F6__eq__B2SS(&dir1_0, &dir0_0, 0);
if (!v_11) {
FX_CALL(_fx_cons_LS(&dir1_0, 0, true, &v_8), _fx_catch_9);
}
if (v_8 == 0) {
FX_COPY_PTR(inc_dirs0_3, &inc_dirs_0);
}
else if (inc_dirs0_3 == 0) {
FX_COPY_PTR(v_8, &inc_dirs_0);
}
else {
_fx_LS v_12 = 0;
_fx_LS lstend_3 = 0;
_fx_LS lst_3 = v_8;
for (; lst_3; lst_3 = lst_3->tl) {
fx_str_t* x_2 = &lst_3->hd;
_fx_LS node_3 = 0;
FX_CALL(_fx_cons_LS(x_2, 0, false, &node_3), _fx_catch_5);
FX_LIST_APPEND(v_12, lstend_3, node_3);
_fx_catch_5: ;
FX_CHECK_EXN(_fx_catch_6);
}
_fx_M8CompilerFM5link2LS2LSLS(v_12, inc_dirs0_3, &inc_dirs_0, 0);
_fx_catch_6: ;
if (v_12) {
_fx_free_LS(&v_12);
}
}
FX_CHECK_EXN(_fx_catch_9);
/* Parse the module with its preamble; accumulate success into ok. */
FX_CALL(_fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S(&mfname_0, &preamble_0, 0), _fx_catch_9);
bool v_13;
FX_CALL(_fx_M6ParserFM5parseB3iLN14Lexer__token_tLS(m_idx_1, preamble_0, inc_dirs_0, &v_13, 0), _fx_catch_9);
ok_0 = (bool)(ok_0 & v_13);
/* Reverse the module's dependency list, then queue unparsed deps. */
FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9);
FX_COPY_PTR((*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t5, &v_9);
_fx_Li lst_4 = v_9;
for (; lst_4; lst_4 = lst_4->tl) {
_fx_Li r_0 = 0;
int_ a_0 = lst_4->hd;
FX_COPY_PTR(__fold_result___0, &r_0);
FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_7);
FX_FREE_LIST_SIMPLE(&__fold_result___0);
FX_COPY_PTR(r_0, &__fold_result___0);
_fx_catch_7: ;
FX_FREE_LIST_SIMPLE(&r_0);
FX_CHECK_EXN(_fx_catch_9);
}
FX_COPY_PTR(__fold_result___0, &v_10);
_fx_Li lst_5 = v_10;
for (; lst_5; lst_5 = lst_5->tl) {
_fx_N16Ast__defmodule_t dep_minfo_0 = 0;
_fx_Li v_14 = 0;
int_ dep_0 = lst_5->hd;
FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(dep_0, &dep_minfo_0, 0), _fx_catch_8);
if (!dep_minfo_0->u.defmodule_t.t7) {
FX_CALL(_fx_cons_Li(dep_0, queue_0, true, &v_14), _fx_catch_8);
FX_FREE_LIST_SIMPLE(&queue_0);
FX_COPY_PTR(v_14, &queue_0);
}
_fx_catch_8: ;
FX_FREE_LIST_SIMPLE(&v_14);
if (dep_minfo_0) {
_fx_free_N16Ast__defmodule_t(&dep_minfo_0);
}
FX_CHECK_EXN(_fx_catch_9);
}
_fx_catch_9: ;
FX_FREE_STR(&dir1_0);
if (v_8) {
_fx_free_LS(&v_8);
}
if (inc_dirs_0) {
_fx_free_LS(&inc_dirs_0);
}
if (preamble_0) {
_fx_free_LN14Lexer__token_t(&preamble_0);
}
FX_FREE_LIST_SIMPLE(&v_9);
FX_FREE_LIST_SIMPLE(&__fold_result___0);
FX_FREE_LIST_SIMPLE(&v_10);
/* Per-module error reporting: consume the pending exception, print a
 * diagnostic, clear ok, and keep processing the rest of the queue. */
if (fx_status < 0) {
fx_exn_get_and_reset(fx_status, &exn_0);
fx_status = 0;
int tag_0 = exn_0.tag;
if (tag_0 == _FX_EXN_E22LexerUtils__LexerError) {
fx_str_t v_15 = {0};
fx_str_t v_16 = {0};
fx_str_t v_17 = {0};
_fx_T2Ta2iS* vcase_0 = &FX_EXN_DATA(_fx_E22LexerUtils__LexerError_data_t, exn_0.data);
_fx_Ta2i* v_18 = &vcase_0->t0;
FX_CALL(_fx_F6stringS1i(v_18->t0, &v_15, 0), _fx_catch_10);
FX_CALL(_fx_F6stringS1i(v_18->t1, &v_16, 0), _fx_catch_10);
fx_str_t slit_0 = FX_MAKE_STR(":");
fx_str_t slit_1 = FX_MAKE_STR(":");
fx_str_t slit_2 = FX_MAKE_STR(": error: ");
fx_str_t* msg_0 = &vcase_0->t1;
fx_str_t slit_3 = FX_MAKE_STR("\n");
{
const fx_str_t strs_0[] = { mfname_0, slit_0, v_15, slit_1, v_16, slit_2, *msg_0, slit_3 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 8, &v_17), _fx_catch_10);
}
_fx_F12print_stringv1S(&v_17, 0);
fx_str_t slit_4 = FX_MAKE_STR("\n");
_fx_F12print_stringv1S(&slit_4, 0);
ok_0 = false;
_fx_catch_10: ;
FX_FREE_STR(&v_17);
FX_FREE_STR(&v_16);
FX_FREE_STR(&v_15);
}
else if (tag_0 == _FX_EXN_E18Parser__ParseError) {
fx_str_t v_19 = {0};
fx_str_t v_20 = {0};
_fx_T2R10Ast__loc_tS* vcase_1 = &FX_EXN_DATA(_fx_E18Parser__ParseError_data_t, exn_0.data);
FX_CALL(_fx_M3AstFM6stringS1RM5loc_t(&vcase_1->t0, &v_19, 0), _fx_catch_11);
fx_str_t slit_5 = FX_MAKE_STR(": error: ");
fx_str_t* msg_1 = &vcase_1->t1;
fx_str_t slit_6 = FX_MAKE_STR("\n");
{
const fx_str_t strs_1[] = { v_19, slit_5, *msg_1, slit_6 };
FX_CALL(fx_strjoin(0, 0, 0, strs_1, 4, &v_20), _fx_catch_11);
}
_fx_F12print_stringv1S(&v_20, 0);
fx_str_t slit_7 = FX_MAKE_STR("\n");
_fx_F12print_stringv1S(&slit_7, 0);
ok_0 = false;
_fx_catch_11: ;
FX_FREE_STR(&v_20);
FX_FREE_STR(&v_19);
}
else {
fx_str_t v_21 = {0};
fx_str_t v_22 = {0};
FX_CALL(_fx_F6stringS1E(&exn_0, &v_21, 0), _fx_catch_12);
fx_str_t slit_8 = FX_MAKE_STR(": exception ");
/* fixed typo: was " occured" */
fx_str_t slit_9 = FX_MAKE_STR(" occurred");
{
const fx_str_t strs_2[] = { mfname_0, slit_8, v_21, slit_9 };
FX_CALL(fx_strjoin(0, 0, 0, strs_2, 4, &v_22), _fx_catch_12);
}
_fx_F12print_stringv1S(&v_22, 0);
fx_str_t slit_10 = FX_MAKE_STR("\n");
_fx_F12print_stringv1S(&slit_10, 0);
ok_0 = false;
_fx_catch_12: ;
FX_FREE_STR(&v_22);
FX_FREE_STR(&v_21);
}
FX_CHECK_EXN(_fx_catch_13);
}
}
_fx_catch_13: ;
fx_free_exn(&exn_0);
FX_FREE_STR(&mfname_0);
if (minfo_0) {
_fx_free_N16Ast__defmodule_t(&minfo_0);
}
FX_FREE_LIST_SIMPLE(&v_7);
FX_CHECK_EXN(_fx_cleanup);
}
*fx_result = ok_0;
_fx_cleanup: ;
FX_FREE_STR(&cwd_0);
FX_FREE_STR(&fname0_1);
FX_FREE_STR(&dir0_0);
if (inc_dirs0_0) {
_fx_free_LS(&inc_dirs0_0);
}
if (v_0) {
_fx_free_LS(&v_0);
}
if (v_1) {
_fx_free_LS(&v_1);
}
if (inc_dirs0_1) {
_fx_free_LS(&inc_dirs0_1);
}
if (inc_dirs0_2) {
_fx_free_LS(&inc_dirs0_2);
}
if (inc_dirs0_3) {
_fx_free_LS(&inc_dirs0_3);
}
FX_FREE_STR(&v_2);
FX_FREE_STR(&v_3);
FX_FREE_LIST_SIMPLE(&queue_0);
return fx_status;
}
/* Compiler.toposort(graph): topologically sorts the dependency graph given as
 * a list of (vertex, dependency-list) pairs. The adjacency lists are copied
 * into an array indexed by vertex, a `processed` flag array is allocated, and
 * dfs is run from every unprocessed vertex accumulating finished vertices in
 * result_ref (dfs pushes in post-order, so result_ref holds reverse order).
 * The final fold reverses that list to produce the dependency-first ordering.
 * Cycle detection lives inside dfs (it throws Fail). Returns 0 on success. */
FX_EXTERN_C int _fx_M8CompilerFM8toposortLi1LT2iLi(
struct _fx_LT2iLi_data_t* graph_0,
struct _fx_Li_data_t** fx_result,
void* fx_fv)
{
fx_arr_t graph_1 = {0};
fx_arr_t processed_0 = {0};
_fx_rLi result_ref_0 = 0;
_fx_Li __fold_result___0 = 0;
_fx_Li result_0 = 0;
int fx_status = 0;
_fx_Li* dstptr_0 = 0;
/* Pack the per-vertex dependency lists into a 1-D array. */
_fx_LT2iLi lst_0 = graph_0;
int_ len_0 = fx_list_length(lst_0);
{
const int_ shape_0[] = { len_0 };
FX_CALL(fx_make_arr(1, shape_0, sizeof(_fx_Li), (fx_free_t)fx_free_list_simple, (fx_copy_t)fx_copy_ptr, 0, &graph_1),
_fx_cleanup);
}
dstptr_0 = (_fx_Li*)graph_1.data;
for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) {
_fx_T2iLi* __pat___0 = &lst_0->hd; FX_COPY_PTR(__pat___0->t1, dstptr_0);
}
/* processed[i] = false for all vertices. */
int_ nvtx_0 = FX_ARR_SIZE(graph_1, 0);
bool* dstptr_1 = 0;
{
const int_ shape_1[] = { nvtx_0 };
FX_CALL(fx_make_arr(1, shape_1, sizeof(bool), 0, 0, 0, &processed_0), _fx_cleanup);
}
dstptr_1 = (bool*)processed_0.data;
for (int_ i_0 = 0; i_0 < nvtx_0; i_0++, dstptr_1++) {
*dstptr_1 = false;
}
FX_CALL(_fx_make_rLi(0, &result_ref_0), _fx_cleanup);
/* DFS from each unprocessed vertex. */
for (int_ i_1 = 0; i_1 < nvtx_0; i_1++) {
FX_CHKIDX(FX_CHKIDX1(processed_0, 0, i_1), _fx_catch_0);
if (*FX_PTR_1D(bool, processed_0, i_1)) {
FX_CONTINUE(_fx_catch_0);
}
FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(i_1, 0, &graph_1, &processed_0, result_ref_0, 0), _fx_catch_0);
_fx_catch_0: ;
FX_CHECK_CONTINUE();
FX_CHECK_EXN(_fx_cleanup);
}
/* Reverse the accumulated order (dfs conses post-order onto the front). */
FX_COPY_PTR(result_ref_0->data, &result_0);
_fx_Li lst_1 = result_0;
for (; lst_1; lst_1 = lst_1->tl) {
_fx_Li r_0 = 0;
int_ a_0 = lst_1->hd;
FX_COPY_PTR(__fold_result___0, &r_0);
FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_1);
FX_FREE_LIST_SIMPLE(&__fold_result___0);
FX_COPY_PTR(r_0, &__fold_result___0);
_fx_catch_1: ;
FX_FREE_LIST_SIMPLE(&r_0);
FX_CHECK_EXN(_fx_cleanup);
}
FX_COPY_PTR(__fold_result___0, fx_result);
_fx_cleanup: ;
FX_FREE_ARR(&graph_1);
FX_FREE_ARR(&processed_0);
if (result_ref_0) {
_fx_free_rLi(&result_ref_0);
}
FX_FREE_LIST_SIMPLE(&__fold_result___0);
FX_FREE_LIST_SIMPLE(&result_0);
return fx_status;
}
/* Depth-first visit for toposort: vertex i, path `visited_0` (list of ancestors
 * on the current DFS path), adjacency array `graph_0`, `processed_0` flags, and
 * the shared result accumulator `result_ref_0`.
 * - If i already occurs on the current path, a cycle exists: the module names
 *   on the path are joined and a Fail("error: cyclic dependency ...") is thrown.
 * - Otherwise recurses into each unprocessed dependency, then conses i onto
 *   the front of *result_ref (post-order) and marks it processed.
 * fx_check_stack() guards against runaway recursion.
 * Fix vs. previous revision: the cycle error message spelled "cyclib";
 * corrected to "cyclic". */
static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(
int_ i_0,
struct _fx_Li_data_t* visited_0,
fx_arr_t* graph_0,
fx_arr_t* processed_0,
struct _fx_rLi_data_t* result_ref_0,
void* fx_fv)
{
_fx_Li deps_0 = 0;
_fx_LS v_0 = 0;
fx_str_t vlist_0 = {0};
fx_str_t v_1 = {0};
fx_exn_t v_2 = {0};
_fx_Li visited_1 = 0;
_fx_Li v_3 = 0;
int fx_status = 0;
FX_CALL(fx_check_stack(), _fx_cleanup);
_fx_Li* result_0 = &result_ref_0->data;
FX_CHKIDX(FX_CHKIDX1(*graph_0, 0, i_0), _fx_cleanup);
FX_COPY_PTR(*FX_PTR_1D(_fx_Li, *graph_0, i_0), &deps_0);
/* Is i already on the current DFS path? */
bool __fold_result___0 = false;
_fx_Li lst_0 = visited_0;
for (; lst_0; lst_0 = lst_0->tl) {
int_ b_0 = lst_0->hd;
if (i_0 == b_0) {
__fold_result___0 = true; FX_BREAK(_fx_catch_0);
}
_fx_catch_0: ;
FX_CHECK_BREAK();
FX_CHECK_EXN(_fx_cleanup);
}
if (__fold_result___0) {
/* Cycle: format the module names on the path and throw Fail. */
_fx_LS lstend_0 = 0;
_fx_Li lst_1 = visited_0;
for (; lst_1; lst_1 = lst_1->tl) {
fx_str_t res_0 = {0};
int_ j_0 = lst_1->hd;
_fx_R9Ast__id_t v_4;
FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(j_0, &v_4, 0), _fx_catch_1);
FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_4, &res_0, 0), _fx_catch_1);
_fx_LS node_0 = 0;
FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_0), _fx_catch_1);
FX_LIST_APPEND(v_0, lstend_0, node_0);
_fx_catch_1: ;
FX_FREE_STR(&res_0);
FX_CHECK_EXN(_fx_cleanup);
}
fx_str_t slit_0 = FX_MAKE_STR(", ");
FX_CALL(_fx_F4joinS2SLS(&slit_0, v_0, &vlist_0, 0), _fx_cleanup);
/* fixed typo: was "cyclib" */
fx_str_t slit_1 = FX_MAKE_STR("error: cyclic dependency between the modules: ");
{
const fx_str_t strs_0[] = { slit_1, vlist_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_1), _fx_cleanup);
}
FX_CALL(_fx_F9make_FailE1S(&v_1, &v_2), _fx_cleanup);
FX_THROW(&v_2, true, _fx_cleanup);
}
/* Recurse into each unprocessed dependency with i added to the path. */
FX_CALL(_fx_cons_Li(i_0, visited_0, true, &visited_1), _fx_cleanup);
_fx_Li lst_2 = deps_0;
for (; lst_2; lst_2 = lst_2->tl) {
int_ j_1 = lst_2->hd;
FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, j_1), _fx_catch_2);
if (*FX_PTR_1D(bool, *processed_0, j_1)) {
FX_CONTINUE(_fx_catch_2);
}
FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(j_1, visited_1, graph_0, processed_0, result_ref_0, 0), _fx_catch_2);
_fx_catch_2: ;
FX_CHECK_CONTINUE();
FX_CHECK_EXN(_fx_cleanup);
}
/* Post-order: push i onto the shared result and mark it processed. */
FX_CALL(_fx_cons_Li(i_0, *result_0, true, &v_3), _fx_cleanup);
FX_FREE_LIST_SIMPLE(result_0);
FX_COPY_PTR(v_3, result_0);
FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, i_0), _fx_cleanup);
*FX_PTR_1D(bool, *processed_0, i_0) = true;
_fx_cleanup: ;
FX_FREE_LIST_SIMPLE(&deps_0);
if (v_0) {
_fx_free_LS(&v_0);
}
FX_FREE_STR(&vlist_0);
FX_FREE_STR(&v_1);
fx_free_exn(&v_2);
FX_FREE_LIST_SIMPLE(&visited_1);
FX_FREE_LIST_SIMPLE(&v_3);
return fx_status;
}
FX_EXTERN_C int _fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods_0,
struct _fx_LR17K_form__kmodule_t_data_t** fx_result,
void* fx_fv)
{
fx_arr_t skip_flags_0 = {0};
fx_str_t build_root_dir_0 = {0};
fx_str_t build_dir_0 = {0};
fx_str_t obj_ext_0 = {0};
_fx_LR17K_form__kmodule_t kmods_1 = 0;
fx_exn_t v_0 = {0};
int fx_status = 0;
bool* dstptr_0 = 0;
int_ v_1 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0);
{
const int_ shape_0[] = { v_1 };
FX_CALL(fx_make_arr(1, shape_0, sizeof(bool), 0, 0, 0, &skip_flags_0), _fx_cleanup);
}
dstptr_0 = (bool*)skip_flags_0.data;
for (int_ i_0 = 0; i_0 < v_1; i_0++, dstptr_0++) {
*dstptr_0 = false;
}
fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0);
bool ok_0;
FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup);
fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0);
bool ok_1;
if (ok_0) {
FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup);
}
else {
ok_1 = false;
}
if (_fx_g10Sys__win32) {
fx_str_t slit_0 = FX_MAKE_STR(".obj"); fx_copy_str(&slit_0, &obj_ext_0);
}
else {
fx_str_t slit_1 = FX_MAKE_STR(".o"); fx_copy_str(&slit_1, &obj_ext_0);
}
_fx_LR17K_form__kmodule_t lstend_0 = 0;
_fx_LR17K_form__kmodule_t lst_0 = kmods_0;
for (; lst_0; lst_0 = lst_0->tl) {
_fx_R14Ast__pragmas_t km_pragmas_0 = {0};
_fx_Li km_deps_0 = 0;
_fx_LN14K_form__kexp_t km_top_0 = 0;
fx_str_t km_cname_0 = {0};
fx_str_t ext_0 = {0};
fx_str_t mname_0 = {0};
fx_str_t cname_0 = {0};
fx_str_t k_filename_0 = {0};
fx_str_t c_filename_0 = {0};
fx_str_t o_filename_0 = {0};
fx_str_t new_kform_0 = {0};
fx_str_t old_kform_0 = {0};
fx_exn_t exn_0 = {0};
_fx_T3BBS v_2 = {0};
fx_exn_t exn_1 = {0};
fx_str_t v_3 = {0};
fx_str_t status_j_0 = {0};
fx_str_t status_j_1 = {0};
fx_str_t v_4 = {0};
_fx_R17K_form__kmodule_t rec_0 = {0};
_fx_R17K_form__kmodule_t* km_0 = &lst_0->hd;
_fx_copy_R14Ast__pragmas_t(&km_0->km_pragmas, &km_pragmas_0);
FX_COPY_PTR(km_0->km_deps, &km_deps_0);
FX_COPY_PTR(km_0->km_top, &km_top_0);
fx_copy_str(&km_0->km_cname, &km_cname_0);
int_ km_idx_0 = km_0->km_idx;
bool is_cpp_0;
if (_fx_g12Options__opt.compile_by_cpp) {
is_cpp_0 = true;
}
else {
is_cpp_0 = km_pragmas_0.pragma_cpp;
}
if (is_cpp_0) {
fx_str_t slit_2 = FX_MAKE_STR(".cpp"); fx_copy_str(&slit_2, &ext_0);
}
else {
fx_str_t slit_3 = FX_MAKE_STR(".c"); fx_copy_str(&slit_3, &ext_0);
}
FX_CALL(_fx_M8K_mangleFM12mangle_mnameS1S(&km_cname_0, &mname_0, 0), _fx_catch_5);
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &mname_0, &cname_0, 0), _fx_catch_5);
fx_str_t slit_4 = FX_MAKE_STR(".k");
{
const fx_str_t strs_0[] = { cname_0, slit_4 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &k_filename_0), _fx_catch_5);
}
{
const fx_str_t strs_1[] = { cname_0, ext_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &c_filename_0), _fx_catch_5);
}
{
const fx_str_t strs_2[] = { cname_0, obj_ext_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &o_filename_0), _fx_catch_5);
}
FX_CALL(_fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t(km_top_0, &new_kform_0, 0), _fx_catch_5);
bool have_k_0;
FX_CALL(_fx_M8FilenameFM6existsB1S(&k_filename_0, &have_k_0, 0), _fx_catch_5);
bool have_c_0;
FX_CALL(_fx_M8FilenameFM6existsB1S(&c_filename_0, &have_c_0, 0), _fx_catch_5);
bool have_o_0;
FX_CALL(_fx_M8FilenameFM6existsB1S(&o_filename_0, &have_o_0, 0), _fx_catch_5);
bool have_all_0 = (bool)((bool)(have_k_0 & have_c_0) & have_o_0);
bool t_0;
if (_fx_g12Options__opt.force_rebuild) {
t_0 = true;
}
else {
t_0 = !have_all_0;
}
if (t_0) {
fx_str_t slit_5 = FX_MAKE_STR(""); fx_copy_str(&slit_5, &old_kform_0);
}
else {
FX_CALL(_fx_M4FileFM9read_utf8S1S(&k_filename_0, &old_kform_0, 0), _fx_catch_0);
_fx_catch_0: ;
if (fx_status < 0) {
fx_exn_get_and_reset(fx_status, &exn_0);
fx_status = 0;
FX_FREE_STR(&old_kform_0);
int tag_0 = exn_0.tag;
bool res_0;
if (tag_0 == FX_EXN_IOError) {
res_0 = true;
}
else if (tag_0 == FX_EXN_FileOpenError) {
res_0 = true;
}
else {
res_0 = false;
}
FX_CHECK_EXN(_fx_catch_5);
if (res_0) {
fx_str_t slit_6 = FX_MAKE_STR(""); fx_copy_str(&slit_6, &old_kform_0); goto _fx_endmatch_0;
}
FX_RETHROW(&exn_0, _fx_catch_5);
_fx_endmatch_0: ;
FX_CHECK_EXN(_fx_catch_5);
}
}
bool v_5 = _fx_F6__eq__B2SS(&new_kform_0, &old_kform_0, 0);
if (v_5) {
fx_str_t slit_7 = FX_MAKE_STR(""); _fx_make_T3BBS(true, true, &slit_7, &v_2);
}
else {
bool well_written_0;
FX_CALL(_fx_M4FileFM10write_utf8v2SS(&k_filename_0, &new_kform_0, 0), _fx_catch_1);
well_written_0 = true;
_fx_catch_1: ;
if (fx_status < 0) {
fx_exn_get_and_reset(fx_status, &exn_1);
fx_status = 0;
int tag_1 = exn_1.tag;
bool res_1;
if (tag_1 == FX_EXN_IOError) {
res_1 = true;
}
else if (tag_1 == FX_EXN_FileOpenError) {
res_1 = true;
}
else {
res_1 = false;
}
FX_CHECK_EXN(_fx_catch_5);
if (res_1) {
well_written_0 = false; goto _fx_endmatch_1;
}
FX_RETHROW(&exn_1, _fx_catch_5);
_fx_endmatch_1: ;
FX_CHECK_EXN(_fx_catch_5);
}
if (well_written_0) {
fx_str_t slit_8 = FX_MAKE_STR(""); fx_copy_str(&slit_8, &v_3);
}
else if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_9 = FX_MAKE_STR("[31;1mfailed to write .k[0m"); fx_copy_str(&slit_9, &v_3);
}
else {
fx_str_t slit_10 = FX_MAKE_STR("failed to write .k"); fx_copy_str(&slit_10, &v_3);
}
_fx_make_T3BBS(well_written_0, false, &v_3, &v_2);
}
bool ok_j_0 = v_2.t0;
bool same_kform_0 = v_2.t1;
fx_copy_str(&v_2.t2, &status_j_0);
ok_1 = (bool)(ok_1 & ok_j_0);
if (!same_kform_0) {
if (have_c_0) {
FX_CALL(_fx_M3SysFM6removev1S(&c_filename_0, 0), _fx_catch_5);
}
if (have_o_0) {
FX_CALL(_fx_M3SysFM6removev1S(&o_filename_0, 0), _fx_catch_5);
}
}
bool skip_module_0;
if (same_kform_0) {
bool __fold_result___0 = true;
_fx_Li lst_1 = km_deps_0;
for (; lst_1; lst_1 = lst_1->tl) {
int_ d_0 = lst_1->hd;
FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, d_0), _fx_catch_2);
if (!*FX_PTR_1D(bool, skip_flags_0, d_0)) {
__fold_result___0 = false; FX_BREAK(_fx_catch_2);
}
_fx_catch_2: ;
FX_CHECK_BREAK();
FX_CHECK_EXN(_fx_catch_5);
}
skip_module_0 = __fold_result___0;
}
else {
skip_module_0 = false;
}
if (FX_STR_LENGTH(status_j_0) != 0) {
fx_copy_str(&status_j_0, &status_j_1);
}
else if (skip_module_0) {
fx_str_t slit_11 = FX_MAKE_STR("skip"); fx_copy_str(&slit_11, &status_j_1);
}
else if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_12 = FX_MAKE_STR("[34;1mprocess[0m"); fx_copy_str(&slit_12, &status_j_1);
}
else {
fx_str_t slit_13 = FX_MAKE_STR("process"); fx_copy_str(&slit_13, &status_j_1);
}
fx_str_t slit_14 = FX_MAKE_STR("K ");
fx_str_t slit_15 = FX_MAKE_STR(": ");
{
const fx_str_t strs_3[] = { slit_14, km_cname_0, slit_15, status_j_1 };
FX_CALL(fx_strjoin(0, 0, 0, strs_3, 4, &v_4), _fx_catch_5);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_4, 0), _fx_catch_5);
if (skip_module_0) {
_fx_LN14K_form__kexp_t lst_2 = km_top_0;
for (; lst_2; lst_2 = lst_2->tl) {
_fx_N14K_form__kexp_t e_0 = lst_2->hd;
if (FX_REC_VARIANT_TAG(e_0) == 32) {
_fx_rR17K_form__kdeffun_t kf_0 = e_0->u.KDefFun;
_fx_N17Ast__fun_constr_t v_6 = kf_0->data.kf_flags.fun_flag_ctor;
if (v_6.tag == 1) {
_fx_N14K_form__ktyp_t kf_rt_0 = 0;
_fx_T2N14K_form__ktyp_tR10Ast__loc_t v_7 = {0};
_fx_N14K_form__kexp_t v_8 = 0;
_fx_R17K_form__kdeffun_t v_9 = {0};
_fx_R17K_form__kdeffun_t* v_10 = &kf_0->data;
_fx_R10Ast__loc_t kf_loc_0 = v_10->kf_loc;
FX_COPY_PTR(v_10->kf_rt, &kf_rt_0);
_fx_R16Ast__fun_flags_t kf_flags_0 = v_10->kf_flags;
_fx_R17K_form__kdeffun_t* v_11 = &kf_0->data;
_fx_make_T2N14K_form__ktyp_tR10Ast__loc_t(kf_rt_0, &kf_loc_0, &v_7);
fx_str_t slit_16 = FX_MAKE_STR("");
FX_CALL(_fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t(&slit_16, &v_7, &v_8),
_fx_catch_3);
_fx_R16Ast__fun_flags_t v_12 =
{ kf_flags_0.fun_flag_pure, true, kf_flags_0.fun_flag_have_keywords, false, kf_flags_0.fun_flag_nothrow,
kf_flags_0.fun_flag_really_nothrow, kf_flags_0.fun_flag_private, kf_flags_0.fun_flag_ctor,
kf_flags_0.fun_flag_method_of, kf_flags_0.fun_flag_uses_fv, kf_flags_0.fun_flag_recursive,
kf_flags_0.fun_flag_instance };
_fx_make_R17K_form__kdeffun_t(&v_11->kf_name, &v_11->kf_cname, v_11->kf_params, v_11->kf_rt, v_8, &v_12,
&v_11->kf_closure, v_11->kf_scope, &v_11->kf_loc, &v_9);
_fx_R17K_form__kdeffun_t* v_13 = &kf_0->data;
_fx_free_R17K_form__kdeffun_t(v_13);
_fx_copy_R17K_form__kdeffun_t(&v_9, v_13);
_fx_catch_3: ;
_fx_free_R17K_form__kdeffun_t(&v_9);
if (v_8) {
_fx_free_N14K_form__kexp_t(&v_8);
}
_fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&v_7);
if (kf_rt_0) {
_fx_free_N14K_form__ktyp_t(&kf_rt_0);
}
goto _fx_endmatch_2;
}
}
_fx_endmatch_2: ;
FX_CHECK_EXN(_fx_catch_4);
_fx_catch_4: ;
FX_CHECK_EXN(_fx_catch_5);
}
}
FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, km_idx_0), _fx_catch_5);
*FX_PTR_1D(bool, skip_flags_0, km_idx_0) = skip_module_0;
_fx_make_R17K_form__kmodule_t(&km_0->km_name, km_0->km_idx, km_0->km_toposort_idx, &km_0->km_cname, km_0->km_top,
km_0->km_deps, skip_module_0, km_0->km_main, &km_0->km_pragmas, &rec_0);
_fx_LR17K_form__kmodule_t node_0 = 0;
FX_CALL(_fx_cons_LR17K_form__kmodule_t(&rec_0, 0, false, &node_0), _fx_catch_5);
FX_LIST_APPEND(kmods_1, lstend_0, node_0);
_fx_catch_5: ;
_fx_free_R17K_form__kmodule_t(&rec_0);
FX_FREE_STR(&v_4);
FX_FREE_STR(&status_j_1);
FX_FREE_STR(&status_j_0);
FX_FREE_STR(&v_3);
fx_free_exn(&exn_1);
_fx_free_T3BBS(&v_2);
fx_free_exn(&exn_0);
FX_FREE_STR(&old_kform_0);
FX_FREE_STR(&new_kform_0);
FX_FREE_STR(&o_filename_0);
FX_FREE_STR(&c_filename_0);
FX_FREE_STR(&k_filename_0);
FX_FREE_STR(&cname_0);
FX_FREE_STR(&mname_0);
FX_FREE_STR(&ext_0);
FX_FREE_STR(&km_cname_0);
if (km_top_0) {
_fx_free_LN14K_form__kexp_t(&km_top_0);
}
FX_FREE_LIST_SIMPLE(&km_deps_0);
_fx_free_R14Ast__pragmas_t(&km_pragmas_0);
FX_CHECK_EXN(_fx_cleanup);
}
if (!ok_1) {
fx_str_t slit_17 = FX_MAKE_STR("failed to write some k-forms");
FX_CALL(_fx_F9make_FailE1S(&slit_17, &v_0), _fx_cleanup);
FX_THROW(&v_0, true, _fx_cleanup);
}
FX_COPY_PTR(kmods_1, fx_result);
_fx_cleanup: ;
FX_FREE_ARR(&skip_flags_0);
FX_FREE_STR(&build_root_dir_0);
FX_FREE_STR(&build_dir_0);
FX_FREE_STR(&obj_ext_0);
if (kmods_1) {
_fx_free_LR17K_form__kmodule_t(&kmods_1);
}
fx_free_exn(&v_0);
return fx_status;
}
/*
 * k_optimize_all: runs the K-form optimization pipeline over the input module
 * list `kmods_0` and writes a (module-list, success-flag) tuple into
 * `*fx_result`.  The success flag is `_fx_g21Ast__all_compile_errs == 0`,
 * i.e. true iff no compile errors were accumulated while the passes ran.
 *
 * NOTE(review): this looks like machine-generated C emitted by the Ficus
 * compiler's C backend (the `_fx_` mangled names and FX_* macros) -- do not
 * hand-edit the logic; regenerate from the source instead.  TODO confirm.
 *
 * Ownership discipline used throughout: `temp_kmods_0` always owns the
 * current module list.  Each pass produces a fresh list in some `v_k`, after
 * which the old list is freed and `v_k` is copied (ref-shared via
 * FX_COPY_PTR) into `temp_kmods_0`.  On any error, FX_CALL jumps to
 * `_fx_cleanup`, which releases every still-live list before returning the
 * negative status code in `fx_status`.
 */
FX_EXTERN_C int _fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t(
struct _fx_LR17K_form__kmodule_t_data_t* kmods_0,
struct _fx_T2LR17K_form__kmodule_tB* fx_result,
void* fx_fv)
{
/* `temp_kmods_0` is the pipeline accumulator; v_0..v_18 hold the outputs of
   the individual fixed (non-looped) passes so they can all be freed at
   `_fx_cleanup` regardless of where an error occurred. */
_fx_LR17K_form__kmodule_t temp_kmods_0 = 0;
_fx_LR17K_form__kmodule_t v_0 = 0;
_fx_LR17K_form__kmodule_t v_1 = 0;
_fx_LR17K_form__kmodule_t v_2 = 0;
_fx_LR17K_form__kmodule_t v_3 = 0;
_fx_LR17K_form__kmodule_t v_4 = 0;
_fx_LR17K_form__kmodule_t v_5 = 0;
_fx_LR17K_form__kmodule_t v_6 = 0;
_fx_LR17K_form__kmodule_t v_7 = 0;
_fx_LR17K_form__kmodule_t v_8 = 0;
_fx_LR17K_form__kmodule_t v_9 = 0;
_fx_LR17K_form__kmodule_t v_10 = 0;
_fx_LR17K_form__kmodule_t v_11 = 0;
_fx_LR17K_form__kmodule_t v_12 = 0;
_fx_LR17K_form__kmodule_t v_13 = 0;
_fx_LR17K_form__kmodule_t v_14 = 0;
_fx_LR17K_form__kmodule_t v_15 = 0;
_fx_LR17K_form__kmodule_t v_16 = 0;
_fx_LR17K_form__kmodule_t v_17 = 0;
_fx_LR17K_form__kmodule_t v_18 = 0;
int fx_status = 0;
/* Reset the global compile-error accumulator; its emptiness at the end
   becomes the boolean half of the result tuple. */
_fx_free_LE(&_fx_g21Ast__all_compile_errs);
_fx_g21Ast__all_compile_errs = 0;
int_ niters_0 = _fx_g12Options__opt.optim_iters;
FX_COPY_PTR(kmods_0, &temp_kmods_0);
/* --- fixed pre-loop passes: remove unused, annotate types, copy
   generic/inline functions, remove unused-by-main, mangle (+ dump of
   intermediate K-forms), mangle locals, skip unchanged modules, demangle. */
fx_str_t slit_0 = FX_MAKE_STR("\tremove unused");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_0, 0), _fx_cleanup);
FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_0, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_0, &temp_kmods_0);
fx_str_t slit_1 = FX_MAKE_STR("\tannotate types");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_1, 0), _fx_cleanup);
FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_1, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_1, &temp_kmods_0);
fx_str_t slit_2 = FX_MAKE_STR("\tcopy generic/inline functions");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_2, 0), _fx_cleanup);
FX_CALL(_fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_2, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_2, &temp_kmods_0);
fx_str_t slit_3 = FX_MAKE_STR("\tremove unused by main");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_3, 0), _fx_cleanup);
FX_CALL(_fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_3, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_3, &temp_kmods_0);
fx_str_t slit_4 = FX_MAKE_STR("\tmangle & dump intermediate K-forms");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_4, 0), _fx_cleanup);
FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_4, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_4, &temp_kmods_0);
FX_CALL(_fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_5, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_5, &temp_kmods_0);
FX_CALL(_fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_6, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_6, &temp_kmods_0);
fx_str_t slit_5 = FX_MAKE_STR("\tdemangle");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_5, 0), _fx_cleanup);
FX_CALL(_fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_7, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_7, &temp_kmods_0);
/* --- main optimization loop: runs the iterated pass sequence once per
   optimization pass, iterating i_1 = 1 .. niters_0 (presumably; the count
   comes from FX_LOOP_COUNT(1, niters_0+1, 1) -- confirm against the
   macro's definition). */
int_ v_19 = niters_0 + 1;
int_ n_0 = FX_LOOP_COUNT(1, v_19, 1);
for (int_ i_0 = 0; i_0 < n_0; i_0++) {
/* Per-iteration pass outputs; all freed at `_fx_catch_0` below so an error
   mid-iteration leaks nothing. */
fx_str_t v_20 = {0};
fx_str_t v_21 = {0};
_fx_LR17K_form__kmodule_t v_22 = 0;
_fx_LR17K_form__kmodule_t v_23 = 0;
_fx_LR17K_form__kmodule_t v_24 = 0;
_fx_LR17K_form__kmodule_t v_25 = 0;
_fx_LR17K_form__kmodule_t v_26 = 0;
_fx_LR17K_form__kmodule_t v_27 = 0;
_fx_LR17K_form__kmodule_t v_28 = 0;
_fx_LR17K_form__kmodule_t v_29 = 0;
_fx_LR17K_form__kmodule_t v_30 = 0;
_fx_LR17K_form__kmodule_t v_31 = 0;
int_ i_1 = 1 + i_0 * 1;
FX_CALL(_fx_F6stringS1i(i_1, &v_20, 0), _fx_catch_0);
fx_str_t slit_6 = FX_MAKE_STR("Optimization pass #");
fx_str_t slit_7 = FX_MAKE_STR(":");
{
const fx_str_t strs_0[] = { slit_6, v_20, slit_7 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_21), _fx_catch_0);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_21, 0), _fx_catch_0);
/* Simple lambda lifting only on the first two passes. */
if (i_1 <= 2) {
fx_str_t slit_8 = FX_MAKE_STR("\tsimple lambda lifting");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_8, 0), _fx_catch_0);
FX_CALL(_fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_22, 0), _fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_22, &temp_kmods_0);
}
fx_str_t slit_9 = FX_MAKE_STR("\ttailrec");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_9, 0), _fx_catch_0);
FX_CALL(_fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_23, 0),
_fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_23, &temp_kmods_0);
fx_str_t slit_10 = FX_MAKE_STR("\tloop inv");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_10, 0), _fx_catch_0);
FX_CALL(_fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_24, 0),
_fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_24, &temp_kmods_0);
fx_str_t slit_11 = FX_MAKE_STR("\tgemm implantation");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_11, 0), _fx_catch_0);
FX_CALL(_fx_M13K_optim_matopFM13optimize_gemmLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_25, 0),
_fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_25, &temp_kmods_0);
fx_str_t slit_12 = FX_MAKE_STR("\tinline");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_12, 0), _fx_catch_0);
/* Inlining pass is skipped entirely when the inline threshold is <= 0
   (note: the "\tinline" verbose line above is still printed either way). */
if (_fx_g12Options__opt.inline_thresh > 0) {
FX_CALL(_fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_26, 0), _fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_26, &temp_kmods_0);
}
fx_str_t slit_13 = FX_MAKE_STR("\tflatten");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_13, 0), _fx_catch_0);
FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_27, 0), _fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_27, &temp_kmods_0);
fx_str_t slit_14 = FX_MAKE_STR("\tfuse loops");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_14, 0), _fx_catch_0);
FX_CALL(_fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_28, 0),
_fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_28, &temp_kmods_0);
fx_str_t slit_15 = FX_MAKE_STR("\tfast idx");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_15, 0), _fx_catch_0);
FX_CALL(_fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_29, 0),
_fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_29, &temp_kmods_0);
fx_str_t slit_16 = FX_MAKE_STR("\tconst folding");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_16, 0), _fx_catch_0);
FX_CALL(_fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_30, 0),
_fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_30, &temp_kmods_0);
fx_str_t slit_17 = FX_MAKE_STR("\tremove unused");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_17, 0), _fx_catch_0);
FX_CALL(
_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_31, 0),
_fx_catch_0);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_31, &temp_kmods_0);
/* Per-iteration cleanup: reached both on success (fall-through) and on
   error (FX_CALL jump); frees only lists still non-null. */
_fx_catch_0: ;
if (v_31) {
_fx_free_LR17K_form__kmodule_t(&v_31);
}
if (v_30) {
_fx_free_LR17K_form__kmodule_t(&v_30);
}
if (v_29) {
_fx_free_LR17K_form__kmodule_t(&v_29);
}
if (v_28) {
_fx_free_LR17K_form__kmodule_t(&v_28);
}
if (v_27) {
_fx_free_LR17K_form__kmodule_t(&v_27);
}
if (v_26) {
_fx_free_LR17K_form__kmodule_t(&v_26);
}
if (v_25) {
_fx_free_LR17K_form__kmodule_t(&v_25);
}
if (v_24) {
_fx_free_LR17K_form__kmodule_t(&v_24);
}
if (v_23) {
_fx_free_LR17K_form__kmodule_t(&v_23);
}
if (v_22) {
_fx_free_LR17K_form__kmodule_t(&v_22);
}
FX_FREE_STR(&v_21);
FX_FREE_STR(&v_20);
FX_CHECK_EXN(_fx_cleanup);
}
/* --- finalization passes: linearize array access, nothrow wrappers,
   mutable-freevar referencing, declosuring, lambda lifting, flatten,
   remove unused, final mangle, remove unused again, mark recursive
   functions, annotate types. */
fx_str_t slit_18 = FX_MAKE_STR("Finalizing K-form:");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_18, 0), _fx_cleanup);
fx_str_t slit_19 = FX_MAKE_STR("\tlinearize array access");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_19, 0), _fx_cleanup);
FX_CALL(_fx_M10K_fast_idxFM23linearize_arrays_accessLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_8, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_8, &temp_kmods_0);
fx_str_t slit_20 = FX_MAKE_STR("\tmaking wrappers for nothrow functions");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_20, 0), _fx_cleanup);
FX_CALL(
_fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_9, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_9, &temp_kmods_0);
fx_str_t slit_21 = FX_MAKE_STR("\tmutable freevars referencing");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_21, 0), _fx_cleanup);
FX_CALL(_fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_10, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_10, &temp_kmods_0);
fx_str_t slit_22 = FX_MAKE_STR("\tdeclosuring");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_22, 0), _fx_cleanup);
FX_CALL(_fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_11, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_11, &temp_kmods_0);
fx_str_t slit_23 = FX_MAKE_STR("\tlambda lifting");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_23, 0), _fx_cleanup);
FX_CALL(_fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_12, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_12, &temp_kmods_0);
fx_str_t slit_24 = FX_MAKE_STR("\tflatten");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_24, 0), _fx_cleanup);
FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_13, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_13, &temp_kmods_0);
fx_str_t slit_25 = FX_MAKE_STR("\tremove unused");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_25, 0), _fx_cleanup);
FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_14, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_14, &temp_kmods_0);
fx_str_t slit_26 = FX_MAKE_STR("\tmangle");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_26, 0), _fx_cleanup);
FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_15, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_15, &temp_kmods_0);
fx_str_t slit_27 = FX_MAKE_STR("\tremove unused");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_27, 0), _fx_cleanup);
FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_16, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_16, &temp_kmods_0);
fx_str_t slit_28 = FX_MAKE_STR("\tmark recursive");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_28, 0), _fx_cleanup);
FX_CALL(_fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_17, 0),
_fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_17, &temp_kmods_0);
fx_str_t slit_29 = FX_MAKE_STR("\tannotate types");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_29, 0), _fx_cleanup);
FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_18, 0), _fx_cleanup);
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
FX_COPY_PTR(v_18, &temp_kmods_0);
/* Success: pack (final module list, no-errors flag) into the result tuple. */
_fx_make_T2LR17K_form__kmodule_tB(temp_kmods_0, _fx_g21Ast__all_compile_errs == 0, fx_result);
/* Unified cleanup: reached on both success and failure; frees whichever
   intermediate lists are still non-null and returns the status code
   (0 on success, negative on error). */
_fx_cleanup: ;
if (temp_kmods_0) {
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0);
}
if (v_0) {
_fx_free_LR17K_form__kmodule_t(&v_0);
}
if (v_1) {
_fx_free_LR17K_form__kmodule_t(&v_1);
}
if (v_2) {
_fx_free_LR17K_form__kmodule_t(&v_2);
}
if (v_3) {
_fx_free_LR17K_form__kmodule_t(&v_3);
}
if (v_4) {
_fx_free_LR17K_form__kmodule_t(&v_4);
}
if (v_5) {
_fx_free_LR17K_form__kmodule_t(&v_5);
}
if (v_6) {
_fx_free_LR17K_form__kmodule_t(&v_6);
}
if (v_7) {
_fx_free_LR17K_form__kmodule_t(&v_7);
}
if (v_8) {
_fx_free_LR17K_form__kmodule_t(&v_8);
}
if (v_9) {
_fx_free_LR17K_form__kmodule_t(&v_9);
}
if (v_10) {
_fx_free_LR17K_form__kmodule_t(&v_10);
}
if (v_11) {
_fx_free_LR17K_form__kmodule_t(&v_11);
}
if (v_12) {
_fx_free_LR17K_form__kmodule_t(&v_12);
}
if (v_13) {
_fx_free_LR17K_form__kmodule_t(&v_13);
}
if (v_14) {
_fx_free_LR17K_form__kmodule_t(&v_14);
}
if (v_15) {
_fx_free_LR17K_form__kmodule_t(&v_15);
}
if (v_16) {
_fx_free_LR17K_form__kmodule_t(&v_16);
}
if (v_17) {
_fx_free_LR17K_form__kmodule_t(&v_17);
}
if (v_18) {
_fx_free_LR17K_form__kmodule_t(&v_18);
}
return fx_status;
}
FX_EXTERN_C int _fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS(
struct _fx_LR17C_form__cmodule_t_data_t* cmods_0,
fx_str_t* ficus_root_0,
bool* fx_result,
void* fx_fv)
{
fx_str_t osinfo_0 = {0};
fx_str_t opt_level_str_0 = {0};
fx_str_t runtime_include_path_0 = {0};
fx_str_t runtime_lib_path_0 = {0};
fx_str_t runtime_impl_0 = {0};
fx_str_t build_root_dir_0 = {0};
fx_str_t build_dir_0 = {0};
_fx_Ta9S v_0 = {0};
fx_str_t opt_flags_0 = {0};
fx_str_t v_1 = {0};
fx_str_t v_2 = {0};
fx_str_t v_3 = {0};
fx_str_t v_4 = {0};
fx_str_t cflags_0 = {0};
fx_str_t c_comp_0 = {0};
fx_str_t cpp_comp_name_0 = {0};
fx_str_t v_5 = {0};
fx_str_t cpp_comp_0 = {0};
_fx_Ta4S v_6 = {0};
_fx_Ta2S v_7 = {0};
fx_str_t omp_cflags_0 = {0};
fx_str_t omp_lib_0 = {0};
_fx_Ta3S v_8 = {0};
fx_str_t v_9 = {0};
fx_str_t v_10 = {0};
fx_str_t libpath_0 = {0};
fx_str_t cflags_1 = {0};
fx_str_t clibs_0 = {0};
fx_str_t omp_flags_0 = {0};
fx_str_t os_0 = {0};
fx_str_t libpath_1 = {0};
fx_str_t cflags_2 = {0};
fx_str_t clibs_1 = {0};
fx_str_t ggdb_opt_0 = {0};
fx_str_t stk_overflow_0 = {0};
fx_str_t v_11 = {0};
fx_str_t v_12 = {0};
fx_str_t v_13 = {0};
fx_str_t v_14 = {0};
fx_str_t v_15 = {0};
fx_str_t v_16 = {0};
fx_str_t cflags_3 = {0};
fx_str_t v_17 = {0};
fx_str_t v_18 = {0};
fx_str_t v_19 = {0};
fx_str_t v_20 = {0};
fx_str_t clibs_2 = {0};
fx_str_t c_comp_1 = {0};
fx_str_t cpp_comp_1 = {0};
fx_str_t obj_ext_0 = {0};
fx_str_t obj_opt_0 = {0};
fx_str_t appname_opt_0 = {0};
fx_str_t link_lib_opt_0 = {0};
fx_str_t cflags_4 = {0};
fx_str_t clibs_3 = {0};
fx_str_t custom_cflags_0 = {0};
fx_str_t v_21 = {0};
fx_str_t custom_cflags_1 = {0};
fx_str_t v_22 = {0};
fx_str_t cflags_5 = {0};
fx_str_t v_23 = {0};
fx_str_t v_24 = {0};
fx_str_t v_25 = {0};
_fx_R14Ast__pragmas_t v_26 = {0};
_fx_R17C_form__cmodule_t runtime_pseudo_cmod_0 = {0};
_fx_LR17C_form__cmodule_t cmods_1 = 0;
fx_arr_t v_27 = {0};
fx_arr_t results_0 = {0};
_fx_T5BBLSBLS __fold_result___0 = {0};
_fx_T5BBLSBLS v_28 = {0};
_fx_LS all_clibs_0 = 0;
_fx_LS objs_0 = 0;
fx_str_t v_29 = {0};
fx_str_t v_30 = {0};
fx_str_t v_31 = {0};
fx_str_t v_32 = {0};
fx_str_t custom_clibs_0 = {0};
fx_str_t v_33 = {0};
fx_str_t custom_clibs_1 = {0};
fx_str_t v_34 = {0};
fx_str_t custom_clibs_2 = {0};
_fx_LS v_35 = 0;
_fx_LS v_36 = 0;
fx_str_t v_37 = {0};
fx_str_t clibs_4 = {0};
fx_str_t v_38 = {0};
fx_str_t v_39 = {0};
fx_str_t v_40 = {0};
fx_str_t v_41 = {0};
fx_str_t cmd_0 = {0};
fx_str_t v_42 = {0};
fx_str_t cmd_1 = {0};
int fx_status = 0;
FX_CALL(_fx_g11Sys__osname.fp(true, &osinfo_0, _fx_g11Sys__osname.fcv), _fx_cleanup);
int_ opt_level_0 = _fx_g12Options__opt.optimize_level;
if (opt_level_0 <= 3) {
FX_CALL(_fx_F6stringS1i(opt_level_0, &opt_level_str_0, 0), _fx_cleanup);
}
else {
fx_str_t slit_0 = FX_MAKE_STR("fast"); fx_copy_str(&slit_0, &opt_level_str_0);
}
bool enable_openmp_0 = _fx_g12Options__opt.enable_openmp;
fx_str_t slit_1 = FX_MAKE_STR("runtime");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_1, &runtime_include_path_0, 0), _fx_cleanup);
fx_str_t slit_2 = FX_MAKE_STR("runtime/lib");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_2, &runtime_lib_path_0, 0), _fx_cleanup);
fx_str_t slit_3 = FX_MAKE_STR("runtime/ficus/impl/libficus");
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_3, &runtime_impl_0, 0), _fx_cleanup);
fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0);
bool ok_0;
FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup);
fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0);
bool ok_1;
if (ok_0) {
FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup);
}
else {
ok_1 = false;
}
if (_fx_g10Sys__win32) {
if (opt_level_0 == 0) {
fx_str_t slit_4 = FX_MAKE_STR(" /D_DEBUG /MTd /Od /GF"); fx_copy_str(&slit_4, &opt_flags_0);
}
else {
if (opt_level_0 == 1) {
fx_str_t slit_5 = FX_MAKE_STR("/O1"); fx_copy_str(&slit_5, &v_1);
}
else {
fx_str_t slit_6 = FX_MAKE_STR("/O2"); fx_copy_str(&slit_6, &v_1);
}
fx_str_t slit_7 = FX_MAKE_STR(" /DNDEBUG /MT ");
{
const fx_str_t strs_0[] = { slit_7, v_1 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &opt_flags_0), _fx_cleanup);
}
}
FX_CALL(_fx_M8CompilerFM6stringS1S(&opt_flags_0, &v_2, 0), _fx_cleanup);
fx_str_t slit_8 = FX_MAKE_STR("");
FX_CALL(_fx_M8CompilerFM6stringS1S(&slit_8, &v_3, 0), _fx_cleanup);
FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_include_path_0, &v_4, 0), _fx_cleanup);
fx_str_t slit_9 = FX_MAKE_STR("/utf-8 /nologo");
fx_str_t slit_10 = FX_MAKE_STR(" /I");
{
const fx_str_t strs_1[] = { slit_9, v_2, v_3, slit_10, v_4 };
FX_CALL(fx_strjoin(0, 0, 0, strs_1, 5, &cflags_0), _fx_cleanup);
}
fx_str_t slit_11 = FX_MAKE_STR("win");
fx_str_t slit_12 = FX_MAKE_STR("cl");
fx_str_t slit_13 = FX_MAKE_STR("cl");
fx_str_t slit_14 = FX_MAKE_STR(".obj");
fx_str_t slit_15 = FX_MAKE_STR("/c /Fo");
fx_str_t slit_16 = FX_MAKE_STR("/Fe");
fx_str_t slit_17 = FX_MAKE_STR("");
fx_str_t slit_18 = FX_MAKE_STR("/nologo /F10485760 kernel32.lib advapi32.lib");
_fx_make_Ta9S(&slit_11, &slit_12, &slit_13, &slit_14, &slit_15, &slit_16, &slit_17, &cflags_0, &slit_18, &v_0);
}
else {
fx_str_t slit_19 = FX_MAKE_STR("CC");
fx_str_t slit_20 = FX_MAKE_STR("cc");
FX_CALL(_fx_M3SysFM6getenvS2SS(&slit_19, &slit_20, &c_comp_0, 0), _fx_cleanup);
fx_str_t slit_21 = FX_MAKE_STR("CXX");
fx_str_t slit_22 = FX_MAKE_STR("c++");
FX_CALL(_fx_M3SysFM6getenvS2SS(&slit_21, &slit_22, &cpp_comp_name_0, 0), _fx_cleanup);
FX_CALL(_fx_M8CompilerFM6stringS1S(&cpp_comp_name_0, &v_5, 0), _fx_cleanup);
fx_str_t slit_23 = FX_MAKE_STR(" -std=c++11");
{
const fx_str_t strs_2[] = { v_5, slit_23 };
FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &cpp_comp_0), _fx_cleanup);
}
bool v_43;
fx_str_t slit_24 = FX_MAKE_STR("Darwin");
FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_24, &v_43, 0), _fx_cleanup);
if (v_43) {
if (enable_openmp_0) {
bool v_44;
fx_str_t slit_25 = FX_MAKE_STR("gcc");
FX_CALL(_fx_M8CompilerFM8containsB2SS(&c_comp_0, &slit_25, &v_44, 0), _fx_cleanup);
if (v_44) {
fx_str_t slit_26 = FX_MAKE_STR("-fopenmp");
fx_str_t slit_27 = FX_MAKE_STR(" -lgomp");
_fx_make_Ta2S(&slit_26, &slit_27, &v_7);
}
else {
fx_str_t slit_28 = FX_MAKE_STR("-Xclang -fopenmp");
fx_str_t slit_29 = FX_MAKE_STR(" -lomp");
_fx_make_Ta2S(&slit_28, &slit_29, &v_7);
}
}
else {
fx_str_t slit_30 = FX_MAKE_STR(""); fx_str_t slit_31 = FX_MAKE_STR(""); _fx_make_Ta2S(&slit_30, &slit_31, &v_7);
}
fx_copy_str(&v_7.t0, &omp_cflags_0);
fx_copy_str(&v_7.t1, &omp_lib_0);
bool v_45;
fx_str_t slit_32 = FX_MAKE_STR("x86_64");
FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_32, &v_45, 0), _fx_cleanup);
if (v_45) {
fx_str_t slit_33 = FX_MAKE_STR(" ");
{
const fx_str_t strs_3[] = { slit_33, omp_cflags_0, omp_lib_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_9), _fx_cleanup);
}
fx_str_t slit_34 = FX_MAKE_STR("macos_x64");
_fx_make_Ta3S(&slit_34, &omp_cflags_0, &v_9, &v_8);
}
else {
bool v_46;
fx_str_t slit_35 = FX_MAKE_STR("arm64");
FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_35, &v_46, 0), _fx_cleanup);
if (v_46) {
fx_str_t slit_36 = FX_MAKE_STR(" ");
{
const fx_str_t strs_4[] = { slit_36, omp_cflags_0, omp_lib_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &v_10), _fx_cleanup);
}
fx_str_t slit_37 = FX_MAKE_STR("macos_arm64");
_fx_make_Ta3S(&slit_37, &omp_cflags_0, &v_10, &v_8);
}
else {
fx_str_t slit_38 = FX_MAKE_STR("");
fx_str_t slit_39 = FX_MAKE_STR("");
fx_str_t slit_40 = FX_MAKE_STR("");
_fx_make_Ta3S(&slit_38, &slit_39, &slit_40, &v_8);
}
}
fx_copy_str(&v_8.t0, &libpath_0);
fx_copy_str(&v_8.t1, &cflags_1);
fx_copy_str(&v_8.t2, &clibs_0);
fx_str_t slit_41 = FX_MAKE_STR("macos");
_fx_make_Ta4S(&slit_41, &libpath_0, &cflags_1, &clibs_0, &v_6);
}
else {
bool v_47;
fx_str_t slit_42 = FX_MAKE_STR("Linux");
FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_42, &v_47, 0), _fx_cleanup);
if (v_47) {
if (enable_openmp_0) {
fx_str_t slit_43 = FX_MAKE_STR(" -fopenmp"); fx_copy_str(&slit_43, &omp_flags_0);
}
else {
fx_str_t slit_44 = FX_MAKE_STR(""); fx_copy_str(&slit_44, &omp_flags_0);
}
fx_str_t slit_45 = FX_MAKE_STR("linux");
fx_str_t slit_46 = FX_MAKE_STR("");
_fx_make_Ta4S(&slit_45, &slit_46, &omp_flags_0, &omp_flags_0, &v_6);
}
else if (_fx_g9Sys__unix) {
fx_str_t slit_47 = FX_MAKE_STR("unix");
fx_str_t slit_48 = FX_MAKE_STR("");
fx_str_t slit_49 = FX_MAKE_STR("");
fx_str_t slit_50 = FX_MAKE_STR("");
_fx_make_Ta4S(&slit_47, &slit_48, &slit_49, &slit_50, &v_6);
}
else {
fx_str_t slit_51 = FX_MAKE_STR("");
fx_str_t slit_52 = FX_MAKE_STR("");
fx_str_t slit_53 = FX_MAKE_STR("");
fx_str_t slit_54 = FX_MAKE_STR("");
_fx_make_Ta4S(&slit_51, &slit_52, &slit_53, &slit_54, &v_6);
}
}
fx_copy_str(&v_6.t0, &os_0);
fx_copy_str(&v_6.t1, &libpath_1);
fx_copy_str(&v_6.t2, &cflags_2);
fx_copy_str(&v_6.t3, &clibs_1);
if (opt_level_0 == 0) {
fx_str_t slit_55 = FX_MAKE_STR(" -D_DEBUG -ggdb"); fx_copy_str(&slit_55, &ggdb_opt_0);
}
else {
if (opt_level_0 == 100) {
fx_str_t slit_56 = FX_MAKE_STR(" -DFX_NO_STACK_OVERFLOW_CHECK"); fx_copy_str(&slit_56, &stk_overflow_0);
}
else {
fx_str_t slit_57 = FX_MAKE_STR(""); fx_copy_str(&slit_57, &stk_overflow_0);
}
FX_CALL(_fx_M8CompilerFM6stringS1S(&stk_overflow_0, &v_11, 0), _fx_cleanup);
fx_str_t slit_58 = FX_MAKE_STR(" -DNDEBUG");
{
const fx_str_t strs_5[] = { slit_58, v_11 };
FX_CALL(fx_strjoin(0, 0, 0, strs_5, 2, &ggdb_opt_0), _fx_cleanup);
}
}
FX_CALL(_fx_M8CompilerFM6stringS1S(&opt_level_str_0, &v_12, 0), _fx_cleanup);
FX_CALL(_fx_M8CompilerFM6stringS1S(&ggdb_opt_0, &v_13, 0), _fx_cleanup);
FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_2, &v_14, 0), _fx_cleanup);
fx_str_t slit_59 = FX_MAKE_STR("-Wno-unknown-warning-option -Wno-dangling-else -Wno-static-in-inline -Wno-parentheses");
FX_CALL(_fx_M8CompilerFM6stringS1S(&slit_59, &v_15, 0), _fx_cleanup);
FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_include_path_0, &v_16, 0), _fx_cleanup);
fx_str_t slit_60 = FX_MAKE_STR("-O");
fx_str_t slit_61 = FX_MAKE_STR(" ");
fx_str_t slit_62 = FX_MAKE_STR(" ");
fx_str_t slit_63 = FX_MAKE_STR(" -I");
{
const fx_str_t strs_6[] = { slit_60, v_12, v_13, slit_61, v_14, slit_62, v_15, slit_63, v_16 };
FX_CALL(fx_strjoin(0, 0, 0, strs_6, 9, &cflags_3), _fx_cleanup);
}
if (FX_STR_LENGTH(libpath_1) != 0) {
FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_lib_path_0, &v_18, 0), _fx_cleanup);
FX_CALL(_fx_M8CompilerFM6stringS1S(&libpath_1, &v_19, 0), _fx_cleanup);
fx_str_t slit_64 = FX_MAKE_STR("-L");
fx_str_t slit_65 = FX_MAKE_STR("/");
fx_str_t slit_66 = FX_MAKE_STR(" ");
{
const fx_str_t strs_7[] = { slit_64, v_18, slit_65, v_19, slit_66 };
FX_CALL(fx_strjoin(0, 0, 0, strs_7, 5, &v_17), _fx_cleanup);
}
}
else {
fx_str_t slit_67 = FX_MAKE_STR(""); fx_copy_str(&slit_67, &v_17);
}
FX_CALL(_fx_M8CompilerFM6stringS1S(&clibs_1, &v_20, 0), _fx_cleanup);
fx_str_t slit_68 = FX_MAKE_STR("-lm ");
{
const fx_str_t strs_8[] = { v_17, slit_68, v_20 };
FX_CALL(fx_strjoin(0, 0, 0, strs_8, 3, &clibs_2), _fx_cleanup);
}
fx_str_t slit_69 = FX_MAKE_STR(".o");
fx_str_t slit_70 = FX_MAKE_STR("-c -o ");
fx_str_t slit_71 = FX_MAKE_STR("-o ");
fx_str_t slit_72 = FX_MAKE_STR("-l");
_fx_make_Ta9S(&os_0, &c_comp_0, &cpp_comp_0, &slit_69, &slit_70, &slit_71, &slit_72, &cflags_3, &clibs_2, &v_0);
}
fx_copy_str(&v_0.t1, &c_comp_1);
fx_copy_str(&v_0.t2, &cpp_comp_1);
fx_copy_str(&v_0.t3, &obj_ext_0);
fx_copy_str(&v_0.t4, &obj_opt_0);
fx_copy_str(&v_0.t5, &appname_opt_0);
fx_copy_str(&v_0.t6, &link_lib_opt_0);
fx_copy_str(&v_0.t7, &cflags_4);
fx_copy_str(&v_0.t8, &clibs_3);
fx_str_t slit_73 = FX_MAKE_STR("FICUS_CFLAGS");
FX_CALL(_fx_M3SysFM6getenvS1S(&slit_73, &custom_cflags_0, 0), _fx_cleanup);
fx_copy_str(&_fx_g12Options__opt.cflags, &v_21);
if (FX_STR_LENGTH(v_21) == 0) {
fx_copy_str(&custom_cflags_0, &custom_cflags_1);
}
else {
fx_copy_str(&_fx_g12Options__opt.cflags, &v_22);
fx_str_t slit_74 = FX_MAKE_STR(" ");
{
const fx_str_t strs_9[] = { v_22, slit_74, custom_cflags_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_9, 3, &custom_cflags_1), _fx_cleanup);
}
}
fx_str_t slit_75 = FX_MAKE_STR(" ");
{
const fx_str_t strs_10[] = { cflags_4, slit_75, custom_cflags_1 };
FX_CALL(fx_strjoin(0, 0, 0, strs_10, 3, &cflags_5), _fx_cleanup);
}
FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_5, &v_23, 0), _fx_cleanup);
fx_str_t slit_76 = FX_MAKE_STR("Compiling .c/.cpp files with cflags=");
{
const fx_str_t strs_11[] = { slit_76, v_23 };
FX_CALL(fx_strjoin(0, 0, 0, strs_11, 2, &v_24), _fx_cleanup);
}
FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g17Compiler__MsgBlue, &v_24, &v_25, 0), _fx_cleanup);
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_25, 0), _fx_cleanup);
_fx_make_R14Ast__pragmas_t(false, 0, &v_26);
_fx_make_R17C_form__cmodule_t(&_fx_g9Ast__noid, &runtime_impl_0, 0, false, true, false, &v_26, &runtime_pseudo_cmod_0);
FX_CALL(_fx_cons_LR17C_form__cmodule_t(&runtime_pseudo_cmod_0, cmods_0, true, &cmods_1), _fx_cleanup);
FX_CALL(_fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_1, &v_27, 0), _fx_cleanup);
int par_status_0 = 0;
int_ ni_0 = FX_ARR_SIZE(v_27, 0);
_fx_R17C_form__cmodule_t* ptr_v_0 = FX_PTR_1D(_fx_R17C_form__cmodule_t, v_27, 0);
{
const int_ shape_0[] = { ni_0 };
FX_CALL(
fx_make_arr(1, shape_0, sizeof(_fx_T5BBLSBS), (fx_free_t)_fx_free_T5BBLSBS, (fx_copy_t)_fx_copy_T5BBLSBS, 0,
&results_0), _fx_cleanup);
}
#pragma omp parallel for
for (int_ i_0 = 0; i_0 < ni_0; i_0++) {
int fx_status = 0;
_fx_R17C_form__cmodule_t __pat___0 = {0};
_fx_LT2SR10Ast__loc_t pragma_clibs_0 = 0;
_fx_LN15C_form__cstmt_t cmod_ccode_0 = 0;
fx_str_t cmod_cname_0 = {0};
fx_str_t output_fname_0 = {0};
_fx_Ta2S v_48 = {0};
fx_str_t comp_0 = {0};
fx_str_t ext_0 = {0};
fx_str_t output_fname_1 = {0};
fx_str_t output_fname_c_0 = {0};
_fx_T3BBS v_49 = {0};
fx_str_t str_new_0 = {0};
fx_str_t str_old_0 = {0};
fx_exn_t exn_0 = {0};
fx_exn_t exn_1 = {0};
fx_str_t v_50 = {0};
fx_str_t v_51 = {0};
fx_str_t v_52 = {0};
fx_str_t status_j_0 = {0};
fx_str_t c_filename_0 = {0};
fx_str_t obj_filename_0 = {0};
_fx_T3BBS v_53 = {0};
fx_str_t v_54 = {0};
fx_str_t v_55 = {0};
fx_str_t v_56 = {0};
fx_str_t v_57 = {0};
fx_str_t v_58 = {0};
fx_str_t cmd_2 = {0};
_fx_R7File__t p_0 = {0};
fx_str_t status_0 = {0};
fx_str_t status_j_1 = {0};
fx_str_t v_59 = {0};
fx_str_t v_60 = {0};
fx_str_t v_61 = {0};
_fx_LS v_62 = 0;
_fx_LS clibs_5 = 0;
_fx_T5BBLSBS tup_0 = {0};
_fx_copy_R17C_form__cmodule_t(ptr_v_0 + i_0, &__pat___0);
_fx_T5BBLSBS* dstptr_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, i_0);
_fx_R14Ast__pragmas_t* i_1 = &__pat___0.cmod_pragmas;
FX_COPY_PTR(i_1->pragma_clibs, &pragma_clibs_0);
FX_COPY_PTR(__pat___0.cmod_ccode, &cmod_ccode_0);
fx_copy_str(&__pat___0.cmod_cname, &cmod_cname_0);
FX_CALL(_fx_M8FilenameFM8basenameS1S(&cmod_cname_0, &output_fname_0, 0), _fx_catch_4);
bool is_runtime_0 = _fx_F6__eq__B2SS(&cmod_cname_0, &runtime_impl_0, 0);
bool is_cpp_0;
if (!is_runtime_0) {
if (_fx_g12Options__opt.compile_by_cpp) {
is_cpp_0 = true;
}
else {
is_cpp_0 = i_1->pragma_cpp;
}
}
else {
is_cpp_0 = false;
}
if (is_cpp_0) {
fx_str_t slit_77 = FX_MAKE_STR(".cpp"); _fx_make_Ta2S(&cpp_comp_1, &slit_77, &v_48);
}
else {
fx_str_t slit_78 = FX_MAKE_STR(".c"); _fx_make_Ta2S(&c_comp_1, &slit_78, &v_48);
}
fx_copy_str(&v_48.t0, &comp_0);
fx_copy_str(&v_48.t1, &ext_0);
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &output_fname_0, &output_fname_1, 0), _fx_catch_4);
{
const fx_str_t strs_12[] = { output_fname_1, ext_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_12, 2, &output_fname_c_0), _fx_catch_4);
}
if (__pat___0.cmod_skip) {
fx_str_t slit_79 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(true, false, &slit_79, &v_49);
}
else if (is_runtime_0) {
fx_str_t slit_80 = FX_MAKE_STR(""); _fx_make_T3BBS(true, true, &slit_80, &v_49);
}
else {
FX_CALL(_fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t(cmod_ccode_0, &str_new_0, 0), _fx_catch_4);
if (_fx_g12Options__opt.force_rebuild) {
fx_str_t slit_81 = FX_MAKE_STR(""); fx_copy_str(&slit_81, &str_old_0);
}
else {
FX_CALL(_fx_M4FileFM9read_utf8S1S(&output_fname_c_0, &str_old_0, 0), _fx_catch_0);
_fx_catch_0: ;
if (fx_status < 0) {
fx_exn_get_and_reset(fx_status, &exn_0);
fx_status = 0;
FX_FREE_STR(&str_old_0);
int tag_0 = exn_0.tag;
bool res_0;
if (tag_0 == FX_EXN_IOError) {
res_0 = true;
}
else if (tag_0 == FX_EXN_FileOpenError) {
res_0 = true;
}
else {
res_0 = false;
}
FX_CHECK_EXN(_fx_catch_4);
if (res_0) {
fx_str_t slit_82 = FX_MAKE_STR(""); fx_copy_str(&slit_82, &str_old_0); goto _fx_endmatch_0;
}
FX_RETHROW(&exn_0, _fx_catch_4);
_fx_endmatch_0: ;
FX_CHECK_EXN(_fx_catch_4);
}
}
bool v_63 = _fx_F6__eq__B2SS(&str_new_0, &str_old_0, 0);
if (v_63) {
fx_str_t slit_83 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(ok_1, false, &slit_83, &v_49);
}
else {
bool well_written_0;
FX_CALL(_fx_M4FileFM10write_utf8v2SS(&output_fname_c_0, &str_new_0, 0), _fx_catch_1);
well_written_0 = true;
_fx_catch_1: ;
if (fx_status < 0) {
fx_exn_get_and_reset(fx_status, &exn_1);
fx_status = 0;
int tag_1 = exn_1.tag;
bool res_1;
if (tag_1 == FX_EXN_IOError) {
res_1 = true;
}
else if (tag_1 == FX_EXN_FileOpenError) {
res_1 = true;
}
else {
res_1 = false;
}
FX_CHECK_EXN(_fx_catch_4);
if (res_1) {
well_written_0 = false; goto _fx_endmatch_1;
}
FX_RETHROW(&exn_1, _fx_catch_4);
_fx_endmatch_1: ;
FX_CHECK_EXN(_fx_catch_4);
}
if (well_written_0) {
fx_str_t slit_84 = FX_MAKE_STR(""); fx_copy_str(&slit_84, &v_50);
}
else {
FX_CALL(_fx_M8CompilerFM6stringS1S(&output_fname_c_0, &v_51, 0), _fx_catch_4);
fx_str_t slit_85 = FX_MAKE_STR("failed to write ");
{
const fx_str_t strs_13[] = { slit_85, v_51 };
FX_CALL(fx_strjoin(0, 0, 0, strs_13, 2, &v_52), _fx_catch_4);
}
FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &v_52, &v_50, 0),
_fx_catch_4);
}
_fx_make_T3BBS(well_written_0, well_written_0, &v_50, &v_49);
}
}
bool ok_j_0 = v_49.t0;
bool reprocess_0 = v_49.t1;
fx_copy_str(&v_49.t2, &status_j_0);
if (is_runtime_0) {
fx_str_t slit_86 = FX_MAKE_STR(".c");
{
const fx_str_t strs_14[] = { runtime_impl_0, slit_86 };
FX_CALL(fx_strjoin(0, 0, 0, strs_14, 2, &c_filename_0), _fx_catch_4);
}
}
else {
fx_copy_str(&output_fname_c_0, &c_filename_0);
}
{
const fx_str_t strs_15[] = { output_fname_1, obj_ext_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_15, 2, &obj_filename_0), _fx_catch_4);
}
bool v_64;
if (ok_j_0) {
if (reprocess_0) {
v_64 = true;
}
else {
bool v_65; FX_CALL(_fx_M8FilenameFM6existsB1S(&obj_filename_0, &v_65, 0), _fx_catch_4); v_64 = !v_65;
}
}
else {
v_64 = false;
}
if (v_64) {
FX_CALL(_fx_M8CompilerFM6stringS1S(&comp_0, &v_54, 0), _fx_catch_4);
FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_5, &v_55, 0), _fx_catch_4);
FX_CALL(_fx_M8CompilerFM6stringS1S(&obj_opt_0, &v_56, 0), _fx_catch_4);
FX_CALL(_fx_M8CompilerFM6stringS1S(&obj_filename_0, &v_57, 0), _fx_catch_4);
FX_CALL(_fx_M8CompilerFM6stringS1S(&c_filename_0, &v_58, 0), _fx_catch_4);
fx_str_t slit_87 = FX_MAKE_STR(" ");
fx_str_t slit_88 = FX_MAKE_STR(" ");
fx_str_t slit_89 = FX_MAKE_STR(" ");
{
const fx_str_t strs_16[] = { v_54, slit_87, v_55, slit_88, v_56, v_57, slit_89, v_58 };
FX_CALL(fx_strjoin(0, 0, 0, strs_16, 8, &cmd_2), _fx_catch_4);
}
bool v_66;
fx_str_t slit_90 = FX_MAKE_STR("cl");
v_66 = _fx_F6__eq__B2SS(&c_comp_1, &slit_90, 0);
bool result_0;
if (v_66) {
fx_str_t slit_91 = FX_MAKE_STR("rt");
FX_CALL(_fx_M4FileFM5popenRM1t2SS(&cmd_2, &slit_91, &p_0, 0), _fx_catch_4);
int_ lineno_0 = 0;
for (;;) {
fx_str_t str_0 = {0};
FX_CALL(_fx_M4FileFM6readlnS1RM1t(&p_0, &str_0, 0), _fx_catch_2);
if (FX_STR_LENGTH(str_0) == 0) {
FX_BREAK(_fx_catch_2);
}
lineno_0 = lineno_0 + 1;
if (lineno_0 > 1) {
FX_CALL(_fx_M8CompilerFM5printv1S(&str_0, 0), _fx_catch_2);
}
_fx_catch_2: ;
FX_FREE_STR(&str_0);
FX_CHECK_BREAK();
FX_CHECK_EXN(_fx_catch_4);
}
int_ v_67;
FX_CALL(_fx_M4FileFM18pclose_exit_statusi1RM1t(&p_0, &v_67, 0), _fx_catch_4);
result_0 = v_67 == 0;
}
else {
int_ v_68; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_2, &v_68, 0), _fx_catch_4); result_0 = v_68 == 0;
}
if (result_0) {
fx_str_t slit_92 = FX_MAKE_STR("ok");
FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g18Compiler__MsgGreen, &slit_92, &status_0, 0),
_fx_catch_4);
}
else {
fx_str_t slit_93 = FX_MAKE_STR("fail");
FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_93, &status_0, 0),
_fx_catch_4);
}
_fx_make_T3BBS(result_0, true, &status_0, &v_53);
}
else {
_fx_make_T3BBS(ok_j_0, false, &status_j_0, &v_53);
}
bool ok_j_1 = v_53.t0;
bool recompiled_0 = v_53.t1;
fx_copy_str(&v_53.t2, &status_j_1);
FX_CALL(_fx_M8CompilerFM6stringS1S(&c_filename_0, &v_59, 0), _fx_catch_4);
FX_CALL(_fx_M8CompilerFM6stringS1S(&status_j_1, &v_60, 0), _fx_catch_4);
fx_str_t slit_94 = FX_MAKE_STR("CC ");
fx_str_t slit_95 = FX_MAKE_STR(": ");
{
const fx_str_t strs_17[] = { slit_94, v_59, slit_95, v_60 };
FX_CALL(fx_strjoin(0, 0, 0, strs_17, 4, &v_61), _fx_catch_4);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_61, 0), _fx_catch_4);
_fx_LS lstend_0 = 0;
_fx_LT2SR10Ast__loc_t lst_0 = pragma_clibs_0;
for (; lst_0; lst_0 = lst_0->tl) {
_fx_T2SR10Ast__loc_t* __pat___1 = &lst_0->hd;
_fx_LS node_0 = 0;
FX_CALL(_fx_cons_LS(&__pat___1->t0, 0, false, &node_0), _fx_catch_3);
FX_LIST_APPEND(v_62, lstend_0, node_0);
_fx_catch_3: ;
FX_CHECK_EXN(_fx_catch_4);
}
FX_CALL(_fx_M8CompilerFM3revLS1LS(v_62, &clibs_5, 0), _fx_catch_4);
_fx_make_T5BBLSBS(is_cpp_0, recompiled_0, clibs_5, ok_j_1, &obj_filename_0, &tup_0);
_fx_copy_T5BBLSBS(&tup_0, dstptr_0);
_fx_catch_4: ;
_fx_free_T5BBLSBS(&tup_0);
if (clibs_5) {
_fx_free_LS(&clibs_5);
}
if (v_62) {
_fx_free_LS(&v_62);
}
FX_FREE_STR(&v_61);
FX_FREE_STR(&v_60);
FX_FREE_STR(&v_59);
FX_FREE_STR(&status_j_1);
FX_FREE_STR(&status_0);
_fx_free_R7File__t(&p_0);
FX_FREE_STR(&cmd_2);
FX_FREE_STR(&v_58);
FX_FREE_STR(&v_57);
FX_FREE_STR(&v_56);
FX_FREE_STR(&v_55);
FX_FREE_STR(&v_54);
_fx_free_T3BBS(&v_53);
FX_FREE_STR(&obj_filename_0);
FX_FREE_STR(&c_filename_0);
FX_FREE_STR(&status_j_0);
FX_FREE_STR(&v_52);
FX_FREE_STR(&v_51);
FX_FREE_STR(&v_50);
fx_free_exn(&exn_1);
fx_free_exn(&exn_0);
FX_FREE_STR(&str_old_0);
FX_FREE_STR(&str_new_0);
_fx_free_T3BBS(&v_49);
FX_FREE_STR(&output_fname_c_0);
FX_FREE_STR(&output_fname_1);
FX_FREE_STR(&ext_0);
FX_FREE_STR(&comp_0);
_fx_free_Ta2S(&v_48);
FX_FREE_STR(&output_fname_0);
FX_FREE_STR(&cmod_cname_0);
if (cmod_ccode_0) {
_fx_free_LN15C_form__cstmt_t(&cmod_ccode_0);
}
if (pragma_clibs_0) {
_fx_free_LT2SR10Ast__loc_t(&pragma_clibs_0);
}
_fx_free_R17C_form__cmodule_t(&__pat___0);
FX_CHECK_EXN_PARALLEL(fx_status, par_status_0);
}
FX_UPDATE_EXN_PARALLEL(par_status_0, _fx_cleanup);
_fx_make_T5BBLSBLS(false, false, 0, ok_1, 0, &__fold_result___0);
int_ ni_1 = FX_ARR_SIZE(results_0, 0);
_fx_T5BBLSBS* ptr_results_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, 0);
for (int_ i_2 = 0; i_2 < ni_1; i_2++) {
_fx_T5BBLSBS __pat___2 = {0};
_fx_LS clibs_j_0 = 0;
fx_str_t obj_0 = {0};
_fx_T5BBLSBLS v_69 = {0};
_fx_LS all_clibs_1 = 0;
_fx_LS objs_1 = 0;
_fx_LS v_70 = 0;
_fx_T5BBLSBLS v_71 = {0};
_fx_copy_T5BBLSBS(ptr_results_0 + i_2, &__pat___2);
FX_COPY_PTR(__pat___2.t2, &clibs_j_0);
fx_copy_str(&__pat___2.t4, &obj_0);
_fx_copy_T5BBLSBLS(&__fold_result___0, &v_69);
FX_COPY_PTR(v_69.t2, &all_clibs_1);
FX_COPY_PTR(v_69.t4, &objs_1);
FX_CALL(_fx_M8CompilerFM7__add__LS2LSLS(clibs_j_0, all_clibs_1, &v_70, 0), _fx_catch_5);
FX_CALL(_fx_cons_LS(&obj_0, objs_1, false, &objs_1), _fx_catch_5);
_fx_make_T5BBLSBLS((bool)(v_69.t0 | __pat___2.t0), (bool)(v_69.t1 | __pat___2.t1), v_70, (bool)(v_69.t3 & __pat___2.t3),
objs_1, &v_71);
_fx_free_T5BBLSBLS(&__fold_result___0);
_fx_copy_T5BBLSBLS(&v_71, &__fold_result___0);
_fx_catch_5: ;
_fx_free_T5BBLSBLS(&v_71);
if (v_70) {
_fx_free_LS(&v_70);
}
if (objs_1) {
_fx_free_LS(&objs_1);
}
if (all_clibs_1) {
_fx_free_LS(&all_clibs_1);
}
_fx_free_T5BBLSBLS(&v_69);
FX_FREE_STR(&obj_0);
if (clibs_j_0) {
_fx_free_LS(&clibs_j_0);
}
_fx_free_T5BBLSBS(&__pat___2);
FX_CHECK_EXN(_fx_cleanup);
}
_fx_copy_T5BBLSBLS(&__fold_result___0, &v_28);
bool any_cpp_0 = v_28.t0;
bool any_recompiled_0 = v_28.t1;
FX_COPY_PTR(v_28.t2, &all_clibs_0);
bool ok_2 = v_28.t3;
FX_COPY_PTR(v_28.t4, &objs_0);
bool v_72;
bool t_0;
if (ok_2) {
t_0 = !any_recompiled_0;
}
else {
t_0 = false;
}
if (t_0) {
fx_copy_str(&_fx_g12Options__opt.app_filename, &v_29); FX_CALL(_fx_M8FilenameFM6existsB1S(&v_29, &v_72, 0), _fx_cleanup);
}
else {
v_72 = false;
}
if (v_72) {
fx_copy_str(&_fx_g12Options__opt.app_filename, &v_30);
FX_CALL(_fx_M8CompilerFM6stringS1S(&v_30, &v_31, 0), _fx_cleanup);
fx_str_t slit_96 = FX_MAKE_STR(" is up-to-date\n");
{
const fx_str_t strs_18[] = { v_31, slit_96 };
FX_CALL(fx_strjoin(0, 0, 0, strs_18, 2, &v_32), _fx_cleanup);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_32, 0), _fx_cleanup);
*fx_result = ok_2;
}
else if (!ok_2) {
*fx_result = ok_2;
}
else {
fx_str_t slit_97 = FX_MAKE_STR("FICUS_LINK_LIBRARIES");
FX_CALL(_fx_M3SysFM6getenvS1S(&slit_97, &custom_clibs_0, 0), _fx_cleanup);
fx_copy_str(&_fx_g12Options__opt.clibs, &v_33);
if (FX_STR_LENGTH(v_33) == 0) {
fx_copy_str(&custom_clibs_0, &custom_clibs_1);
}
else {
fx_copy_str(&_fx_g12Options__opt.clibs, &v_34);
fx_str_t slit_98 = FX_MAKE_STR(" ");
{
const fx_str_t strs_19[] = { custom_clibs_0, slit_98, v_34 };
FX_CALL(fx_strjoin(0, 0, 0, strs_19, 3, &custom_clibs_1), _fx_cleanup);
}
}
if (all_clibs_0 == 0) {
fx_copy_str(&custom_clibs_1, &custom_clibs_2);
}
else {
FX_CALL(_fx_M8CompilerFM3revLS1LS(all_clibs_0, &v_35, 0), _fx_cleanup);
_fx_LS lstend_1 = 0;
_fx_LS lst_1 = v_35;
for (; lst_1; lst_1 = lst_1->tl) {
fx_str_t concat_str_0 = {0};
fx_str_t* l_0 = &lst_1->hd;
{
const fx_str_t strs_20[] = { link_lib_opt_0, *l_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_20, 2, &concat_str_0), _fx_catch_6);
}
_fx_LS node_1 = 0;
FX_CALL(_fx_cons_LS(&concat_str_0, 0, false, &node_1), _fx_catch_6);
FX_LIST_APPEND(v_36, lstend_1, node_1);
_fx_catch_6: ;
FX_FREE_STR(&concat_str_0);
FX_CHECK_EXN(_fx_cleanup);
}
fx_str_t slit_99 = FX_MAKE_STR(" ");
FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_99, v_36, &v_37, 0), _fx_cleanup);
fx_str_t slit_100 = FX_MAKE_STR(" ");
{
const fx_str_t strs_21[] = { custom_clibs_1, slit_100, v_37 };
FX_CALL(fx_strjoin(0, 0, 0, strs_21, 3, &custom_clibs_2), _fx_cleanup);
}
}
fx_str_t slit_101 = FX_MAKE_STR(" ");
{
const fx_str_t strs_22[] = { clibs_3, slit_101, custom_clibs_2 };
FX_CALL(fx_strjoin(0, 0, 0, strs_22, 3, &clibs_4), _fx_cleanup);
}
FX_CALL(_fx_M8CompilerFM6stringS1S(&clibs_4, &v_38, 0), _fx_cleanup);
fx_str_t slit_102 = FX_MAKE_STR("Linking the app with flags=");
{
const fx_str_t strs_23[] = { slit_102, v_38 };
FX_CALL(fx_strjoin(0, 0, 0, strs_23, 2, &v_39), _fx_cleanup);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_39, 0), _fx_cleanup);
if (any_cpp_0) {
fx_copy_str(&cpp_comp_1, &v_40);
}
else {
fx_copy_str(&c_comp_1, &v_40);
}
fx_copy_str(&_fx_g12Options__opt.app_filename, &v_41);
fx_str_t slit_103 = FX_MAKE_STR(" ");
{
const fx_str_t strs_24[] = { v_40, slit_103, appname_opt_0, v_41 };
FX_CALL(fx_strjoin(0, 0, 0, strs_24, 4, &cmd_0), _fx_cleanup);
}
fx_str_t slit_104 = FX_MAKE_STR(" ");
FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_104, objs_0, &v_42, 0), _fx_cleanup);
fx_str_t slit_105 = FX_MAKE_STR(" ");
fx_str_t slit_106 = FX_MAKE_STR(" ");
{
const fx_str_t strs_25[] = { cmd_0, slit_105, v_42, slit_106, clibs_4 };
FX_CALL(fx_strjoin(0, 0, 0, strs_25, 5, &cmd_1), _fx_cleanup);
}
int_ v_73;
FX_CALL(_fx_M3SysFM7commandi1S(&cmd_1, &v_73, 0), _fx_cleanup);
*fx_result = v_73 == 0;
}
_fx_cleanup: ;
FX_FREE_STR(&osinfo_0);
FX_FREE_STR(&opt_level_str_0);
FX_FREE_STR(&runtime_include_path_0);
FX_FREE_STR(&runtime_lib_path_0);
FX_FREE_STR(&runtime_impl_0);
FX_FREE_STR(&build_root_dir_0);
FX_FREE_STR(&build_dir_0);
_fx_free_Ta9S(&v_0);
FX_FREE_STR(&opt_flags_0);
FX_FREE_STR(&v_1);
FX_FREE_STR(&v_2);
FX_FREE_STR(&v_3);
FX_FREE_STR(&v_4);
FX_FREE_STR(&cflags_0);
FX_FREE_STR(&c_comp_0);
FX_FREE_STR(&cpp_comp_name_0);
FX_FREE_STR(&v_5);
FX_FREE_STR(&cpp_comp_0);
_fx_free_Ta4S(&v_6);
_fx_free_Ta2S(&v_7);
FX_FREE_STR(&omp_cflags_0);
FX_FREE_STR(&omp_lib_0);
_fx_free_Ta3S(&v_8);
FX_FREE_STR(&v_9);
FX_FREE_STR(&v_10);
FX_FREE_STR(&libpath_0);
FX_FREE_STR(&cflags_1);
FX_FREE_STR(&clibs_0);
FX_FREE_STR(&omp_flags_0);
FX_FREE_STR(&os_0);
FX_FREE_STR(&libpath_1);
FX_FREE_STR(&cflags_2);
FX_FREE_STR(&clibs_1);
FX_FREE_STR(&ggdb_opt_0);
FX_FREE_STR(&stk_overflow_0);
FX_FREE_STR(&v_11);
FX_FREE_STR(&v_12);
FX_FREE_STR(&v_13);
FX_FREE_STR(&v_14);
FX_FREE_STR(&v_15);
FX_FREE_STR(&v_16);
FX_FREE_STR(&cflags_3);
FX_FREE_STR(&v_17);
FX_FREE_STR(&v_18);
FX_FREE_STR(&v_19);
FX_FREE_STR(&v_20);
FX_FREE_STR(&clibs_2);
FX_FREE_STR(&c_comp_1);
FX_FREE_STR(&cpp_comp_1);
FX_FREE_STR(&obj_ext_0);
FX_FREE_STR(&obj_opt_0);
FX_FREE_STR(&appname_opt_0);
FX_FREE_STR(&link_lib_opt_0);
FX_FREE_STR(&cflags_4);
FX_FREE_STR(&clibs_3);
FX_FREE_STR(&custom_cflags_0);
FX_FREE_STR(&v_21);
FX_FREE_STR(&custom_cflags_1);
FX_FREE_STR(&v_22);
FX_FREE_STR(&cflags_5);
FX_FREE_STR(&v_23);
FX_FREE_STR(&v_24);
FX_FREE_STR(&v_25);
_fx_free_R14Ast__pragmas_t(&v_26);
_fx_free_R17C_form__cmodule_t(&runtime_pseudo_cmod_0);
if (cmods_1) {
_fx_free_LR17C_form__cmodule_t(&cmods_1);
}
FX_FREE_ARR(&v_27);
FX_FREE_ARR(&results_0);
_fx_free_T5BBLSBLS(&__fold_result___0);
_fx_free_T5BBLSBLS(&v_28);
if (all_clibs_0) {
_fx_free_LS(&all_clibs_0);
}
if (objs_0) {
_fx_free_LS(&objs_0);
}
FX_FREE_STR(&v_29);
FX_FREE_STR(&v_30);
FX_FREE_STR(&v_31);
FX_FREE_STR(&v_32);
FX_FREE_STR(&custom_clibs_0);
FX_FREE_STR(&v_33);
FX_FREE_STR(&custom_clibs_1);
FX_FREE_STR(&v_34);
FX_FREE_STR(&custom_clibs_2);
if (v_35) {
_fx_free_LS(&v_35);
}
if (v_36) {
_fx_free_LS(&v_36);
}
FX_FREE_STR(&v_37);
FX_FREE_STR(&clibs_4);
FX_FREE_STR(&v_38);
FX_FREE_STR(&v_39);
FX_FREE_STR(&v_40);
FX_FREE_STR(&v_41);
FX_FREE_STR(&cmd_0);
FX_FREE_STR(&v_42);
FX_FREE_STR(&cmd_1);
return fx_status;
}
FX_EXTERN_C int _fx_M8CompilerFM11process_allB1S(fx_str_t* fname0_0, bool* fx_result, void* fx_fv)
{
fx_exn_t exn_0 = {0};
_fx_LE __fold_result___0 = 0;
_fx_LE v_0 = 0;
fx_str_t v_1 = {0};
fx_str_t v_2 = {0};
int fx_status = 0;
FX_CALL(_fx_M3AstFM8init_allv0(0), _fx_cleanup);
_fx_T2SLS v_3 = {0};
fx_str_t ficus_root_0 = {0};
_fx_LS ficus_path_0 = 0;
fx_str_t v_4 = {0};
fx_str_t v_5 = {0};
fx_str_t v_6 = {0};
fx_exn_t v_7 = {0};
_fx_LT2iLi graph_0 = 0;
_fx_Li v_8 = 0;
_fx_Li v_9 = 0;
_fx_Li v_10 = 0;
_fx_LS v_11 = 0;
fx_str_t modules_used_0 = {0};
fx_str_t parsing_complete_0 = {0};
fx_str_t v_12 = {0};
fx_str_t v_13 = {0};
_fx_T2LR17K_form__kmodule_tB v_14 = {0};
_fx_LR17K_form__kmodule_t kmods_0 = 0;
_fx_LR17K_form__kmodule_t kmods_1 = 0;
fx_str_t v_15 = {0};
_fx_T2LR17K_form__kmodule_tB v_16 = {0};
fx_str_t v_17 = {0};
_fx_LR17K_form__kmodule_t kmods_2 = 0;
fx_str_t v_18 = {0};
_fx_T2LR17C_form__cmodule_tB v_19 = {0};
fx_str_t v_20 = {0};
_fx_LR17C_form__cmodule_t cmods_0 = 0;
fx_str_t v_21 = {0};
_fx_LR17C_form__cmodule_t cmods_1 = 0;
_fx_LR17C_form__cmodule_t cmods_2 = 0;
_fx_LR17C_form__cmodule_t cmods_3 = 0;
fx_str_t appname_0 = {0};
fx_str_t v_22 = {0};
fx_str_t appname_1 = {0};
_fx_LS v_23 = 0;
fx_str_t cmd_0 = {0};
_fx_LE __fold_result___1 = 0;
_fx_LE v_24 = 0;
fx_str_t v_25 = {0};
fx_str_t v_26 = {0};
FX_CALL(_fx_M8CompilerFM15find_ficus_dirsT2SLS0(&v_3, 0), _fx_catch_8);
fx_copy_str(&v_3.t0, &ficus_root_0);
FX_COPY_PTR(v_3.t1, &ficus_path_0);
if (FX_STR_LENGTH(ficus_root_0) == 0) {
FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_4, 0), _fx_catch_8);
FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_5, 0), _fx_catch_8);
fx_str_t slit_0 =
FX_MAKE_STR("Ficus root directory is not found.\n"
U"Please, add the directory \'lib\' containing Builtins.fx to\n"
U"\'FICUS_PATH\' environment variable or make sure that either\n"
U"1. \'ficus\' executable is put in a directory <ficus_root>/bin\n"
U"and there are <ficus_root>/runtime and <ficus_root>/lib.\n"
U"2. or \'ficus\' executable is in (/usr|/usr/local|/opt|...)/bin and\n"
U" there are (/usr|...)/lib/ficus-");
fx_str_t slit_1 = FX_MAKE_STR(".");
fx_str_t slit_2 = FX_MAKE_STR("/{runtime, lib}");
{
const fx_str_t strs_0[] = { slit_0, v_4, slit_1, v_5, slit_2 };
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 5, &v_6), _fx_catch_8);
}
FX_CALL(_fx_F9make_FailE1S(&v_6, &v_7), _fx_catch_8);
FX_THROW(&v_7, true, _fx_catch_8);
}
bool ok_0;
FX_CALL(_fx_M8CompilerFM9parse_allB2SLS(fname0_0, ficus_path_0, &ok_0, 0), _fx_catch_8);
if (!ok_0) {
FX_THROW(&_fx_E30Compiler__CumulativeParseErrorv, false, _fx_catch_8);
}
_fx_LT2iLi lstend_0 = 0;
int_ ni_0 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0);
_fx_N16Ast__defmodule_t* ptr_all_modules_0 = FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, 0);
for (int_ i_0 = 0; i_0 < ni_0; i_0++) {
_fx_N16Ast__defmodule_t minfo_0 = 0;
_fx_Li v_27 = 0;
_fx_T2iLi tup_0 = {0};
FX_COPY_PTR(ptr_all_modules_0[i_0], &minfo_0);
FX_COPY_PTR(minfo_0->u.defmodule_t.t5, &v_27);
_fx_make_T2iLi(minfo_0->u.defmodule_t.t2, v_27, &tup_0);
_fx_LT2iLi node_0 = 0;
FX_CALL(_fx_cons_LT2iLi(&tup_0, 0, false, &node_0), _fx_catch_0);
FX_LIST_APPEND(graph_0, lstend_0, node_0);
_fx_catch_0: ;
_fx_free_T2iLi(&tup_0);
FX_FREE_LIST_SIMPLE(&v_27);
if (minfo_0) {
_fx_free_N16Ast__defmodule_t(&minfo_0);
}
FX_CHECK_EXN(_fx_catch_8);
}
FX_CALL(_fx_M8CompilerFM8toposortLi1LT2iLi(graph_0, &v_8, 0), _fx_catch_8);
if (v_8 != 0) {
FX_COPY_PTR(v_8->tl, &v_9);
}
else {
FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_8);
}
FX_CHECK_EXN(_fx_catch_8);
if (v_9 != 0) {
FX_COPY_PTR(v_9->tl, &v_10);
}
else {
FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_8);
}
FX_CHECK_EXN(_fx_catch_8);
FX_FREE_LIST_SIMPLE(&_fx_g23Ast__all_modules_sorted);
FX_COPY_PTR(v_10, &_fx_g23Ast__all_modules_sorted);
if (_fx_g12Options__opt.print_ast0) {
_fx_Li lst_0 = _fx_g23Ast__all_modules_sorted;
for (; lst_0; lst_0 = lst_0->tl) {
_fx_N16Ast__defmodule_t minfo_1 = 0;
int_ m_0 = lst_0->hd;
FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_0, &minfo_1, 0), _fx_catch_1);
FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_1, 0), _fx_catch_1);
_fx_catch_1: ;
if (minfo_1) {
_fx_free_N16Ast__defmodule_t(&minfo_1);
}
FX_CHECK_EXN(_fx_catch_8);
}
}
_fx_LS lstend_1 = 0;
_fx_Li lst_1 = _fx_g23Ast__all_modules_sorted;
for (; lst_1; lst_1 = lst_1->tl) {
fx_str_t res_0 = {0};
int_ m_idx_0 = lst_1->hd;
_fx_R9Ast__id_t v_28;
FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(m_idx_0, &v_28, 0), _fx_catch_2);
FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_28, &res_0, 0), _fx_catch_2);
_fx_LS node_1 = 0;
FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_1), _fx_catch_2);
FX_LIST_APPEND(v_11, lstend_1, node_1);
_fx_catch_2: ;
FX_FREE_STR(&res_0);
FX_CHECK_EXN(_fx_catch_8);
}
fx_str_t slit_3 = FX_MAKE_STR(", ");
FX_CALL(_fx_F4joinS2SLS(&slit_3, v_11, &modules_used_0, 0), _fx_catch_8);
if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_4 = FX_MAKE_STR("[34;1mParsing complete[0m"); fx_copy_str(&slit_4, &parsing_complete_0);
}
else {
fx_str_t slit_5 = FX_MAKE_STR("Parsing complete"); fx_copy_str(&slit_5, &parsing_complete_0);
}
fx_str_t slit_6 = FX_MAKE_STR(". Modules used: ");
{
const fx_str_t strs_1[] = { parsing_complete_0, slit_6, modules_used_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_1, 3, &v_12), _fx_catch_8);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_12, 0), _fx_catch_8);
_fx_free_LE(&_fx_g21Ast__all_compile_errs);
_fx_g21Ast__all_compile_errs = 0;
_fx_Li lst_2 = _fx_g23Ast__all_modules_sorted;
for (; lst_2; lst_2 = lst_2->tl) {
int_ m_1 = lst_2->hd;
FX_CALL(_fx_M13Ast_typecheckFM9check_modv1i(m_1, 0), _fx_catch_3);
_fx_catch_3: ;
FX_CHECK_EXN(_fx_catch_8);
}
bool ok_1 = _fx_g21Ast__all_compile_errs == 0;
if (ok_1) {
if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_7 = FX_MAKE_STR("[34;1mType checking complete[0m"); fx_copy_str(&slit_7, &v_13);
}
else {
fx_str_t slit_8 = FX_MAKE_STR("Type checking complete"); fx_copy_str(&slit_8, &v_13);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_13, 0), _fx_catch_8);
if (_fx_g12Options__opt.print_ast) {
_fx_Li lst_3 = _fx_g23Ast__all_modules_sorted;
for (; lst_3; lst_3 = lst_3->tl) {
_fx_N16Ast__defmodule_t minfo_2 = 0;
int_ m_2 = lst_3->hd;
FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_2, &minfo_2, 0), _fx_catch_4);
FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_2, 0), _fx_catch_4);
_fx_catch_4: ;
if (minfo_2) {
_fx_free_N16Ast__defmodule_t(&minfo_2);
}
FX_CHECK_EXN(_fx_catch_8);
}
}
}
if (ok_1) {
_fx_free_LE(&_fx_g21Ast__all_compile_errs);
_fx_g21Ast__all_compile_errs = 0;
FX_CALL(_fx_M6K_formFM13init_all_idksv0(0), _fx_catch_8);
FX_CALL(_fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li(_fx_g23Ast__all_modules_sorted, &kmods_0, 0),
_fx_catch_8);
_fx_make_T2LR17K_form__kmodule_tB(kmods_0, _fx_g21Ast__all_compile_errs == 0, &v_14);
}
else {
_fx_make_T2LR17K_form__kmodule_tB(0, false, &v_14);
}
FX_COPY_PTR(v_14.t0, &kmods_1);
bool ok_2 = v_14.t1;
if (ok_2) {
if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_9 = FX_MAKE_STR("[34;1mK-normalization complete[0m"); fx_copy_str(&slit_9, &v_15);
}
else {
fx_str_t slit_10 = FX_MAKE_STR("K-normalization complete"); fx_copy_str(&slit_10, &v_15);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_15, 0), _fx_catch_8);
if (_fx_g12Options__opt.print_k0) {
FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_1, 0), _fx_catch_8);
}
}
if (ok_2) {
if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_11 = FX_MAKE_STR("[34;1mK-form optimization started[0m"); fx_copy_str(&slit_11, &v_17);
}
else {
fx_str_t slit_12 = FX_MAKE_STR("K-form optimization started"); fx_copy_str(&slit_12, &v_17);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_17, 0), _fx_catch_8);
FX_CALL(_fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t(kmods_1, &v_16, 0), _fx_catch_8);
}
else {
_fx_make_T2LR17K_form__kmodule_tB(0, false, &v_16);
}
FX_COPY_PTR(v_16.t0, &kmods_2);
bool ok_3 = v_16.t1;
if (ok_3) {
if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_13 = FX_MAKE_STR("[34;1mK-form optimization complete[0m"); fx_copy_str(&slit_13, &v_18);
}
else {
fx_str_t slit_14 = FX_MAKE_STR("K-form optimization complete"); fx_copy_str(&slit_14, &v_18);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_18, 0), _fx_catch_8);
if (_fx_g12Options__opt.print_k) {
FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_2, 0), _fx_catch_8);
}
}
bool ok_4;
if (!_fx_g12Options__opt.gen_c) {
ok_4 = ok_3;
}
else {
if (ok_3) {
if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_15 = FX_MAKE_STR("[34;1mGenerating C code[0m"); fx_copy_str(&slit_15, &v_20);
}
else {
fx_str_t slit_16 = FX_MAKE_STR("Generating C code"); fx_copy_str(&slit_16, &v_20);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_20, 0), _fx_catch_8);
_fx_free_LE(&_fx_g21Ast__all_compile_errs);
_fx_g21Ast__all_compile_errs = 0;
FX_CALL(_fx_M6C_formFM13init_all_idcsv0(0), _fx_catch_8);
FX_CALL(_fx_M9C_gen_stdFM14init_std_namesv0(0), _fx_catch_8);
FX_CALL(_fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t(kmods_2, &cmods_0, 0),
_fx_catch_8);
if (_fx_g21Compiler__iscolorterm) {
fx_str_t slit_17 = FX_MAKE_STR("[34;1mC code generated[0m"); fx_copy_str(&slit_17, &v_21);
}
else {
fx_str_t slit_18 = FX_MAKE_STR("C code generated"); fx_copy_str(&slit_18, &v_21);
}
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_21, 0), _fx_catch_8);
FX_CALL(_fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_0, &cmods_1, 0),
_fx_catch_8);
_fx_LR17C_form__cmodule_t lstend_2 = 0;
_fx_LR17C_form__cmodule_t lst_4 = cmods_1;
for (; lst_4; lst_4 = lst_4->tl) {
_fx_R17C_form__cmodule_t t_0 = {0};
_fx_R17C_form__cmodule_t* cmod_0 = &lst_4->hd;
bool is_cpp_0;
if (_fx_g12Options__opt.compile_by_cpp) {
is_cpp_0 = true;
}
else {
is_cpp_0 = cmod_0->cmod_pragmas.pragma_cpp;
}
if (is_cpp_0) {
FX_CALL(_fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t(cmod_0, &t_0, 0),
_fx_catch_5);
}
else {
_fx_copy_R17C_form__cmodule_t(cmod_0, &t_0);
}
_fx_LR17C_form__cmodule_t node_2 = 0;
FX_CALL(_fx_cons_LR17C_form__cmodule_t(&t_0, 0, false, &node_2), _fx_catch_5);
FX_LIST_APPEND(cmods_2, lstend_2, node_2);
_fx_catch_5: ;
_fx_free_R17C_form__cmodule_t(&t_0);
FX_CHECK_EXN(_fx_catch_8);
}
fx_str_t slit_19 = FX_MAKE_STR("\tConversion to C-form complete");
FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_19, 0), _fx_catch_8);
_fx_make_T2LR17C_form__cmodule_tB(cmods_2, _fx_g21Ast__all_compile_errs == 0, &v_19);
}
else {
_fx_make_T2LR17C_form__cmodule_tB(0, false, &v_19);
}
FX_COPY_PTR(v_19.t0, &cmods_3);
bool ok_5 = v_19.t1;
bool t_1;
if (ok_5) {
if (_fx_g12Options__opt.make_app) {
t_1 = true;
}
else {
t_1 = _fx_g12Options__opt.run_app;
}
}
else {
t_1 = false;
}
bool ok_6;
if (t_1) {
FX_CALL(_fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS(cmods_3, &ficus_root_0, &ok_6, 0), _fx_catch_8);
}
else {
ok_6 = ok_5;
}
bool t_2;
if (ok_6) {
t_2 = _fx_g12Options__opt.run_app;
}
else {
t_2 = false;
}
if (t_2) {
fx_copy_str(&_fx_g12Options__opt.app_filename, &appname_0);
FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_22, 0), _fx_catch_8);
FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_22, &appname_0, &appname_1, 0), _fx_catch_8);
FX_COPY_PTR(_fx_g12Options__opt.app_args, &v_23);
FX_CALL(_fx_cons_LS(&appname_1, v_23, false, &v_23), _fx_catch_8);
fx_str_t slit_20 = FX_MAKE_STR(" ");
FX_CALL(_fx_F4joinS2SLS(&slit_20, v_23, &cmd_0, 0), _fx_catch_8);
int_ v_29;
FX_CALL(_fx_M3SysFM7commandi1S(&cmd_0, &v_29, 0), _fx_catch_8);
ok_4 = v_29 == 0;
}
else {
ok_4 = ok_6;
}
}
if (!ok_4) {
int_ nerrs_0 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0);
if (nerrs_0 != 0) {
_fx_LE lst_5 = _fx_g21Ast__all_compile_errs;
for (; lst_5; lst_5 = lst_5->tl) {
_fx_LE r_0 = 0;
fx_exn_t* a_0 = &lst_5->hd;
FX_COPY_PTR(__fold_result___1, &r_0);
FX_CALL(_fx_cons_LE(a_0, r_0, false, &r_0), _fx_catch_6);
_fx_free_LE(&__fold_result___1);
FX_COPY_PTR(r_0, &__fold_result___1);
_fx_catch_6: ;
if (r_0) {
_fx_free_LE(&r_0);
}
FX_CHECK_EXN(_fx_catch_8);
}
FX_COPY_PTR(__fold_result___1, &v_24);
_fx_LE lst_6 = v_24;
for (; lst_6; lst_6 = lst_6->tl) {
fx_exn_t* x_0 = &lst_6->hd;
FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_0, 0), _fx_catch_7);
_fx_catch_7: ;
FX_CHECK_EXN(_fx_catch_8);
}
FX_CALL(_fx_F6stringS1i(nerrs_0, &v_25, 0), _fx_catch_8);
fx_str_t slit_21 = FX_MAKE_STR("\n");
fx_str_t slit_22 = FX_MAKE_STR(" errors occured during type checking.");
{
const fx_str_t strs_2[] = { slit_21, v_25, slit_22 };
FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_26), _fx_catch_8);
}
_fx_F12print_stringv1S(&v_26, 0);
fx_str_t slit_23 = FX_MAKE_STR("\n");
_fx_F12print_stringv1S(&slit_23, 0);
}
}
*fx_result = ok_4;
_fx_catch_8: ;
_fx_free_T2SLS(&v_3);
FX_FREE_STR(&ficus_root_0);
if (ficus_path_0) {
_fx_free_LS(&ficus_path_0);
}
FX_FREE_STR(&v_4);
FX_FREE_STR(&v_5);
FX_FREE_STR(&v_6);
fx_free_exn(&v_7);
if (graph_0) {
_fx_free_LT2iLi(&graph_0);
}
FX_FREE_LIST_SIMPLE(&v_8);
FX_FREE_LIST_SIMPLE(&v_9);
FX_FREE_LIST_SIMPLE(&v_10);
if (v_11) {
_fx_free_LS(&v_11);
}
FX_FREE_STR(&modules_used_0);
FX_FREE_STR(&parsing_complete_0);
FX_FREE_STR(&v_12);
FX_FREE_STR(&v_13);
_fx_free_T2LR17K_form__kmodule_tB(&v_14);
if (kmods_0) {
_fx_free_LR17K_form__kmodule_t(&kmods_0);
}
if (kmods_1) {
_fx_free_LR17K_form__kmodule_t(&kmods_1);
}
FX_FREE_STR(&v_15);
_fx_free_T2LR17K_form__kmodule_tB(&v_16);
FX_FREE_STR(&v_17);
if (kmods_2) {
_fx_free_LR17K_form__kmodule_t(&kmods_2);
}
FX_FREE_STR(&v_18);
_fx_free_T2LR17C_form__cmodule_tB(&v_19);
FX_FREE_STR(&v_20);
if (cmods_0) {
_fx_free_LR17C_form__cmodule_t(&cmods_0);
}
FX_FREE_STR(&v_21);
if (cmods_1) {
_fx_free_LR17C_form__cmodule_t(&cmods_1);
}
if (cmods_2) {
_fx_free_LR17C_form__cmodule_t(&cmods_2);
}
if (cmods_3) {
_fx_free_LR17C_form__cmodule_t(&cmods_3);
}
FX_FREE_STR(&appname_0);
FX_FREE_STR(&v_22);
FX_FREE_STR(&appname_1);
if (v_23) {
_fx_free_LS(&v_23);
}
FX_FREE_STR(&cmd_0);
if (__fold_result___1) {
_fx_free_LE(&__fold_result___1);
}
if (v_24) {
_fx_free_LE(&v_24);
}
FX_FREE_STR(&v_25);
FX_FREE_STR(&v_26);
if (fx_status < 0) {
fx_exn_get_and_reset(fx_status, &exn_0);
fx_status = 0;
int_ nerrs_1 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0);
if (nerrs_1 != 0) {
_fx_LE lst_7 = _fx_g21Ast__all_compile_errs;
for (; lst_7; lst_7 = lst_7->tl) {
_fx_LE r_1 = 0;
fx_exn_t* a_1 = &lst_7->hd;
FX_COPY_PTR(__fold_result___0, &r_1);
FX_CALL(_fx_cons_LE(a_1, r_1, false, &r_1), _fx_catch_9);
_fx_free_LE(&__fold_result___0);
FX_COPY_PTR(r_1, &__fold_result___0);
_fx_catch_9: ;
if (r_1) {
_fx_free_LE(&r_1);
}
FX_CHECK_EXN(_fx_cleanup);
}
FX_COPY_PTR(__fold_result___0, &v_0);
_fx_LE lst_8 = v_0;
for (; lst_8; lst_8 = lst_8->tl) {
fx_exn_t* x_1 = &lst_8->hd;
FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_1, 0), _fx_catch_10);
_fx_catch_10: ;
FX_CHECK_EXN(_fx_cleanup);
}
FX_CALL(_fx_F6stringS1i(nerrs_1, &v_1, 0), _fx_cleanup);
fx_str_t slit_24 = FX_MAKE_STR("\n");
fx_str_t slit_25 = FX_MAKE_STR(" errors occured during type checking.");
{
const fx_str_t strs_3[] = { slit_24, v_1, slit_25 };
FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_2), _fx_cleanup);
}
_fx_F12print_stringv1S(&v_2, 0);
fx_str_t slit_26 = FX_MAKE_STR("\n");
_fx_F12print_stringv1S(&slit_26, 0);
}
int tag_0 = exn_0.tag;
if (tag_0 == _FX_EXN_E4Fail) {
fx_str_t v_30 = {0};
fx_str_t slit_27 = FX_MAKE_STR(": ");
fx_str_t* msg_0 = &FX_EXN_DATA(_fx_E4Fail_data_t, exn_0.data);
{
const fx_str_t strs_4[] = { _fx_g15Compiler__error, slit_27, *msg_0 };
FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &v_30), _fx_catch_11);
}
_fx_F12print_stringv1S(&v_30, 0);
fx_str_t slit_28 = FX_MAKE_STR("\n");
_fx_F12print_stringv1S(&slit_28, 0);
_fx_catch_11: ;
FX_FREE_STR(&v_30);
}
else if (tag_0 == _FX_EXN_E17Ast__CompileError) {
FX_CALL(_fx_M3AstFM17print_compile_errv1E(&exn_0, 0), _fx_catch_12); _fx_catch_12: ;
}
else if (tag_0 != _FX_EXN_E30Compiler__CumulativeParseError) {
fx_str_t v_31 = {0};
fx_str_t v_32 = {0};
FX_CALL(_fx_F6stringS1E(&exn_0, &v_31, 0), _fx_catch_13);
fx_str_t slit_29 =
FX_MAKE_STR("\n"
U"\n");
fx_str_t slit_30 = FX_MAKE_STR(": Exception ");
fx_str_t slit_31 = FX_MAKE_STR(" occured");
{
const fx_str_t strs_5[] = { slit_29, _fx_g15Compiler__error, slit_30, v_31, slit_31 };
FX_CALL(fx_strjoin(0, 0, 0, strs_5, 5, &v_32), _fx_catch_13);
}
_fx_F12print_stringv1S(&v_32, 0);
fx_str_t slit_32 = FX_MAKE_STR("\n");
_fx_F12print_stringv1S(&slit_32, 0);
_fx_catch_13: ;
FX_FREE_STR(&v_32);
FX_FREE_STR(&v_31);
}
FX_CHECK_EXN(_fx_cleanup);
*fx_result = false;
}
_fx_cleanup: ;
fx_free_exn(&exn_0);
if (__fold_result___0) {
_fx_free_LE(&__fold_result___0);
}
if (v_0) {
_fx_free_LE(&v_0);
}
FX_FREE_STR(&v_1);
FX_FREE_STR(&v_2);
return fx_status;
}
FX_EXTERN_C int fx_init_Compiler(void)
{
    /* Module initializer for the generated Compiler module.
       Registers the Compiler.CumulativeParseError exception type with the
       runtime, then queries the terminal color capability into the global
       _fx_g21Compiler__iscolorterm (via Sys.colorterm — presumably "is this a
       color-capable terminal"; confirm against the Ficus runtime) and builds
       the colorized "error" message prefix stored in _fx_g15Compiler__error.
       Returns 0 on success or a negative fx_status on failure; FX_CALL jumps
       to _fx_cleanup on the first failing call. */
    FX_REG_SIMPLE_EXN("Compiler.CumulativeParseError", _FX_EXN_E30Compiler__CumulativeParseError,
        _fx_E30Compiler__CumulativeParseError_info, _fx_E30Compiler__CumulativeParseErrorv);
    int fx_status = 0;
    FX_CALL(_fx_M3SysFM9colortermB0(&_fx_g21Compiler__iscolorterm, 0), _fx_cleanup);
    fx_str_t slit_0 = FX_MAKE_STR("error");
    /* clrmsg(MsgRed, "error") -> colorized "error" prefix used by error printers */
    FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_0, &_fx_g15Compiler__error, 0),
        _fx_cleanup);
    _fx_cleanup: ;
    return fx_status;
}
FX_EXTERN_C void fx_deinit_Compiler(void)
{
    /* Module finalizer: releases the global colorized "error" prefix string
       that fx_init_Compiler() allocated. */
    FX_FREE_STR(&_fx_g15Compiler__error);
}
|
deconvolution_pack16to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution), pack16 input -> pack4 output.
// For each output pixel, invert the forward-convolution index mapping to find
// the contributing input pixels, then accumulate 16 input lanes into a 4-wide
// output vector with FMA.  weight_data_packed holds, per output channel and
// kernel tap, a 16x4 block of weights (64 floats).
static void deconvolution_pack16to4_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int channels = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // effective kernel span once dilation is applied
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // seed the accumulator with this channel's 4-float bias, if any
                __m128 _sum = _mm_setzero_ps();
                if (bias_data_ptr)
                {
                    _sum = _mm_loadu_ps(bias_data_ptr + p * 4);
                }

                const float* kptr = weight_data_packed.channel(p);

                // walk all input channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // map the output row back to the input row feeding it;
                        // skip taps that fall off-grid or between strides
                        const int sys = i + y * dilation_h - (kernel_extent_h - 1);
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        const int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            const int sxs = j + x * dilation_w - (kernel_extent_w - 1);
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            const int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // one pack16 input pixel and its 16x4 weight block
                            const float* sptr = m.row(sy) + sx * 16;
                            const int k = (y * kernel_w + x) * 64;

                            // accumulate all 16 lanes: broadcast lane value,
                            // multiply by its 4-wide weight slice, add into sum
                            // (same FMA order as the fully unrolled form)
                            for (int l = 0; l < 16; l++)
                            {
                                __m128 _val = _mm_broadcast_ss(sptr + l);
                                __m128 _w = _mm_load_ps(kptr + k + l * 4);
                                _sum = _mm_fmadd_ps(_val, _w, _sum);
                            }
                        }
                    }

                    kptr += maxk * 64;
                }

                _sum = activation_sse(_sum, activation_type, activation_params);

                _mm_storeu_ps(outptr, _sum);
                outptr += 4;
            }
        }
    }
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define ErrorRelativeWeight PerceptibleReciprocal(16)
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
/*
  A pixel expressed with double-precision components; used as the working
  representation during classification/assignment (values may be
  alpha-premultiplied — see AssociateAlphaPixel()).
*/
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;
/*
  One node of the color description tree.  Each node represents a cube in
  (R,G,B[,A]) space; a child is selected by one bit per channel (see
  ColorToNodeId()), hence up to 16 children when alpha is associated.
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,          /* the enclosing (larger) cube */
    *child[16];       /* sub-cubes, indexed by ColorToNodeId() bits */

  MagickSizeType
    number_unique;    /* "n2" of the header comment: pixels classified
                         exactly at this node, not at a deeper level */

  DoublePixelPacket
    total_color;      /* component sums (Sr,Sg,Sb[,Sa]) of those pixels */

  double
    quantize_error;   /* accumulated distance of pixels from the cube center
                         (see ClassifyImageColors()); drives pruning */

  size_t
    color_number,     /* colormap index for this node's mean color */
    id,               /* this node's child index within its parent */
    level;            /* depth of this node in the tree */
} NodeInfo;
/*
  A batch of preallocated tree nodes.  Batches are chained into a list so
  nodes can be allocated in bulk — presumably NodesInAList at a time; confirm
  against GetNodeInfo()/GetCubeInfo().
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;           /* array of nodes in this batch */

  struct _Nodes
    *next;            /* next batch in the allocation chain */
} Nodes;
/*
  Working state for color quantization: the color description tree plus
  bookkeeping for the classification, reduction, assignment, and dithering
  phases.  NOTE(review): dither-related fields (cache, error, weights,
  diffusion, x, y, offset, span) are consumed by DitherImage(), which is not
  visible in this chunk — confirm details against that code.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;                    /* root of the color description tree */

  size_t
    colors,                   /* number of leaf colors currently in the tree */
    maximum_colors;           /* requested maximum number of output colors */

  ssize_t
    transparent_index;        /* colormap slot for transparent pixels; -1 when
                                 none (reset in AssignImageColors()) */

  MagickSizeType
    transparent_pixels;       /* count of fully transparent pixels seen */

  DoublePixelPacket
    target;                   /* color currently being matched (ClosestColor) */

  double
    distance,                 /* best squared distance found so far */
    pruning_threshold,        /* "Ep": prune nodes with error <= this */
    next_threshold;           /* minimum error among surviving nodes */

  size_t
    nodes,                    /* total nodes allocated in the tree */
    free_nodes,               /* unused node slots left in the current batch */
    color_number;             /* colormap index of the closest color found */

  NodeInfo
    *next_node;               /* next free node within the current batch */

  Nodes
    *node_queue;              /* chain of node allocation batches */

  MemoryInfo
    *memory_info;             /* backing allocation, presumably for `cache` */

  ssize_t
    *cache;                   /* presumably the dither color cache (CacheShift) */

  DoublePixelPacket
    error[ErrorQueueLength];  /* recent quantization errors (error diffusion) */

  double
    diffusion,                /* presumably the dither diffusion amount */
    weights[ErrorQueueLength]; /* per-entry weights for the error queue */

  QuantizeInfo
    *quantize_info;           /* quantization settings in effect */

  MagickBooleanType
    associate_alpha;          /* premultiply RGB by alpha while classifying */

  ssize_t
    x,
    y;                        /* presumably the current dither position */

  size_t
    depth;                    /* tree depth in use (<= MaxTreeDepth; shrinks
                                 when PruneLevel() trims an oversized tree) */

  MagickOffsetType
    offset;                   /* presumably progress: pixels processed */

  MagickSizeType
    span;                     /* presumably progress: total pixels */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *option;

  QuantizeInfo
    *quantize_info;

  /*
    Allocate the structure and install the library defaults.
  */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Inherit dithering and error-measurement preferences from the image info;
    an explicit "dither" image option overrides the boolean dither flag.
  */
  quantize_info->dither_method=image_info->dither == MagickFalse ?
    NoDitherMethod : RiemersmaDitherMethod;
  option=GetImageOption(image_info,"dither");
  if (option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    Convert a raw pixel to a DoublePixelPacket.  When the cube associates
    alpha and the pixel is not fully opaque, the RGB components are
    premultiplied by the normalized alpha; otherwise they are copied verbatim.
  */
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    {
      double
        alpha;

      alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
      alpha_pixel->red=alpha*GetPixelRed(image,pixel);
      alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
      alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha_pixel->red=(double) GetPixelRed(image,pixel);
  alpha_pixel->green=(double) GetPixelGreen(image,pixel);
  alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    PixelInfo flavor of AssociateAlphaPixel(): premultiply RGB by normalized
    alpha when the cube associates alpha and the pixel is not fully opaque;
    otherwise copy the components unchanged.
  */
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    {
      double
        alpha;

      alpha=(double) (QuantumScale*pixel->alpha);
      alpha_pixel->red=alpha*pixel->red;
      alpha_pixel->green=alpha*pixel->green;
      alpha_pixel->blue=alpha*pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha_pixel->red=(double) pixel->red;
  alpha_pixel->green=(double) pixel->green;
  alpha_pixel->blue=(double) pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  /*
    Select the child slot at tree level `index`: bit `index` of each
    8-bit-scaled component contributes one bit of the id (red -> bit 0,
    green -> bit 1, blue -> bit 2, and alpha -> bit 3 when associated).
  */
  size_t
    id;

  id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01) << 1);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) &
      0x01) << 3);
  return(id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;  /* per-thread copy: ClosestColor mutates target/distance */

        Quantum
          *magick_restrict q;

        ssize_t
          count,
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* count the run of identical pixels so the tree lookup and
             closest-color search are done once per run, not per pixel */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* upper bound: larger than any possible 4-channel squared distance */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          /* write the chosen colormap entry to every pixel in the run */
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  /* special case: a 2-color grayscale request snaps the colormap to pure
     black/white, ordered by luma */
  if ((cube_info->quantize_info->number_colors == 2) &&
      (IsGrayColorspace(cube_info->quantize_info->colorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /* restore the caller's colorspace if quantization changed it */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing It updates the following data for each such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  /*
    Decide whether classification should premultiply RGB by alpha: only when
    the image carries an alpha channel, and never for a two-color grayscale
    quantization.
  */
  cube_info->associate_alpha=image->alpha_trait != UndefinedPixelTrait ?
    MagickTrue : MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    cube_info->associate_alpha=MagickFalse;
}
/*
  ClassifyImageColors() builds the color-cube tree: the first pass descends
  to the full MaxTreeDepth; once the color count exceeds the requested
  maximum the tree is pruned and a second pass continues at the (reduced)
  cube_info->depth.  Returns MagickTrue when every row was classified.
*/
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  double
    bisect;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.  Runs of
        identical pixels are classified once with weight 'count'.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Too many colors: prune and switch to the reduced-depth pass below.
        */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  for (y++; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                /*
                  Fixed: use the `%s' quoting convention to match the first
                  classification pass above.
                */
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Start from a default-initialized structure; when a source is supplied,
    copy over only the user-tunable fields.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha,
        beta,
        distance,
        pixel;

      DoublePixelPacket
        *magick_restrict q;

      PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".  p is the colormap entry that
        this leaf maps to; q is the target color stored in the cube.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      /*
        When alpha is associated, compare alpha-weighted (premultiplied)
        channel values; otherwise both weights are 1.0.
      */
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*p->alpha);
          beta=(MagickRealType) (QuantumScale*q->alpha);
        }
      /*
        Accumulate the squared distance channel by channel, bailing out as
        soon as it exceeds the best distance found so far.
      */
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      /*
                        New best match: remember distance and colormap index.
                      */
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette (indexed) images can have their colormap compressed.
  */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the image's current color count at full tree depth; this
    rebuilds the colormap without duplicate or unused entries.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry.  A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
        'alpha' here is 1/number_unique, the averaging factor.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          /*
            No alpha channel: average RGB and force the entry fully opaque.
          */
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Partially transparent entry: un-premultiply the mean color by
                the mean alpha (gamma = 1/alpha_mean).
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Track the most-populated transparent entry for later use as
                the image's transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *nodes;

  /*
    Release color cube tree storage.  The node queue is assumed to hold at
    least one entry (the root's allocation block), hence the do-while.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  /*
    The dither cache is only allocated when dithering is enabled.
  */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Invalidate the signature before freeing so stale pointers are detected
    by the asserts above on any later (erroneous) use.
  */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyPixelTLS() releases the per-thread pixel scratch buffers allocated
  by AcquirePixelTLS() and then the pointer array itself; returns NULL.
*/
static DoublePixelPacket **DestroyPixelTLS(DoublePixelPacket **pixels)
{
  ssize_t
    i,
    number_threads;

  assert(pixels != (DoublePixelPacket **) NULL);
  /*
    Hoist the resource-limit query out of the loop: the thread limit is
    loop-invariant, and querying it per iteration is wasted work.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelTLS(const size_t count)
{
  DoublePixelPacket
    **pixels;

  size_t
    number_threads;

  ssize_t
    i;

  /*
    Allocate one scratch buffer per worker thread; each buffer holds two
    rows (2*count pixels) for the current/previous error rows used by
    Floyd-Steinberg dithering.  Returns NULL on allocation failure (any
    partial allocation is released via DestroyPixelTLS).
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  /*
    Zero the pointer array first so DestroyPixelTLS() can safely skip
    not-yet-allocated slots on a partial failure.
  */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelTLS(pixels));
  }
  return(pixels);
}
/*
  CacheOffset() packs the (truncated) channel values of a pixel into a single
  index into the dither color cache: each channel is reduced to 8-CacheShift
  significant bits and placed in its own bit field.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  /*
    The alpha field is only included when alpha is associated; otherwise all
    alpha values share the same cache slots.
  */
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
/*
  FloydSteinbergDither() assigns each pixel to its closest colormap entry
  while diffusing the quantization error to neighboring pixels with the
  classic 7/16, 5/16, 3/16, 1/16 serpentine-scan weights.  Returns
  MagickFalse when the pixel cache could not be read or synced.
*/
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelTLS(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    Quantum
      *magick_restrict q;

    size_t
      index;

    ssize_t
      x,
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Work on a private copy of the cube so the closest-color search state
      (target, distance, cache hits) is thread-local.
    */
    cube=(*cube_info);
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /*
      Serpentine scan: odd rows run right-to-left (v = -1).
    */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /*
        Add in the diffused error from the west (7/16) and the three
        northern neighbors (1/16, 5/16, 3/16).
      */
      if (x > 0)
        {
          pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
          pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
          pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=cube_info->diffusion*previous[u+v].red/16;
              pixel.green+=cube_info->diffusion*previous[u+v].green/16;
              pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16;
            }
          pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
          pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
          pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
              pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
              pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelTLS(pixels);
  /*
    Fixed: propagate the accumulated status instead of unconditionally
    returning MagickTrue, so cache read/sync failures reach the caller.
  */
  return(status);
}
/*
  RiemersmaDither() quantizes the single pixel at the cube's current (x,y)
  position — when it lies inside the image — applying the weighted error
  history queue, then advances the position one step in 'direction'.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CubeInfo
    *p;

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      Quantum
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /*
        Apply the exponentially-decaying error queue to the current pixel.
      */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].red;
        pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].green;
        pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
            p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Step one pixel along the Hilbert curve in the requested direction
    (ForgetGravity and others leave the position unchanged).
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
/*
  Riemersma() recursively traces a Hilbert curve of the given level over the
  image, calling RiemersmaDither() for each unit step.  'direction' selects
  the orientation of the current curve segment; level 1 is the base U-shaped
  cell of three moves.  Returns MagickFalse as soon as any step fails.
*/
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const size_t level,const unsigned int direction,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=MagickTrue;
  if (level == 1)
    /*
      Base case: a three-step U-shaped cell oriented by 'direction'.
    */
    switch (direction)
    {
      case WestGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recursive case: four sub-curves of level-1, joined by three unit
      steps, following the standard Hilbert-curve construction.
    */
    switch (direction)
    {
      case WestGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        break;
      }
      default:
        break;
    }
  return(status);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  size_t
    extent,
    level;

  /*
    The "dither:diffusion-amount" artifact scales how much error is
    propagated (interpreted over the interval [0,1]).
  */
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Pick the smallest Hilbert-curve level whose 2^level side covers the
    larger image dimension.
  */
  extent=MagickMax(image->columns,image->rows);
  level=(size_t) log2((double) extent);
  if (((size_t) 1UL << level) < extent)
    level++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  status=MagickTrue;
  if (level > 0)
    status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception);
  /*
    A final ForgetGravity step dithers the last pixel without moving.
  */
  if (status != MagickFalse)
    status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose a optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
weight;
size_t
length;
ssize_t
i;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]=PerceptibleReciprocal(weight);
weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
}
cube_info->diffusion=1.0;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a node in the color
% cube tree, or NULL if memory cannot be allocated.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  /*
    Return a zeroed node from the cube's node pool, refilling the pool with a
    fresh queue of NodesInAList nodes when it is exhausted.  Returns NULL on
    allocation failure.
  */
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Fix: release the queue header too; it was not yet linked into
            cube_info->node_queue and would otherwise leak.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /* A DirectClass image has no colormap, so there is no quantization error. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* Three channels (red, green, blue) contribute per pixel. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* index selects the colormap entry this pixel was quantized to. */
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* Weight each channel difference by the normalized alphas. */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /* Accumulate per-channel |difference| and squared difference. */
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Normalize: per-pixel mean in quantum units, squared means scaled to [0,1]. */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Reset the quantize-info structure to its default state: 256 colors,
    Riemersma dithering, undefined colorspace, no error measurement.
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->measure_error=MagickFalse;
  quantize_info->number_colors=256;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread accumulator for one k-means cluster: quantum-scaled channel
  sums, the number of member pixels (count), and the summed squared
  distance of those pixels to the cluster mean (distortion).
*/
typedef struct _KmeansInfo
{
  double
    red,
    green,
    blue,
    alpha,
    black,
    count,
    distortion;
} KmeansInfo;
static KmeansInfo **DestroyKmeansTLS(KmeansInfo **kmeans_info)
{
  /*
    Release the per-thread k-means accumulators and the pointer table
    itself.  Individual entries may be NULL when AcquireKmeansTLS() failed
    part way through; those slots are skipped.  Always returns NULL.
  */
  ssize_t
    thread;

  assert(kmeans_info != (KmeansInfo **) NULL);
  for (thread=0; thread < (ssize_t) GetMagickResourceLimit(ThreadResource); thread++)
  {
    if (kmeans_info[thread] == (KmeansInfo *) NULL)
      continue;
    kmeans_info[thread]=(KmeansInfo *) RelinquishMagickMemory(
      kmeans_info[thread]);
  }
  return((KmeansInfo **) RelinquishMagickMemory(kmeans_info));
}
static int DominantColorCompare(const void *x,const void *y)
{
  /*
    qsort() callback: order colormap entries by decreasing pixel count.
    Fix: compare instead of subtracting.  The counts are wide unsigned
    values (count is assigned from a MagickSizeType elsewhere in this file)
    and casting their difference to int can overflow and wrap, yielding an
    inconsistent ordering that corrupts the sort.
  */
  const PixelInfo
    *pixel_1,
    *pixel_2;

  pixel_1=(const PixelInfo *) x;
  pixel_2=(const PixelInfo *) y;
  if (pixel_2->count > pixel_1->count)
    return(1);
  if (pixel_2->count < pixel_1->count)
    return(-1);
  return(0);
}
static KmeansInfo **AcquireKmeansTLS(const size_t number_colors)
{
  /*
    Allocate one array of number_colors KmeansInfo accumulators per worker
    thread.  Returns NULL on failure; any partial allocations are released
    via DestroyKmeansTLS() first.
  */
  KmeansInfo
    **kmeans_info;

  size_t
    number_threads;

  ssize_t
    thread;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*kmeans_info));
  if (kmeans_info == (KmeansInfo **) NULL)
    return((KmeansInfo **) NULL);
  /*
    Zero the table so DestroyKmeansTLS() can tell allocated slots apart.
  */
  (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    kmeans_info[thread]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
      sizeof(**kmeans_info));
    if (kmeans_info[thread] == (KmeansInfo *) NULL)
      return(DestroyKmeansTLS(kmeans_info));
  }
  return(kmeans_info);
}
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  /*
    Squared color distance between pixel p and cluster mean q.  All channel
    differences are scaled to normalized [0..1] units; the color channels
    are additionally down-weighted by gamma as either endpoint becomes
    transparent (or, in CMYK, dark), so alpha/black dominate there.
  */
  double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /*
        Fix: scale the alpha difference by QuantumScale so it is
        commensurate with the other channel terms below (every other
        difference in this function is normalized; a raw quantum-unit
        difference squared would swamp the metric).
      */
      pixel=QuantumScale*(GetPixelAlpha(image,p)-(q->alpha_trait !=
        UndefinedPixelTrait ? q->alpha : OpaqueAlpha));
      metric+=pixel*pixel;
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      /*
        Include the black channel; discount the color channels for dark
        pixels.
      */
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /*
        First channel is a hue angle: fold differences over half a turn.
      */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  char
    tuple[MagickPathExtent];

  const char
    *colors;

  double
    previous_tolerance;

  Image
    *dominant_image;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  size_t
    number_threads;

  ssize_t
    n;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (max_iterations == 0)
    return(MagickFalse);
  /*
    Choose initial cluster means: from the "kmeans:seed-colors" artifact if
    set, otherwise from a quick color quantization of the image.
  */
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /* depth ~ Log4(number_colors)+1. */
      n=(ssize_t) number_colors;
      for (depth=1; n != 0; depth++)
        n>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;

        /* q scans to the next ';' separator (or end of string). */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansTLS(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"verbose"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    ssize_t
      j,
      y;

    /* Reset the per-thread accumulators for this iteration. */
    for (j=0; j < (ssize_t) number_threads; j++)
      (void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        ssize_t
          i,
          k;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        k=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          /* Early exit once the match is effectively exact. */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              k=i;
            }
        }
        /* Accumulate this pixel into its cluster's thread-local sums. */
        kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][k].count++;
        kmeans_pixels[id][k].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) k,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (j=1; j < (ssize_t) number_threads; j++)
    {
      ssize_t
        k;

      for (k=0; k < (ssize_t) image->colors; k++)
      {
        kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red;
        kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green;
        kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black;
        kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count;
        kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (j=0; j < (ssize_t) image->colors; j++)
    {
      double
        gamma;

      /* gamma = 1/count (0 for empty clusters, leaving sums at 0). */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count);
      image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red;
      image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green;
      image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black;
      image->colormap[j].count=(MagickSizeType) kmeans_pixels[0][j].count;
      distortion+=kmeans_pixels[0][j].distortion;
    }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(ImageEvent,GetMagickModule(),
        "distortion[%.20g]: %*g %*g\n",(double) n,GetMagickPrecision(),
        distortion,GetMagickPrecision(),fabs(distortion-previous_tolerance));
    /* Converged once the distortion change drops within tolerance. */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (verbose != MagickFalse)
    for (n=0; n < (ssize_t) image->colors; n++)
    {
      GetColorTuple(image->colormap+n,MagickTrue,tuple);
      (void) FormatLocaleFile(stderr,"%s %.20g\n",tuple,(double)
        image->colormap[n].count);
    }
  dominant_image=CloneImage(image,0,0,MagickTrue,exception);
  if (dominant_image != (Image *) NULL)
    {
      /*
        Note dominant color.
      */
      qsort((void *) dominant_image->colormap,dominant_image->colors,
        sizeof(*dominant_image->colormap),DominantColorCompare);
      GetColorTuple(dominant_image->colormap,MagickTrue,tuple);
      dominant_image=DestroyImage(dominant_image);
      (void) SetImageProperty(image,"dominant-color",tuple,exception);
    }
  kmeans_pixels=DestroyKmeansTLS(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Quantize each updatable channel to the nearest of `levels' steps. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Finish with a quantization to at most levels^3 colors using the
    requested dither method.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Depth-first: prune the children of this node first, then — while the
    tree still holds more nodes than the color budget — fold this node's
    statistics into its parent and unlink it.
  */
  NodeInfo
    *parent;

  size_t
    number_children;

  ssize_t
    id;

  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[id]);
  if (cube_info->nodes <= cube_info->maximum_colors)
    return;
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Recurse into every child first, then prune nodes sitting at the
    bottom (deepest) level of the color cube tree.
  */
  size_t
    number_children;

  ssize_t
    id;

  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
  {
    const NodeInfo
      *child;

    child=node_info->child[id];
    if (child != (NodeInfo *) NULL)
      PruneLevel(cube_info,child);
  }
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Recurse into every child first, then prune any node that lies deeper
    than the cube's configured depth.
  */
  size_t
    number_children;

  ssize_t
    id;

  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
  {
    const NodeInfo
      *child;

    child=node_info->child[id];
    if (child != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,child);
  }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  /*
    Reduce the image to at most quantize_info->number_colors colors:
    classify the image's colors into a color cube tree, prune the tree to
    the color budget, then assign each pixel its closest surviving color.
  */
  CubeInfo
    *cube_info;

  ImageType
    image_type;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Clamp the requested color count to [1,MaxColormapSize]; 0 means "max".
  */
  maximum_colors=quantize_info->number_colors;
  if ((maximum_colors == 0) || (maximum_colors > MaxColormapSize))
    maximum_colors=MaxColormapSize;
  image_type=IdentifyImageGray(image,exception);
  if (IsGrayImageType(image_type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      for (colors=maximum_colors, depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;
      if (IsGrayImageType(image_type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  size_t
    depth,
    maximum_colors,
    number_images;

  ssize_t
    i;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /* Clamp the requested color count to [1,MaxColormapSize]; 0 means "max". */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1: classify the colors of every frame into one shared color cube.
    The per-image progress monitor is suspended so only the sequence-level
    progress below is reported.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /* Pass 2: map every frame's pixels onto the shared reduced palette. */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  size_t
    count,
    max_children;

  ssize_t
    child;

  /*
    Depth-first traversal: record this node's quantization error at
    quantize_error[offset], then append each populated child subtree
    immediately after it.  Returns the number of entries written.
  */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  max_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  count=1;
  for (child=0; child < (ssize_t) max_children; child++)
  {
    if (node_info->child[child] == (NodeInfo *) NULL)
      continue;
    count+=QuantizeErrorFlatten(cube_info,node_info->child[child],
      offset+count,quantize_error);
  }
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    max_children;

  ssize_t
    child;

  /*
    Prune from the leaves upward: reduce every populated child before
    deciding the fate of this node.
  */
  max_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (child=0; child < (ssize_t) max_children; child++)
  {
    if (node_info->child[child] == (NodeInfo *) NULL)
      continue;
    Reduce(cube_info,node_info->child[child]);
  }
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    {
      PruneChild(cube_info,node_info);
      return;
    }
  /*
    The node survives this pass: count it if it uniquely defines a color,
    and track the minimum surviving error as the next pruning threshold.
  */
  if (node_info->number_unique > 0)
    cube_info->colors++;
  if (node_info->quantize_error < cube_info->next_threshold)
    cube_info->next_threshold=node_info->quantize_error;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  double
    p,
    q;

  /*
    qsort() comparator for quantization errors (ascending).

    BUG FIX: the original tested (*p > *q) before the epsilon-equality
    test, so for two values within MagickEpsilon it returned 1 in one call
    order and 0 in the other.  That asymmetry violates the strict weak
    ordering qsort() requires (undefined behavior).  Testing near-equality
    first makes the comparator consistent: compare(a,b) == -compare(b,a).
  */
  p=*((const double *) error_p);
  q=*((const double *) error_q);
  if (fabs(q-p) <= MagickEpsilon)
    return(0);
  return(p > q ? 1 : -1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  /*
    Iteratively prune the color cube until the number of colors is no
    greater than the requested maximum (see the algorithm sketch in the
    header comment above: prune all nodes with E <= Ep, then raise Ep to
    the minimum E among the survivors).
  */
  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          /*
            Flatten every node's quantization error into a sorted vector
            and seed the pruning threshold so that roughly 110% of the
            target color count survives the first pass.  Allocation
            failure here is non-fatal: the loop below still converges,
            just with more iterations.
          */
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
  }
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    /*
      Each pass: prune all nodes whose error is at or below the current
      threshold; Reduce() recounts cube_info->colors and records the
      minimum surviving error in next_threshold for the following pass.
      next_threshold is primed to (root error - 1), an upper bound no
      node can exceed.
    */
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    /* Report progress as the number of colors eliminated so far. */
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube;

  MagickBooleanType
    result;

  /*
    Validate arguments and initialize the color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Build the color classification from the reference image, then map the
    target image onto exactly those colors.
  */
  result=ClassifyImageColors(cube,remap_image,exception);
  if (result != MagickFalse)
    {
      cube->quantize_info->number_colors=cube->colors;
      result=AssignImageColors(image,cube,exception);
    }
  DestroyCubeInfo(cube);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube;

  Image
    *next;

  MagickBooleanType
    result;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (remap_image == (Image *) NULL)
    {
      /*
        No reference image: fall back to building a global colormap for
        the whole sequence.
      */
      return(QuantizeImages(quantize_info,images,exception));
    }
  /*
    Classify colors from the reference image, then assign them to every
    image in the sequence.
  */
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      images->filename);
  result=ClassifyImageColors(cube,remap_image,exception);
  if (result != MagickFalse)
    {
      cube->quantize_info->number_colors=cube->colors;
      for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
      {
        result=AssignImageColors(next,cube,exception);
        if (result == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  double
    delta;

  PixelInfo
    *p,
    *q;

  /*
    qsort() callback: order colormap entries by the intensity computed by
    GetPixelInfoIntensity().  The double difference is clamped into the
    int range before truncation so the cast cannot overflow.
  */
  p=(PixelInfo *) x;
  q=(PixelInfo *) y;
  delta=GetPixelInfoIntensity((const Image *) NULL,p)-
    GetPixelInfoIntensity((const Image *) NULL,q);
  if (delta < (double) INT_MIN)
    delta=(double) INT_MIN;
  if (delta > (double) INT_MAX)
    delta=(double) INT_MAX;
  return((int) delta);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  size_t
    extent;

  ssize_t
    *colormap_index,  /* maps an intensity (then an old index) to a colormap slot */
    i,
    j,
    y;

  /*
    Convert the image to a PseudoClass grayscale image: collect the
    distinct gray levels into a colormap, sort it by intensity, collapse
    duplicate entries, and rewrite every pixel index accordingly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass image: discover the distinct gray levels in a parallel
        pass, assigning each new level the next colormap slot.  Setting
        every byte to 0xff makes each entry read as -1 ("unseen").
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          size_t
            intensity;

          /* The red channel carries the gray level after the grayscale
             colorspace transform above. */
          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              /* Double-checked under the critical section: image->colors
                 and colormap_index[] are shared across threads. */
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity.  Each entry's alpha field temporarily
    stores its pre-sort index, so colormap_index[] can later translate
    old pixel indexes to the deduplicated positions.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Copy unique entries of the sorted colormap into a compacted colormap,
    recording for every original index the compacted slot it maps to.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /*
    Rewrite every pixel index through the old-index -> compacted-index
    table built above.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  /* Demote to bilevel if only two gray extremes remain. */
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
% the image. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
%      ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    capacity;

  /*
    Allocate a colormap large enough for the worst case, populate it from
    the color cube, then shrink it to the number of entries actually
    defined.  Returns MagickTrue on success; throws on allocation failure.
  */
  capacity=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,capacity,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors == capacity)
    return(MagickTrue);
  image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
    image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  return(MagickTrue);
}
|
stagger.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
/*
* stagger.c Public domain 1/2014 Wesley Ebisuzaki
*
*
* usually x[0] = y[0] = 0.0
*
* usually x[] and y[] are integers (except when grids are shifted +/- 1/2)
*
* normally:
* x = x[]*dx + x_0, note: for gctpc x_0 is not needed
* y = y[]*dy + y_0, note: for gctpc y_0 is not needed
* x_0 and y_0 are the x and y of the first grid point (raw order)
*
* version: proposal 5
*/
extern double *lat, *lon;
extern enum output_order_type output_order;
extern int scan;
/*
* stagger fills x[] and y[], lo1 and la1 have X==0 and Y==0
*
* assumed_npnts is the number of grid points that the calling program thinks is right
* this is for error checking. use -1 if don't know
*
* to a grid transform:
* setup grid transform (proj4 library, for example)
* call stagger() to get the x() and y() values of the grid
* transform x() and y() to lon() and lat()
*
* like many programs, stagger requires grid to be on we:sn order
*/
int stagger(unsigned char **sec, unsigned int assumed_npnts, double *x, double *y) {
    /*
     * Fill x[] and y[] with the grid coordinates (in grid units, relative
     * to the first grid point, we:sn order) of every point of a possibly
     * staggered and/or reduced grid.  Returns 0 on success, 1 on failure;
     * when x or y is NULL only the point-count consistency checks run.
     */
    int nx, ny, res, scan;   /* NOTE(review): local 'scan' shadows the file-scope extern 'scan' */
    unsigned int npnts;
    int nnx, nx_even, nx_odd, nx2;
    double x0, y0, dx_offset, dx_offset_even, dx_offset_odd, dy_offset;
    unsigned int i, ix, iy, n;
    int reduced_grid, dx_off_odd, dx_off_even, dy_off;
    int dx, dy, even;
    get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
    if (scan == -1) return 1;
    if (output_order != wesn) return 1;
    if (nx < 1 || ny < 1) return 1;
    /* get stagger bits (low four bits of the scan-mode flags) */
    dx_off_odd = ((unsigned int) scan >> 3) & 1;    /* odd rows shifted by 1/2 dx */
    dx_off_even = ((unsigned int) scan >> 2) & 1;   /* even rows shifted by 1/2 dx */
    dy_off = ((unsigned int) scan >> 1) & 1;        /* all rows shifted by 1/2 dy */
    reduced_grid = (unsigned int) scan & 1;         /* grid has trimmed (reduced) rows */
    dx = (scan & 128) ? -1 : 1;                     /* x scan direction */
    dy = (scan & 64) ? 1 : -1;                      /* y scan direction */
    if (reduced_grid && dy_off) ny--;
    if (dy < 0 && ((ny % 2) == 0)) { // swap even and odd rows if ns to sn and even number of rows
        i = dx_off_odd;
        dx_off_odd = dx_off_even;
        dx_off_even = i;
    }
    /* half-cell offsets in grid units; on non-reduced grids the offset
       follows the scan direction's sign */
    dx_offset_odd = reduced_grid ? 0.5 * dx_off_odd : 0.5 * dx_off_odd * dx;
    dx_offset_even = reduced_grid ? 0.5 * dx_off_even : 0.5 * dx_off_even * dx;
    dy_offset = reduced_grid ? 0.5 * dy_off : 0.5 * dy_off * dy;
    /* row lengths: a reduced grid drops one point from each offset row */
    nx_odd = nx - (dx_off_odd & reduced_grid);
    nx_even = nx - (dx_off_even & reduced_grid);
    nx2 = nx_odd + nx_even;   /* points in one odd+even row pair */
    //fprintf(stderr, "stagger: dx_off_odd %lf dx_off_even %lf dy_off %lf reduced_grid %d nx=%d %d\n",
    // dx_offset_odd, dx_offset_even, dy_offset, reduced_grid, nx_odd,nx_even);
    //fprintf(stderr,"dx_off_odd %d reduced_grid %d, and %d\n", dx_off_odd , reduced_grid, dx_off_odd & reduced_grid);
    //fprintf(stderr,"dx_off_even %d reduced_grid %d, and %d\n", dx_off_even , reduced_grid, dx_off_even & reduced_grid);
    // number of grid points
    n = (ny/2)*nx_even + ((ny+1)/2)*nx_odd;
    // check to number of points
    if (assumed_npnts != n)
        fatal_error_ii("stagger: program error think npnts=%d assumed npnts=%d",n, (int) assumed_npnts);
    if (n != GB2_Sec3_npts(sec))
        fatal_error_ii("stagger: program error think npnts=%d, Sec3 gives %d",n, GB2_Sec3_npts(sec));
    if (x == NULL || y == NULL) return 1;
    /* return X[] and Y[] relative to the first grid point but on a we:sn grid */
    x0 = (dx > 0) ? 0.0 : 1.0 - (double) nx;
    y0 = (dy > 0) ? 0.0 : 1.0 - (double) ny;
    /* rows are independent: row iy starts at linear index i computed from
       the number of complete odd/even row pairs before it */
#pragma omp parallel for private(ix,iy,even,i,dx_offset, nnx)
    for (iy = 0; iy < ny; iy++) {
        // even = iy % 2; // first row is odd .. iy % 2 == 0
        even = (iy & 1); // first row is odd
        i = even ? nx2*(iy >> 1) + nx_odd : nx2*(iy >> 1);
        nnx = even ? nx_even : nx_odd;
        dx_offset = even ? dx_offset_even : dx_offset_odd;
        for (ix = 0; ix < nnx; ix++) {
            x[i + ix] = x0 + dx_offset + ix;
            y[i + ix] = y0 + dy_offset + iy;
        }
    }
    return 0;
}
|
cpu_stream.h | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#define ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#include "oneflow/core/ep/include/stream.h"
#include "oneflow/core/ep/cpu/cpu_device.h"
#define OF_RUNTIME_SEQ 0u
#define OF_RUNTIME_OMP 1u
#define OF_RUNTIME_TBB 2u
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
#include <omp.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/global_control.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
#ifdef WITH_ONEDNN
#include <oneapi/dnnl/dnnl.hpp>
#endif
namespace oneflow {
namespace ep {
class CpuNumThreadsGuard {
public:
OF_DISALLOW_COPY_AND_MOVE(CpuNumThreadsGuard);
explicit CpuNumThreadsGuard(size_t num_threads) : set_num_threads_(num_threads) {
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
saved_num_threads_ = omp_get_max_threads();
omp_set_num_threads(set_num_threads_);
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
saved_num_threads_ =
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
if (set_num_threads_ != saved_num_threads_) {
tbb::global_control global_thread_limit(tbb::global_control::max_allowed_parallelism,
set_num_threads_);
}
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
}
~CpuNumThreadsGuard() {
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
omp_set_num_threads(saved_num_threads_);
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
if (set_num_threads_ != saved_num_threads_) {
tbb::global_control global_thread_limit(tbb::global_control::max_allowed_parallelism,
saved_num_threads_);
}
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
}
private:
size_t set_num_threads_;
size_t saved_num_threads_;
};
class CpuStream : public Stream {
public:
OF_DISALLOW_COPY_AND_MOVE(CpuStream);
explicit CpuStream(Device* device) : device_(device) {
#ifdef WITH_ONEDNN
onednn_engine_.reset(new dnnl::engine(dnnl::engine::kind::cpu, 0));
onednn_stream_.reset(new dnnl::stream(*onednn_engine_));
#endif
}
~CpuStream() override = default;
DeviceType device_type() const override;
Device* device() const override;
Maybe<void> Sync() override;
void RecordEvent(Event* event) override;
template<typename F>
void ParallelFor(int64_t begin, int64_t end, const F& func) {
ParallelFor(begin, end, func, kParallelForDefaultGrain);
}
template<typename F>
void ParallelFor(int64_t begin, int64_t end, const F& func, size_t grain_size) {
auto DivUp = [](int64_t x, int64_t y) { return (x + y - 1) / y; };
size_t num_threads = dynamic_cast<CpuDevice*>(device())->GetNumThreads();
if (begin >= end) { return; }
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
if (grain_size > 0) { num_threads = std::min(num_threads, DivUp((end - begin), grain_size)); }
#pragma omp parallel num_threads(num_threads)
{
int64_t omp_num_thread = omp_get_num_thread();
int64_t chunk_size = DivUp((end - begin), omp_num_thread);
int64_t omp_tid = omp_get_thread_num();
int64_t thread_begin_index = begin + omp_tid * chunk_size;
int64_t thread_end_index = std::min(end, chunk_size + thread_begin_index);
if (thread_begin_index < end) { func(thread_begin_index, thread_end_index); }
}
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
CpuNumThreadsGuard guard(num_threads);
size_t tmp_chunk_size = DivUp((end - begin), num_threads);
int64_t chunk_size = std::max(tmp_chunk_size, grain_size);
tbb::parallel_for(
tbb::blocked_range<int64_t>(begin, end, chunk_size),
[func](const tbb::blocked_range<int64_t>& r) { func(r.begin(), r.end()); },
tbb::static_partitioner{});
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
func(begin, end);
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
}
#ifdef WITH_ONEDNN
dnnl::engine* onednn_engine() const { return onednn_engine_.get(); }
dnnl::stream* onednn_stream() const { return onednn_stream_.get(); }
#endif
private:
#ifdef WITH_ONEDNN
std::unique_ptr<dnnl::engine> onednn_engine_;
std::unique_ptr<dnnl::stream> onednn_stream_;
#endif
Device* device_;
static constexpr size_t kParallelForDefaultGrain = 32768;
};
} // namespace ep
} // namespace oneflow
#endif // ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
|
gemm_blis_B3A2C0.c | /**
* This file is part of convGemm
*
* Copyright (C) 2021-22 Universitat Politècnica de València and
* Universitat Jaume I
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <blis/blis.h>
#include "gemm_blis.h"
/*
* Computes the GEMM C := beta * C + alpha * A * B following the BLIS approach
*/
/*
 * BLIS-style blocked GEMM, C := beta * C + alpha * A * B.
 * Loop nest (outer to inner): jc over NC-wide column panels of C, pc over
 * KC-deep panels of the k dimension, ic over MC-tall row panels
 * (parallelized across OpenMP threads, each with its own slice of Ac),
 * then jr/ir over NR x MR micro-tiles handed to the BLIS micro-kernel.
 * Packing callbacks (pack_RB/pack_CB) and the optional postprocess hook
 * let the caller fuse convolution-specific transforms (conv_params).
 */
void gemm_blis_B3A2C0(char orderA, char orderB, char orderC,
                      char transA, char transB,
                      int m, int n, int k,
                      float alpha, const float *A, int ldA,
                      const float *B, int ldB,
                      float beta, float *C, int ldC,
                      float *Ac, pack_func pack_RB,
                      float *Bc, pack_func pack_CB,
                      post_func postprocess,
                      cntx_t *cntx, const conv_p *conv_params) {
    // Test the input parameters
#if defined(CHECK)
#include "check_params.h"
#endif
    // Quick return if possible
    float zero = (float) 0.0, one = (float) 1.0;
    if ((m == 0) || (n == 0) || (((alpha == zero) || (k == 0)) && (beta == one)))
        return;
    // Get Gemm BLIS blocks sizes
    int MR_bs, NR_bs, MC_bs, NC_bs, KC_bs;
    gemm_blis_blocks_sizes(m, n, k, &MR_bs, &NR_bs, &MC_bs, &NC_bs, &KC_bs);
#include "quick_gemm.h"
    for (int jc = 0; jc < n; jc += NC_bs) {
        int nc = min(n - jc, NC_bs);
        for (int pc = 0; pc < k; pc += KC_bs) {
            int kc = min(k - pc, KC_bs);
            // last k-panel: the postprocess hook may finalize the output
            bool last = (pc + KC_bs) >= k;
            // Pack the kc x nc panel of B into Bc (shared by all threads).
            pack_CB(orderB, transB, kc, nc, B, ldB, Bc, NR_bs, conv_params, pc, jc);
            // beta applies only on the first k-panel; later panels accumulate.
            float betaI = (pc == 0) ? beta : 1.0;
#pragma omp parallel for
            for (int ic = 0; ic < m; ic += MC_bs) {
                int mc = min(m - ic, MC_bs);
                int tid = omp_get_thread_num();
                // Pack this thread's mc x kc block of A into its private slice of Ac.
                pack_RB(orderA, transA, mc, kc, A, ldA, Ac + tid * MC_bs * KC_bs, MR_bs, conv_params, ic, pc);
                // #pragma omp parallel for collapse(2)
                for (int jr = 0; jr < nc; jr += NR_bs) {
                    for (int ir = 0; ir < mc; ir += MR_bs) {
                        int mr = min(mc - ir, MR_bs);
                        int nr = min(nc - jr, NR_bs);
                        float *Cptr = (orderC == 'C') ? &Ccol(ic + ir, jc + jr) : &Crow(ic + ir, jc + jr);
                        float Clocal[MR_bs * NR_bs];
                        auxinfo_t aux = {0};
                        // Prefetch hints: addresses of the next micro-panels
                        // of Ac and Bc.  NOTE(review): on the final ir/jr
                        // iterations these point one panel past the packed
                        // data -- presumably harmless as a prefetch address;
                        // confirm against the BLIS kernel contract.
                        bli_auxinfo_set_next_a(&Ac[tid * MC_bs * KC_bs + (ir + MR_bs) * kc], &aux);
                        bli_auxinfo_set_next_b(&Bc[(jr + NR_bs) * kc], &aux);
#if BLIS_ABI_VERSION == 3
                        if (postprocess == NULL && nr == NR_bs && mr == MR_bs) { // don't use buffer
#elif BLIS_ABI_VERSION == 4
                        if (postprocess == NULL) { // don't use buffer
#else
#pragma message "Specified BLIS_ABI_VERSION not supported!"
#endif
                            // Full tile, no postprocessing: write straight into C.
                            gemm_kernel(
                                    mr, nr,
                                    kc, &alpha, &Ac[tid * MC_bs * KC_bs + ir * kc], &Bc[jr * kc], &betaI, Cptr, 1, ldC,
                                    &aux, cntx);
                        } else { // use buffer for border elements (BLIS3) or postprocessing
                            // Compute into the local MR x NR buffer first.
                            gemm_kernel(mr, nr,
                                        kc, &alpha, &Ac[tid * MC_bs * KC_bs + ir * kc], &Bc[jr * kc], &zero, Clocal, 1, MR_bs,
                                        &aux, cntx);
#if BLIS_ABI_VERSION == 3
                            if (postprocess == NULL) {
                                // Border tile without postprocessing: C := betaI*C + Clocal.
                                sxpbyM(mr, nr, Clocal, MR_bs, betaI, Cptr, ldC);
                            } else {
#endif
                                postprocess(mr, nr, Clocal, MR_bs, betaI, C, ldC, conv_params, ic + ir, jc + jr, last);
#if BLIS_ABI_VERSION == 3
                            }
#endif
                        }
                    }
                }
            }
        }
    }
}
|
GB_unaryop__abs_int16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_bool
// op(A') function: GB_tran__abs_int16_bool
// C type: int16_t
// A type: bool
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
bool
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary operator entrywise.  Per the macros
// above, each entry is Cx [p] = GB_IABS ((int16_t) Ax [p]).  Auto-generated
// file: do not edit by hand.
GrB_Info GB_unop__abs_int16_bool
(
    int16_t *restrict Cx,       // output array, length anz
    const bool *restrict Ax,    // input array, length anz
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Entries are independent, so a statically scheduled parallel loop is safe.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The implementation is GB_unaryop_transpose.c, specialized via the GB_*
// macros defined earlier in this file.  NOTE(review): the exact roles of
// Rowcounts, Iter, and A_slice are defined by the included template, not
// visible here.  Auto-generated file: do not edit by hand.
GrB_Info GB_tran__abs_int16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /*
     * Compute *result = *x - *y, normalized so tv_usec lands in
     * [0, 1000000).  Returns 1 when the difference is negative, else 0.
     * NOTE: *y is used as scratch space and may be modified.
     */
    if (x->tv_usec < y->tv_usec)
    {
        /* Borrow whole seconds from y so x's microseconds are not smaller. */
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        /* Carry excess microseconds into y's seconds field. */
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
for (t4=max(max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32)),ceild(24*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(12*t1+Nx+21,32)),floord(24*t2+Nx+20,32)),floord(24*t3+Nx+20,32)),floord(24*t1-24*t2+Nz+Nx+19,32));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),32*t4+30),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
WinogradConv2D.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#include <CL/cl.h>
#include "../polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define MAX_SOURCE_SIZE (0x100000)
/* Problem size */
#define N 2048 // 若凡:改成了二维相等
// #define NI 8192
// #define NJ 8192
/* Thread block dimensions */
#define DIM_LOCAL_WORK_GROUP_X 32
#define DIM_LOCAL_WORK_GROUP_Y 8
#if defined(cl_khr_fp64) // Khronos extension available?
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#elif defined(cl_amd_fp64) // AMD extension available?
#pragma OPENCL EXTENSION cl_amd_fp64 : enable
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
char str_temp[1024];
cl_platform_id platform_id;
cl_device_id device_id;
cl_uint num_devices;
cl_uint num_platforms;
cl_int errcode;
cl_context clGPUContext;
cl_kernel clKernel;
cl_command_queue clCommandQue;
cl_program clProgram;
DATA_TYPE* a_mem_obj;
DATA_TYPE* b_mem_obj;
DATA_TYPE* c_mem_obj;
FILE *fp;
char *source_str;
size_t source_size;
int cpu_offset;
void WinogradConv2D_2x2_omp(DATA_TYPE *input, DATA_TYPE *output, DATA_TYPE *transformed_filter, size_t *cpu_global_size);
void read_cl_file() // 若凡:修改了文件名
{
// Load the kernel source code into the array source_str
fp = fopen("WinogradConv2D_2x2.cl", "r");
if (!fp) {
fprintf(stdout, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
fclose( fp );
}
void init(DATA_TYPE* A)
{
int i, j;
for (i = 0; i < N; ++i)
{
for (j = 0; j < N; ++j)
{
A[i*N + j] = (float)rand()/RAND_MAX;
}
}
}
void cl_initialization()
{
// Get platform and device information
errcode = clGetPlatformIDs(1, &platform_id, &num_platforms);
if(errcode != CL_SUCCESS)
printf("Error getting platform IDs\n");
errcode = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &num_devices);
if(errcode != CL_SUCCESS)
printf("Error getting device IDs\n");
// Create an OpenCL context
clGPUContext = clCreateContext( NULL, 1, &device_id, NULL, NULL, &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating context\n");
//Create a command-queue
clCommandQue = clCreateCommandQueue(clGPUContext, device_id, 0, &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating command queue\n");
}
void cl_load_prog()
{
// Create a program from the kernel source
clProgram = clCreateProgramWithSource(clGPUContext, 1, (const char **)&source_str, (const size_t *)&source_size, &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating program\n");
// Build the program
errcode = clBuildProgram(clProgram, 1, &device_id, NULL, NULL, NULL);
if(errcode != CL_SUCCESS) printf("Error in building program\n");
// Create the OpenCL kernel
clKernel = clCreateKernel(clProgram, "WinogradConv2D_2x2_kernel", &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating kernel\n");
clFinish(clCommandQue);
}
void cl_launch_kernel() // 若凡:有一点改动
{
double t_start, t_end;
int in_map_size = N;
int out_map_size = N - 2; // 这里
int tile_n = (out_map_size + 1) / 2;
size_t localWorkSize[2], globalWorkSize[2];
localWorkSize[0] = DIM_LOCAL_WORK_GROUP_X;
localWorkSize[1] = DIM_LOCAL_WORK_GROUP_Y;
globalWorkSize[0] = (size_t)ceil(((float)tile_n) / ((float)DIM_LOCAL_WORK_GROUP_X)) * DIM_LOCAL_WORK_GROUP_X; // 这里
globalWorkSize[1] = (size_t)ceil(((float)tile_n) / ((float)DIM_LOCAL_WORK_GROUP_Y)) * DIM_LOCAL_WORK_GROUP_Y; // 这里
size_t cpu_global_size[2];
cpu_global_size[0] = cpu_offset * (size_t)ceil(((float)tile_n) / ((float)DIM_LOCAL_WORK_GROUP_X)) / 100 * DIM_LOCAL_WORK_GROUP_X; // 这里
cpu_global_size[1] = globalWorkSize[1];
size_t gpu_global_size[2];
gpu_global_size[0] = globalWorkSize[0] - cpu_global_size[0];
gpu_global_size[1] = globalWorkSize[1];
size_t global_offset[2];
global_offset[0] = cpu_global_size[0];
// global_offset[1] = 1;
global_offset[1] = 0; // 这里
bool cpu_run = false, gpu_run = false;
if (cpu_global_size[0] > 0)
{
cpu_run = true;
}
if (gpu_global_size[0] > 0)
{
gpu_run = true;
}
t_start = rtclock();
cl_event kernelEvent1;
if (gpu_run)
{
// Set the arguments of the kernel
// errcode = clSetKernelArg(clKernel, 0, sizeof(cl_mem), (void *)&a_mem_obj);
// errcode |= clSetKernelArg(clKernel, 1, sizeof(cl_mem), (void *)&b_mem_obj);
// errcode |= clSetKernelArg(clKernel, 2, sizeof(cl_mem), (void *)&c_mem_obj); // 这里
errcode = clSetKernelArgSVMPointer(clKernel, 0, (void *)a_mem_obj);
errcode |= clSetKernelArgSVMPointer(clKernel, 1, (void *)b_mem_obj);
errcode |= clSetKernelArgSVMPointer(clKernel, 2, (void *)c_mem_obj); // 这里
errcode |= clSetKernelArg(clKernel, 3, sizeof(int), &in_map_size); // 这里
errcode |= clSetKernelArg(clKernel, 4, sizeof(int), &out_map_size); // 这里
if (errcode != CL_SUCCESS)
printf("Error in seting arguments\n");
errcode = clEnqueueNDRangeKernel(clCommandQue, clKernel, 2, global_offset, gpu_global_size, localWorkSize, 0, NULL,&kernelEvent1);
t_start = rtclock();
if (errcode != CL_SUCCESS)
printf("Error in launching kernel\n");
}
if (cpu_run)
{
WinogradConv2D_2x2_omp(a_mem_obj, b_mem_obj, c_mem_obj, cpu_global_size);
}
if (gpu_run)
{
cl_int err = clWaitForEvents(1, &kernelEvent1);
if (err != CL_SUCCESS)
printf("ERROR in corun\n");
}
t_end = rtclock();
// fprintf(stdout, "Total time: %lf ms\n", 1000.0 * (t_end - t_start));
}
void cl_clean_up()
{
// Clean up
errcode = clFlush(clCommandQue);
errcode = clFinish(clCommandQue);
errcode = clReleaseKernel(clKernel);
errcode = clReleaseProgram(clProgram);
// errcode = clReleaseMemObject(a_mem_obj);
// errcode = clReleaseMemObject(b_mem_obj);
// errcode = clReleaseMemObject(c_mem_obj);
clSVMFree(clGPUContext, a_mem_obj);
clSVMFree(clGPUContext, b_mem_obj);
clSVMFree(clGPUContext, c_mem_obj);
errcode = clReleaseCommandQueue(clCommandQue);
errcode = clReleaseContext(clGPUContext);
if(errcode != CL_SUCCESS) printf("Error in cleanup\n");
}
// F(2x2,3x3)
void WinogradConv2D_2x2_filter_transformation(DATA_TYPE *transformed_filter) {
DATA_TYPE filter[3][3];
filter[0][0] = +0.2; filter[1][0] = +0.5; filter[2][0] = -0.8;
filter[0][1] = -0.3; filter[1][1] = +0.6; filter[2][1] = -0.9;
filter[0][2] = +0.4; filter[1][2] = +0.7; filter[2][2] = +0.10;
// filter transformation
DATA_TYPE tmp_filter[4][3];
// const float G[4][3] = {
// {1.0f, 0.0f, 0.0f},
// {0.5f, 0.5f, 0.5f},
// {0.5f, -0.5f, 0.5f},
// {0.0f, 0.0f, 1.0f}
// };
// G * g
for (int j = 0; j < 3; j ++) {
tmp_filter[0][j] = filter[0][j];
tmp_filter[1][j] = 0.5f * filter[0][j] + 0.5f * filter[1][j] + 0.5f * filter[2][j];
tmp_filter[2][j] = 0.5f * filter[0][j] - 0.5f * filter[1][j] + 0.5f * filter[2][j];
tmp_filter[3][j] = filter[2][j];
}
// g * Gt
for (int i = 0; i < 4; i ++) {
transformed_filter[i * 4 + 0] = tmp_filter[i][0];
transformed_filter[i * 4 + 1] = 0.5f * tmp_filter[i][0] + 0.5f * tmp_filter[i][1] + 0.5f * tmp_filter[i][2];
transformed_filter[i * 4 + 2] = 0.5f * tmp_filter[i][0] - 0.5f * tmp_filter[i][1] + 0.5f * tmp_filter[i][2];
transformed_filter[i * 4 + 3] = tmp_filter[i][2];
}
}
void WinogradConv2D_2x2_omp(DATA_TYPE *input, DATA_TYPE *output, DATA_TYPE *transformed_filter, size_t *cpu_global_size) {
// DATA_TYPE trasformed_filter[4][4];
// WinogradConv2D_2x2_filter_transformation(trasformed_filter);
int out_map_size = N - 2;
int tile_n = (out_map_size + 1) / 2;
#pragma omp parallel for
for (int tile_i = 0; tile_i < cpu_global_size[0]; tile_i ++) {
for (int tile_j = 0; tile_j < tile_n; tile_j ++) {
// input transformation
DATA_TYPE input_tile[4][4], tmp_tile[4][4], transformed_tile[4][4];
for (int i = 0; i < 4; i ++) {
for (int j = 0; j < 4; j ++) {
int x = 2 * tile_i + i;
int y = 2 * tile_j + j;
if (x >= N || y >= N) {
input_tile[i][j] = 0;
continue;
}
input_tile[i][j] = input[x * N + y];
}
}
// const float Bt[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, 0.0f, -1.0f}
// }
// Bt * d
// #pragma omp simd
for (int j = 0; j < 4; j ++) {
tmp_tile[0][j] = input_tile[0][j] - input_tile[2][j];
tmp_tile[1][j] = input_tile[1][j] + input_tile[2][j];
tmp_tile[2][j] = -input_tile[1][j] + input_tile[2][j];
tmp_tile[3][j] = input_tile[1][j] - input_tile[3][j];
}
// d * B
// #pragma omp simd
for (int i = 0; i < 4; i ++) {
transformed_tile[i][0] = tmp_tile[i][0] - tmp_tile[i][2];
transformed_tile[i][1] = tmp_tile[i][1] + tmp_tile[i][2];
transformed_tile[i][2] = -tmp_tile[i][1] + tmp_tile[i][2];
transformed_tile[i][3] = tmp_tile[i][1] - tmp_tile[i][3];
}
// element-wise multiplication
DATA_TYPE multiplied_tile[4][4];
for (int i = 0; i < 4; i ++) {
// #pragma omp simd
for (int j = 0; j < 4; j ++) {
multiplied_tile[i][j] = transformed_tile[i][j] * transformed_filter[i * 4 + j];
}
}
// output transformation
DATA_TYPE tmp_tile_1[2][4], final_tile[2][2];
// const float At[2][4] {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, -1.0f}
// }
// At * I
// #pragma omp simd
for (int j = 0; j < 4; j ++) {
tmp_tile_1[0][j] = multiplied_tile[0][j] + multiplied_tile[1][j] + multiplied_tile[2][j];
tmp_tile_1[1][j] = multiplied_tile[1][j] - multiplied_tile[2][j] - multiplied_tile[3][j];
}
// I * A
// #pragma omp simd
for (int i = 0; i < 2; i ++) {
final_tile[i][0] = tmp_tile_1[i][0] + tmp_tile_1[i][1] + tmp_tile_1[i][2];
final_tile[i][1] = tmp_tile_1[i][1] - tmp_tile_1[i][2] - tmp_tile_1[i][3];
}
for (int i = 0; i < 2; i ++) {
for (int j = 0; j < 2; j ++) {
int x = 2 * tile_i + i;
int y = 2 * tile_j + j;
if (x >= out_map_size || y >= out_map_size) {
continue;
}
output[x * out_map_size + y] = final_tile[i][j];
}
}
} // for tile_i
} // for tile_j
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu) // 若凡:有改动
{
int i, j, fail;
fail = 0;
// Compare a and b
for (i=0; i < (N-2); i++)
{
for (j=0; j < (N-2); j++)
{
if (percentDiff(B[i*(N-2) + j], B_outputFromGpu[i*(N-2) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Error Threshold of %4.2f Percent: %d\n\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void WinogradConv2D_2x2(DATA_TYPE *input, DATA_TYPE *output, DATA_TYPE *transformed_filter) {
// DATA_TYPE trasformed_filter[4][4];
// WinogradConv2D_2x2_filter_transformation(trasformed_filter);
int out_map_size = N - 2;
int tile_n = (out_map_size + 1) / 2;
for (int tile_i = 0; tile_i < tile_n; tile_i ++) {
for (int tile_j = 0; tile_j < tile_n; tile_j ++) {
// input transformation
DATA_TYPE input_tile[4][4], tmp_tile[4][4], transformed_tile[4][4];
for (int i = 0; i < 4; i ++) {
for (int j = 0; j < 4; j ++) {
int x = 2 * tile_i + i;
int y = 2 * tile_j + j;
if (x >= N || y >= N) {
input_tile[i][j] = 0;
continue;
}
input_tile[i][j] = input[x * N + y];
}
}
// const float Bt[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, 0.0f, -1.0f}
// }
// Bt * d
for (int j = 0; j < 4; j ++) {
tmp_tile[0][j] = input_tile[0][j] - input_tile[2][j];
tmp_tile[1][j] = input_tile[1][j] + input_tile[2][j];
tmp_tile[2][j] = -input_tile[1][j] + input_tile[2][j];
tmp_tile[3][j] = input_tile[1][j] - input_tile[3][j];
}
// d * B
for (int i = 0; i < 4; i ++) {
transformed_tile[i][0] = tmp_tile[i][0] - tmp_tile[i][2];
transformed_tile[i][1] = tmp_tile[i][1] + tmp_tile[i][2];
transformed_tile[i][2] = -tmp_tile[i][1] + tmp_tile[i][2];
transformed_tile[i][3] = tmp_tile[i][1] - tmp_tile[i][3];
}
// element-wise multiplication
DATA_TYPE multiplied_tile[4][4];
for (int i = 0; i < 4; i ++) {
for (int j = 0; j < 4; j ++) {
multiplied_tile[i][j] = transformed_tile[i][j] * transformed_filter[i * 4 + j];
}
}
// output transformation
DATA_TYPE tmp_tile_1[2][4], final_tile[2][2];
// const float At[2][4] {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, -1.0f}
// }
// At * I
for (int j = 0; j < 4; j ++) {
tmp_tile_1[0][j] = multiplied_tile[0][j] + multiplied_tile[1][j] + multiplied_tile[2][j];
tmp_tile_1[1][j] = multiplied_tile[1][j] - multiplied_tile[2][j] - multiplied_tile[3][j];
}
// I * A
for (int i = 0; i < 2; i ++) {
final_tile[i][0] = tmp_tile_1[i][0] + tmp_tile_1[i][1] + tmp_tile_1[i][2];
final_tile[i][1] = tmp_tile_1[i][1] - tmp_tile_1[i][2] - tmp_tile_1[i][3];
}
for (int i = 0; i < 2; i ++) {
for (int j = 0; j < 2; j ++) {
int x = 2 * tile_i + i;
int y = 2 * tile_j + j;
if (x >= out_map_size || y >= out_map_size) {
continue;
}
output[x * out_map_size + y] = final_tile[i][j];
}
}
} // for tile_i
} // for tile_j
}
int main(int argc, char *argv[])
{
if (argc != 2)
{
printf("usage: ./WinogradConv2D <cpu offset>\n");
exit(0);
}
cpu_offset = atoi(argv[1]);
printf("CPU offset: %d\n", cpu_offset);
double t_start, t_end;
int i;
DATA_TYPE *B = (DATA_TYPE*)malloc((N-2)*(N-2)*sizeof(DATA_TYPE));
read_cl_file();
cl_initialization();
cl_load_prog();
a_mem_obj = (DATA_TYPE *)clSVMAlloc(clGPUContext, CL_MEM_READ_WRITE, sizeof(DATA_TYPE) * N * N, 0);
b_mem_obj = (DATA_TYPE *)clSVMAlloc(clGPUContext, CL_MEM_READ_WRITE, sizeof(DATA_TYPE) * (N-2) * (N-2), 0);
c_mem_obj = (DATA_TYPE *)clSVMAlloc(clGPUContext, CL_MEM_READ_ONLY, sizeof(DATA_TYPE) * 4 * 4, 0);
if (a_mem_obj == NULL || b_mem_obj == NULL || c_mem_obj == NULL)
printf("clSVMAlloc failed\n");
WinogradConv2D_2x2_filter_transformation(c_mem_obj); // 这里
init(a_mem_obj);
t_start = rtclock();
for (int i = 0; i < 3; i++)
cl_launch_kernel();
t_end = rtclock();
fprintf(stdout, "Total time: %lf ms\n", 1000.0 * (t_end - t_start));
WinogradConv2D_2x2(a_mem_obj, B, c_mem_obj);
compareResults(B, b_mem_obj);
free(B);
cl_clean_up();
return 0;
} |
diagsm_x_csr_u_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT r = 0; r < A->rows; ++r)
{
for (ALPHA_INT c = 0; c < columns; ++c)
{
alpha_mul(y[index2(r, c, ldy)], alpha, x[index2(r, c, ldx)]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_unaryop__lnot_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_fp64
// op(A') function: GB_tran__lnot_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z ; GB_CAST_UNSIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint8_fp64
(
uint8_t *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
10 - MPs.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include </usr/include/openmpi/mpi.h>
#include <omp.h>
#define SIZE_GAU 8
#define SIZE_JVL 15
void main(int argc, char * argv[])
{
int my_rank;
int size;
char Name[8];
strcpy(Name,"Wel1iton");
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if(my_rank == 0) //cooker
{
int Javalis[1] = {14};
int Received[1];
while(1)
{
MPI_Recv(Received, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(Javalis, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
}
}
else if(my_rank == 1) //table
{
int Avaliable[1] = {14};
int Request[1];
while(1)
{
if (Avaliable[0] == 0)
{
printf("The Last Gaules get hungry, shout n wake up the cooker...\n");
#pragma omp critical(section1)
{
MPI_Send(Avaliable, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Recv(Avaliable, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("\n < The Cooker Made his Magic n Replace All The Java's >\n\n");
}
}
else
{
MPI_Recv(Request, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("Table Received a Request from: %c | Sending Javali %d \n",Name[Request[0]], Avaliable[0]);
MPI_Send(Avaliable, 1, MPI_INT, 2, 0, MPI_COMM_WORLD);
Avaliable[0]--;
}
}
}
else //Gaules
{
#pragma omp parallel num_threads(SIZE_GAU)
{
int idGaules;
idGaules = omp_get_thread_num();
int LuckyJava;
//int AskTable[1] = {my_rank};
//printf("\nTest %c %d\n",Name[idGaules],idGaules);
while(1)
{
MPI_Send(&idGaules, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Recv(&LuckyJava, 1, MPI_INT,1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf(" - G[%c][id = %d] is eating the lucky javali %d n very happy.\n", Name[idGaules], idGaules, LuckyJava);
sleep(1);
}
}
}
MPI_Finalize();
}
|
version2_1.c | // Compile with:
//
//
// To specify the number of bodies in the world, the program optionally accepts
// an integer as its first command line argument.
#include <time.h>
#include <sys/times.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <X11/Xlib.h>
#include <unistd.h>
#include "omp.h"
#define WIDTH 1024
#define HEIGHT 768
// default number of bodies
#define DEF_NUM_BODIES 200
// gravitational constant
#define GRAV 10.0
// initial velocities are scaled by this value
#define V_SCALAR 20.0
// initial masses are scaled by this value
#define M_SCALAR 5.0
// radius scalar
#define R_SCALAR 3
// coefficient of restitution determines the elasticity of a collision: C_REST = [0,1]
// if C_REST = 0 -> perfectly inelastic (particles stick together)
// if C_REST = 1 -> perfectly elastic (no loss of speed)
#define C_REST 0.5
// set the iteration times
#define iteration_times 100
// Must set 0 if run on Pi
#define NOT_RUN_ON_PI 1
// World Buffer
#define ORIGINAL_WORLD 0
#define BACK_WORLD 1
struct body {
double x, y; // position
double vx, vy; // velocity
double m; // mass
double r; // radius of the particle
double x_back, y_back;
};
struct world {
struct body *bodies;
int num_bodies;
};
clock_t total_time = 0;
//total_time.sec = 0;
//total_time.usec = 0;
/* This function initializes each particle's mass, velocity and position */
struct world *create_world(int num_bodies) {
struct world *world = malloc(sizeof(struct world));
world->num_bodies = num_bodies;
world->bodies = malloc(sizeof(struct body) * num_bodies);
int i = 0;
double x;
double y;
double rc;
int min_dim = (WIDTH < HEIGHT) ? WIDTH : HEIGHT;
while (i < num_bodies) {
x = drand48() * WIDTH;
y = drand48() * HEIGHT;
rc = sqrt((WIDTH / 2 - x) * (WIDTH / 2 - x) + (y - HEIGHT / 2) * (y - HEIGHT / 2));
if (rc <= min_dim / 2) {
world->bodies[i].x = x;
world->bodies[i].y = y;
world->bodies[i].x_back = x;
world->bodies[i].y_back = y;
world->bodies[i].vx = V_SCALAR * (y - HEIGHT / 2) / rc;
world->bodies[i].vy = V_SCALAR * (WIDTH / 2 - x) / rc;
world->bodies[i].m = (1 / (0.025 + drand48())) * M_SCALAR;
world->bodies[i].r = sqrt(world->bodies[i].m / M_PI) * R_SCALAR;
i++;
}
}
return world;
}
// set the foreground color given RGB values between 0..255.
void set_color(Display *disp, GC gc, int r, int g, int b) {
unsigned long int p;
if (r < 0) r = 0; else if (r > 255) r = 255;
if (g < 0) g = 0; else if (g > 255) g = 255;
if (b < 0) b = 0; else if (b > 255) b = 255;
p = (r << 16) | (g << 8) | (b);
XSetForeground(disp, gc, p);
}
/* This function updates the screen with the new positions of each particle */
void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) {
int i;
double x, y, r, r2;
// we turn off aliasing for faster draws
set_color(disp, gc, 255, 255, 255);
XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT);
for (i = 0; i < world->num_bodies; i++) {
r = world->bodies[i].r;
x = world->bodies[i].x - r;
y = world->bodies[i].y - r;
r2 = r + r;
// draw body
set_color(disp, gc, 255 * 7 / 10, 255 * 7 / 10, 255 * 7 / 10);
XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64);
set_color(disp, gc, 0, 0, 0);
XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64);
}
}
void collision_step(struct world *world) {
int a, b;
double r, x, y, vx, vy;
// Impose screen boundaries by reversing direction if body is off screen
for (a = 0; a < world->num_bodies; a++) {
r = world->bodies[a].r;
x = world->bodies[a].x;
y = world->bodies[a].y;
vx = world->bodies[a].vx;
vy = world->bodies[a].vy;
if (x - r < 0) { // left edge
if (vx < 0) { world->bodies[a].vx = -C_REST * vx; }
world->bodies[a].x = r;
} else if (x + r > WIDTH) { // right edge
if (vx > 0) { world->bodies[a].vx = -C_REST * vx; }
world->bodies[a].x = WIDTH - r;
}
if (y - r < 0) { // bottom edge
if (vy < 0) { world->bodies[a].vy = -C_REST * vy; }
world->bodies[a].y = r;
} else if (y + r > HEIGHT) { // top edge
if (vy > 0) { world->bodies[a].vy = -C_REST * vy; }
world->bodies[a].y = HEIGHT - r;
}
}
}
void position_step(struct world *world, double time_res,int iteration) {
int i, j;
/* The forces array stores the x and y components of the total force acting
* on each body. The forces are index like this:
* F on body i in the x dir = F_x[i]
* F on body i in the y dir = F_y[i] */
double *force_x = (double *) malloc(sizeof(double) * world->num_bodies);
double *force_y = (double *) malloc(sizeof(double) * world->num_bodies);
// initialize all forces to zero
force_x = memset(force_x, 0, sizeof(double) * world->num_bodies);
force_y = memset(force_y, 0, sizeof(double) * world->num_bodies);
/* Compute the net force on each body */
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < world->num_bodies; i++) {
double d, d_cubed, diff_x, diff_y;
for (int j = 0; j < world->num_bodies; j++) {
if (i == j) {
continue;
}
// Compute the x and y distances and total distance d between
// bodies i and j
if(iteration%2==0){
diff_x = world->bodies[j].x - world->bodies[i].x;
diff_y = world->bodies[j].y - world->bodies[i].y;
}
else{
diff_x = world->bodies[j].x_back - world->bodies[i].x_back;
diff_y = world->bodies[j].y_back - world->bodies[i].y_back;
}
d = sqrt((diff_x * diff_x) + (diff_y * diff_y));
if (d < 25) {
d = 25;
}
d_cubed = d * d * d;
// Add force due to j to total force on i
force_x[i] += GRAV * (world->bodies[i].m * world->bodies[j].m
/ d_cubed) * diff_x;
force_y[i] += GRAV * (world->bodies[i].m * world->bodies[j].m
/ d_cubed) * diff_y;
}
world->bodies[i].vx += force_x[i] * time_res / world->bodies[i].m;
world->bodies[i].vy += force_y[i] * time_res / world->bodies[i].m;
if (iteration%2== 0) {
// Update positions
world->bodies[i].x_back = world->bodies[i].x + world->bodies[i].vx * time_res;
world->bodies[i].y_back = world->bodies[i].y + world->bodies[i].vy * time_res;
} else {
world->bodies[i].x = world->bodies[i].x_back + world->bodies[i].vx * time_res;
world->bodies[i].y = world->bodies[i].y_back + world->bodies[i].vy * time_res;
}
}
}
}
void step_world(struct world *world, double time_res,int iteration) {
struct tms ttt;
clock_t start, end;
start = times(&ttt);
position_step(world, time_res,iteration);
end = times(&ttt);
total_time += end - start;
collision_step(world);
}
/* Main method runs initialize() and update() */
/* Benchmark driver: for each thread count in thread_list, build a fresh
 * universe, run iteration_times simulation steps (optionally visualized via
 * Xlib when NOT_RUN_ON_PI), and append "<threads> <seconds>s" to "outdata". */
int main(int argc, char **argv) {
//total_time.tv_sec = 0;
//total_time.tv_usec = 0;
/* get num bodies from the command line */
int num_bodies, threads;
num_bodies = DEF_NUM_BODIES;
threads = 1;
if (argc == 2) {
num_bodies = atoi(argv[1]);
}
/* thread counts to benchmark; one full simulation run per entry */
int thread_list[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
FILE *fstream = fopen("outdata", "a+");
if (fstream == NULL) {
/* every result below is appended to this file, so fail early and loudly */
perror("fopen outdata");
return 1;
}
fprintf(fstream, "Universe has %d bodies\n", num_bodies);
for (int i = 0; i < 10; ++i) {
threads = thread_list[i];
printf("Universe has %d bodies. %d Threads\n", num_bodies, threads);
omp_set_num_threads(threads);
/* set up the universe */
time_t cur_time;
time(&cur_time);
srand48((long) cur_time); // seed the RNG used in create_world
struct world *world = create_world(num_bodies);
/* NOTE(review): world is allocated each pass and never freed here —
 * confirm whether a matching destroy routine exists. */
/* set up graphics using Xlib */
#if NOT_RUN_ON_PI
Display *disp = XOpenDisplay(NULL);
int scr = DefaultScreen(disp);
Window win = XCreateSimpleWindow(
disp,
RootWindow(disp, scr),
0, 0,
WIDTH, HEIGHT,
0,
BlackPixel(disp, scr), WhitePixel(disp, scr));
XStoreName(disp, win, "N-Body Simulator");
Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr),
WIDTH, HEIGHT, DefaultDepth(disp, scr));
GC gc = XCreateGC(disp, back_buf, 0, 0);
// Make sure we're only looking for messages about closing the window
Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0);
XSetWMProtocols(disp, win, &del_window, 1);
XSelectInput(disp, win, StructureNotifyMask);
XMapWindow(disp, win);
XEvent event;
// wait until window is mapped
while (1) {
XNextEvent(disp, &event);
if (event.type == MapNotify) {
break;
}
}
#endif
struct timespec delay = {0, 1000000000 / 60}; // for 60 FPS
struct timespec remaining;
double delta_t = 0.1;
int ii;
total_time = 0;
for (ii = 0; ii < iteration_times; ii++) {
// check if the window has been closed
#if NOT_RUN_ON_PI
if (XCheckTypedEvent(disp, ClientMessage, &event)) {
break;
}
// we first draw to the back buffer then copy it to the front (`win`)
draw_world(disp, back_buf, gc, world);
XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0);
#endif
step_world(world, delta_t,ii);
//if you want to watch the process in 60 FPS
//nanosleep(&delay, &remaining);
}
/* convert accumulated clock ticks to seconds for the report */
fprintf(fstream, "%d %lfs\n", threads, (double) total_time / (sysconf(_SC_CLK_TCK)));
#if NOT_RUN_ON_PI
XFreeGC(disp, gc);
XFreePixmap(disp, back_buf);
XDestroyWindow(disp, win);
XCloseDisplay(disp);
#endif
}
fclose(fstream);
return 0;
}
|
GB_unaryop__lnot_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_bool
// op(A') function: GB_tran__lnot_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint32_bool
(
uint32_t *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + typecast + unary op. The entire loop body
// lives in the shared template "GB_unaryop_transpose.c", specialized by the
// GB_* macros defined earlier in this file.
GrB_Info GB_tran__lnot_uint32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; use the generic case
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
image_pyramid.h | /*
*
* This file is part of the open-source SeetaFace engine, which includes three modules:
* SeetaFace Detection, SeetaFace Alignment, and SeetaFace Identification.
*
* This file is part of the SeetaFace Detection module, containing codes implementing the
* face detection method described in the following paper:
*
*
* Funnel-structured cascade for multi-view face detection with alignment awareness,
* Shuzhe Wu, Meina Kan, Zhenliang He, Shiguang Shan, Xilin Chen.
* In Neurocomputing (under review)
*
*
* Copyright (C) 2016, Visual Information Processing and Learning (VIPL) group,
* Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China.
*
* The codes are mainly developed by Shuzhe Wu (a Ph.D supervised by Prof. Shiguang Shan)
*
* As an open-source face recognition engine: you can redistribute SeetaFace source codes
* and/or modify it under the terms of the BSD 2-Clause License.
*
* You should have received a copy of the BSD 2-Clause License along with the software.
* If not, see < https://opensource.org/licenses/BSD-2-Clause>.
*
* Contact Info: you can send an email to SeetaFace@vipl.ict.ac.cn for any problems.
*
* Note: the above information must be kept whenever or wherever the codes are used.
*
*/
#ifndef SEETA_FD_UTIL_IMAGE_PYRAMID_H_
#define SEETA_FD_UTIL_IMAGE_PYRAMID_H_
#include <cstdint>
#include <string>
#include "../common.h"
namespace seeta {
namespace fd {
// Resize a single-channel 8-bit image from src into dest (dimensions taken
// from the two ImageData structs) using bilinear interpolation. When the
// dimensions already match, the pixels are copied verbatim.
static void ResizeImage(const seeta::ImageData & src, seeta::ImageData* dest) {
  const int32_t src_w = src.width;
  const int32_t src_h = src.height;
  const int32_t dst_w = dest->width;
  const int32_t dst_h = dest->height;

  // Same size: no resampling needed.
  if (src_w == dst_w && src_h == dst_h) {
    std::memcpy(dest->data, src.data, src_w * src_h * sizeof(uint8_t));
    return;
  }

  const double x_scale = static_cast<double>(src_w) / dst_w;
  const double y_scale = static_cast<double>(src_h) / dst_h;
  const uint8_t* sdata = src.data;
  uint8_t* ddata = dest->data;

#pragma omp parallel num_threads(SEETA_NUM_THREADS)
  {
#pragma omp for nowait
    for (int32_t y = 0; y < dst_h; y++) {
      for (int32_t x = 0; x < dst_w; x++) {
        // Back-project the destination pixel into source coordinates.
        const double sx = x_scale * x;
        const double sy = y_scale * y;
        // Top-left corner of the 2x2 neighborhood, clamped so the +1
        // accesses below stay in bounds.
        int32_t ix = static_cast<int>(sx);
        if (ix > src_w - 2) ix = src_w - 2;
        int32_t iy = static_cast<int>(sy);
        if (iy > src_h - 2) iy = src_h - 2;
        const double wx = sx - ix;
        const double wy = sy - iy;
        // Bilinear blend of the four neighbors.
        const double val = (1 - wy) * ((1 - wx) *
            sdata[iy * src_w + ix] +
            wx * sdata[iy * src_w + ix + 1]) +
            wy * ((1 - wx) * sdata[(iy + 1) * src_w + ix] +
            wx * sdata[(iy + 1) * src_w + ix + 1]);
        ddata[y * dst_w + x] = static_cast<uint8_t>(val);
      }
    }
  }
}
// Holds a copy of the original (1x) grayscale image plus a scratch buffer for
// the most recently produced scaled image, and exposes a scale range
// [min_scale_, max_scale_] stepped by scale_step_ (iteration itself is in
// GetNextScaleImage, defined elsewhere).
class ImagePyramid {
 public:
  // Buffers start as 2x2 placeholders; real sizes are installed later by
  // SetImage1x / UpdateBufScaled (defined elsewhere).
  ImagePyramid()
      : max_scale_(1.0f), min_scale_(1.0f),
        scale_factor_(1.0f), scale_step_(0.8f),
        width1x_(0), height1x_(0),
        width_scaled_(0), height_scaled_(0),
        buf_img_width_(2), buf_img_height_(2),
        buf_scaled_width_(2), buf_scaled_height_(2) {
    buf_img_ = new uint8_t[buf_img_width_ * buf_img_height_];
    buf_img_scaled_ = new uint8_t[buf_scaled_width_ * buf_scaled_height_];
  }

  // The class owns two raw heap buffers; the compiler-generated copy
  // operations would make two objects delete the same memory (double free),
  // so copying is disabled (rule of three).
  ImagePyramid(const ImagePyramid&) = delete;
  ImagePyramid& operator=(const ImagePyramid&) = delete;

  ~ImagePyramid() {
    delete[] buf_img_;
    buf_img_ = nullptr;
    buf_img_width_ = 0;
    buf_img_height_ = 0;

    delete[] buf_img_scaled_;
    buf_img_scaled_ = nullptr;
    buf_scaled_width_ = 0;
    buf_scaled_height_ = 0;

    // img_scaled_ presumably aliases buf_img_scaled_ (set elsewhere), so it
    // is only cleared here, never deleted.
    img_scaled_.data = nullptr;
    img_scaled_.width = 0;
    img_scaled_.height = 0;
  }

  // Scale multiplier per pyramid level; only values in (0, 1] are accepted.
  inline void SetScaleStep(float step) {
    if (step > 0.0f && step <= 1.0f)
      scale_step_ = step;
  }

  inline void SetMinScale(float min_scale) {
    min_scale_ = min_scale;
  }

  // Setting the max scale also resets the current scale factor and resizes
  // the scaled-image buffer accordingly.
  inline void SetMaxScale(float max_scale) {
    max_scale_ = max_scale;
    scale_factor_ = max_scale;
    UpdateBufScaled();
  }

  void SetImage1x(const uint8_t* img_data, int32_t width, int32_t height);

  inline float min_scale() const { return min_scale_; }
  inline float max_scale() const { return max_scale_; }

  // Returns a non-owning view of the stored 1x image (single channel).
  inline seeta::ImageData image1x() {
    seeta::ImageData img(width1x_, height1x_, 1);
    img.data = buf_img_;
    return img;
  }

  const seeta::ImageData* GetNextScaleImage(float* scale_factor = nullptr);

 private:
  void UpdateBufScaled();

  float max_scale_;
  float min_scale_;
  float scale_factor_;   // scale of the next/current pyramid level
  float scale_step_;

  int32_t width1x_;      // dimensions of the stored 1x image
  int32_t height1x_;

  int32_t width_scaled_; // dimensions of the current scaled image
  int32_t height_scaled_;

  uint8_t* buf_img_;     // owned: copy of the 1x image
  int32_t buf_img_width_;
  int32_t buf_img_height_;

  uint8_t* buf_img_scaled_;  // owned: scratch for the scaled image
  int32_t buf_scaled_width_;
  int32_t buf_scaled_height_;

  seeta::ImageData img_scaled_;
};
} // namespace fd
} // namespace seeta
#endif // SEETA_FD_UTIL_IMAGE_PYRAMID_H_
|
bad3ff_so8_adv_icc.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/* Devito runtime data carrier: a flat payload plus per-dimension size
 * metadata. The kernels below cast `data` to typed multi-dimensional arrays
 * using the entries of `size`. */
struct dataobj
{
  void *restrict data; /* flat array payload */
  int *size;           /* per-dimension allocated sizes (used as size[1..3] below) */
  int *npsize;         /* NOTE(review): npsize/dsize/hsize/hofs/oofs are Devito
                        * layout metadata not referenced in this file — confirm
                        * semantics against the generating Devito version. */
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};
/* Per-section wall-clock accumulators (seconds), filled via gettimeofday. */
struct profiler
{
  double section0; /* trig/anisotropy precompute loop */
  double section1; /* bf0 stencil update + source injection */
  double section2; /* not referenced in the code visible here */
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r108_vec, float **restrict r109_vec, const int time, const int tw);
/* Devito-generated forward propagator (TTI wave equation).
 * Phase 1 ("section0"): precompute sqrt/cos/sin fields r49..r53 from
 * delta/theta/phi. Phase 2 ("section1"): time-blocked loop over skewed
 * (t_blk, xb, yb) tiles, delegating the stencil update and source injection
 * to bf0. Elapsed time per phase is accumulated into *timers. Returns 0. */
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
/* View the flat dataobj payloads as typed multi-dimensional arrays. */
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
/* Scratch trig/anisotropy fields, padded by 2 cells on every side.
 * NOTE(review): posix_memalign return values are ignored here and below;
 * on allocation failure the pointers would be used uninitialized — confirm
 * whether that is acceptable for this generated code. */
float(*r49)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void **)&r49, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float(*r50)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void **)&r50, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float(*r51)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void **)&r51, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float(*r52)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void **)&r52, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float(*r53)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void **)&r53, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
/* Per-thread scratch plane pointers; the planes themselves are allocated
 * inside the parallel region below and handed to bf0. */
float **r94;
posix_memalign((void **)&r94, 64, sizeof(float *) * nthreads);
float **r95;
posix_memalign((void **)&r95, 64, sizeof(float *) * nthreads);
/* Blocking parameters supplied by the caller through block_sizes. */
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
int sf = 4; /* skew factor relating the time index to the space indices */
int t_blk_size = 2 * sf * (time_M - time_m);
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
posix_memalign((void **)&r94[tid], 64, sizeof(float[x0_blk0_size + 2 + 2][y0_blk0_size + 2 + 2][z_size + 2 + 2]));
posix_memalign((void **)&r95[tid], 64, sizeof(float[x0_blk0_size + 2 + 2][y0_blk0_size + 2 + 2][z_size + 2 + 2]));
}
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0: precompute sqrt(2*delta+1), cos/sin(theta), cos/sin(phi) */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(static, 1)
for (int x = x_m - 2; x <= x_M + 2; x += 1)
{
for (int y = y_m - 2; y <= y_M + 2; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 32)
for (int z = z_m - 2; z <= z_M + 2; z += 1)
{
r49[x + 2][y + 2][z + 2] = sqrt(2 * delta[x + 8][y + 8][z + 8] + 1);
r50[x + 2][y + 2][z + 2] = cos(theta[x + 8][y + 8][z + 8]);
r51[x + 2][y + 2][z + 2] = cos(phi[x + 8][y + 8][z + 8]);
r52[x + 2][y + 2][z + 2] = sin(theta[x + 8][y + 8][z + 8]);
r53[x + 2][y + 2][z + 2] = sin(phi[x + 8][y + 8][z + 8]);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
/* Time-blocked wavefront sweep: xb/yb ranges are extended by sf*(time_M -
 * time_m) because the per-timestep loops inside bf0 are skewed by `time`.
 * t0/t1/t2 cycle through the 3 time planes of u and v. */
for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1)); /* physical timestep index */
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0(damp_vec, dt, epsilon_vec, (float *)r49, (float *)r50, (float *)r51, (float *)r52, (float *)r53, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M, x_m, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r94, (float **)r95, time, tw);
// x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m,
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
/* Release per-thread scratch planes, then the shared scratch fields. */
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
free(r94[tid]);
free(r95[tid]);
}
free(r49);
free(r50);
free(r51);
free(r52);
free(r53);
free(r94);
free(r95);
return 0;
}
/* Devito-generated stencil kernel for one (time, xb, yb) tile.
 * Pass 1 fills per-thread scratch planes r82/r83 with rotated first-derivative
 * combinations of u and v (weighted by the precomputed trig fields r50..r53).
 * Pass 2 applies the full update to u[t2]/v[t2] and injects point sources
 * selected by the sparse source masks. The x/y loop indices are skewed by
 * `time`; array accesses subtract `time` to recover physical coordinates. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r94_vec, float **restrict r95_vec, const int time, const int tw)
{
/* View the flat dataobj payloads as typed multi-dimensional arrays. */
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r49)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r49_vec;
float(*restrict r50)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r50_vec;
float(*restrict r51)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r51_vec;
float(*restrict r52)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r52_vec;
float(*restrict r53)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r53_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
float **r94 = (float **)r94_vec;
float **r95 = (float **)r95_vec;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
/* Per-thread scratch planes, allocated by the caller (ForwardTTI). */
float(*restrict r82)[y0_blk0_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 2 + 2][z_size + 2 + 2]) r94[tid];
float(*restrict r83)[y0_blk0_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 2 + 2][z_size + 2 + 2]) r95[tid];
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
/* Pass 1: fill r82 (from u) and r83 (from v) over the block plus a halo
 * of 2 cells on each side (loops start at x0_blk0 - 2 / y0_blk0 - 2). */
for (int x = x0_blk0 - 2, xs = 0; x <= min(min((x_M + time), (xb + xb_size + 1)), (x0_blk0 + x0_blk0_size + 1)); x++, xs++)
{
for (int y = y0_blk0 - 2, ys = 0; y <= min(min((y_M + time), (yb + yb_size + 1)), (y0_blk0 + y0_blk0_size + 1)); y++, ys++)
{
//printf(" bf0 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 8, y - time + 8, xs, ys);
#pragma omp simd aligned(u, v : 64)
for (int z = z_m - 2; z <= z_M + 2; z += 1)
{
r82[xs][ys][z + 2] = -(8.33333346e-3F * (u[t0][x - time + 6][y - time + 8][z + 8] - u[t0][x - time + 10][y - time + 8][z + 8]) + 6.66666677e-2F * (-u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8])) * r51[x - time + 2][y - time + 2][z + 2] * r52[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (u[t0][x - time + 8][y - time + 6][z + 8] - u[t0][x - time + 8][y - time + 10][z + 8]) + 6.66666677e-2F * (-u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 9][z + 8])) * r52[x - time + 2][y - time + 2][z + 2] * r53[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (u[t0][x - time + 8][y - time + 8][z + 6] - u[t0][x - time + 8][y - time + 8][z + 10]) + 6.66666677e-2F * (-u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9])) * r50[x - time + 2][y - time + 2][z + 2];
r83[xs][ys][z + 2] = -(8.33333346e-3F * (v[t0][x - time + 6][y - time + 8][z + 8] - v[t0][x - time + 10][y - time + 8][z + 8]) + 6.66666677e-2F * (-v[t0][x - time + 7][y - time + 8][z + 8] + v[t0][x - time + 9][y - time + 8][z + 8])) * r51[x - time + 2][y - time + 2][z + 2] * r52[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (v[t0][x - time + 8][y - time + 6][z + 8] - v[t0][x - time + 8][y - time + 10][z + 8]) + 6.66666677e-2F * (-v[t0][x - time + 8][y - time + 7][z + 8] + v[t0][x - time + 8][y - time + 9][z + 8])) * r52[x - time + 2][y - time + 2][z + 2] * r53[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (v[t0][x - time + 8][y - time + 8][z + 6] - v[t0][x - time + 8][y - time + 8][z + 10]) + 6.66666677e-2F * (-v[t0][x - time + 8][y - time + 8][z + 7] + v[t0][x - time + 8][y - time + 8][z + 9])) * r50[x - time + 2][y - time + 2][z + 2];
}
}
}
/* Pass 2: wave-equation update of u[t2]/v[t2] over the block interior. */
for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
{
for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 8, y - time + 8, xs, ys);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 64)
for (int z = z_m; z <= z_M; z += 1)
{
float r93 = 1.0 / dt;
float r92 = 1.0 / (dt * dt);
float r91 = 6.66666677e-2F * (r50[x - time + 2][y - time + 2][z + 1] * r83[xs + 2][ys + 2][z + 1] - r50[x - time + 2][y - time + 2][z + 3] * r83[xs + 2][ys + 2][z + 3] + r51[x - time + 1][y - time + 2][z + 2] * r52[x - time + 1][y - time + 2][z + 2] * r83[xs + 1][ys + 2][z + 2] - r51[x - time + 3][y - time + 2][z + 2] * r52[x - time + 3][y - time + 2][z + 2] * r83[xs + 3][ys + 2][z + 2] + r52[x - time + 2][y - time + 1][z + 2] * r53[x - time + 2][y - time + 1][z + 2] * r83[xs + 2][ys + 1][z + 2] - r52[x - time + 2][y - time + 3][z + 2] * r53[x - time + 2][y - time + 3][z + 2] * r83[xs + 2][ys + 3][z + 2]);
float r90 = 8.33333346e-3F * (-r50[x - time + 2][y - time + 2][z] * r83[xs + 2][ys + 2][z] + r50[x - time + 2][y - time + 2][z + 4] * r83[xs + 2][ys + 2][z + 4] - r51[x-time][y - time + 2][z + 2] * r52[x-time][y - time + 2][z + 2] * r83[xs][ys + 2][z + 2] + r51[x - time + 4][y - time + 2][z + 2] * r52[x - time + 4][y - time + 2][z + 2] * r83[xs + 4][ys + 2][z + 2] - r52[x - time + 2][y-time][z + 2] * r53[x - time + 2][y-time][z + 2] * r83[xs + 2][ys][z + 2] + r52[x - time + 2][y - time + 4][z + 2] * r53[x - time + 2][y - time + 4][z + 2] * r83[xs + 2][ys + 4][z + 2]);
float r89 = 1.0 / (vp[x - time + 8][y - time + 8][z + 8] * vp[x - time + 8][y - time + 8][z + 8]);
float r88 = 1.0 / (r89 * r92 + r93 * damp[x - time + 1][y - time + 1][z + 1]);
float r87 = 8.33333346e-3F * (r50[x - time + 2][y - time + 2][z] * r82[xs + 2][ys + 2][z] - r50[x - time + 2][y - time + 2][z + 4] * r82[xs + 2][ys + 2][z + 4] + r51[x-time][y - time + 2][z + 2] * r52[x-time][y - time + 2][z + 2] * r82[xs][ys + 2][z + 2] - r51[x - time + 4][y - time + 2][z + 2] * r52[x - time + 4][y - time + 2][z + 2] * r82[xs + 4][ys + 2][z + 2] + r52[x - time + 2][y-time][z + 2] * r53[x - time + 2][y-time][z + 2] * r82[xs + 2][ys][z + 2] - r52[x - time + 2][y - time + 4][z + 2] * r53[x - time + 2][y - time + 4][z + 2] * r82[xs + 2][ys + 4][z + 2]) + 6.66666677e-2F * (-r50[x - time + 2][y - time + 2][z + 1] * r82[xs + 2][ys + 2][z + 1] + r50[x - time + 2][y - time + 2][z + 3] * r82[xs + 2][ys + 2][z + 3] - r51[x - time + 1][y - time + 2][z + 2] * r52[x - time + 1][y - time + 2][z + 2] * r82[xs + 1][ys + 2][z + 2] + r51[x - time + 3][y - time + 2][z + 2] * r52[x - time + 3][y - time + 2][z + 2] * r82[xs + 3][ys + 2][z + 2] - r52[x - time + 2][y - time + 1][z + 2] * r53[x - time + 2][y - time + 1][z + 2] * r82[xs + 2][ys + 1][z + 2] + r52[x - time + 2][y - time + 3][z + 2] * r53[x - time + 2][y - time + 3][z + 2] * r82[xs + 2][ys + 3][z + 2]) - 1.78571425e-5F * (u[t0][x - time + 4][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 4][z + 8] + u[t0][x - time + 8][y - time + 8][z + 4] + u[t0][x - time + 8][y - time + 8][z + 12] + u[t0][x - time + 8][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 8][z + 8]) + 2.53968248e-4F * (u[t0][x - time + 5][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 5][z + 8] + u[t0][x - time + 8][y - time + 8][z + 5] + u[t0][x - time + 8][y - time + 8][z + 11] + u[t0][x - time + 8][y - time + 11][z + 8] + u[t0][x - time + 11][y - time + 8][z + 8]) - 1.99999996e-3F * (u[t0][x - time + 6][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 6][z + 8] + u[t0][x - time + 8][y - time + 8][z + 6] + u[t0][x - time + 8][y - time + 8][z + 10] + u[t0][x - time + 8][y - time + 10][z + 8] + u[t0][x - time + 10][y - 
time + 8][z + 8]) + 1.59999996e-2F * (u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9] + u[t0][x - time + 8][y - time + 9][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8]) - 8.54166647e-2F * u[t0][x - time + 8][y - time + 8][z + 8];
float r80 = r92 * (-2.0F * u[t0][x - time + 8][y - time + 8][z + 8] + u[t1][x - time + 8][y - time + 8][z + 8]);
float r81 = r92 * (-2.0F * v[t0][x - time + 8][y - time + 8][z + 8] + v[t1][x - time + 8][y - time + 8][z + 8]);
u[t2][x - time + 8][y - time + 8][z + 8] = r88 * ((-r80) * r89 + r87 * (2 * epsilon[x - time + 8][y - time + 8][z + 8] + 1) + r93 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 8][y - time + 8][z + 8]) + (r90 + r91) * r49[x - time + 2][y - time + 2][z + 2]);
v[t2][x - time + 8][y - time + 8][z + 8] = r88 * ((-r81) * r89 + r87 * r49[x - time + 2][y - time + 2][z + 2] + r90 + r91 + r93 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 8][y - time + 8][z + 8]));
}
/* Sparse source injection: nnz_sp_source_mask counts active z entries for
 * this (x, y) column; sp_source_mask lists their z indices. */
int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
u[t2][x - time + 8][y - time + 8][zind + 8] += r22;
float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
v[t2][x - time + 8][y - time + 8][zind + 8] += r23;
//printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23);
}
}
}
}
}
}
}
|
host.h | #ifndef CUSAMPEN_HOST_H
#define CUSAMPEN_HOST_H
#include <nvrtc.h>
#include <cuda.h>
#include <stdio.h> //for printf
#include <string>
#include <immintrin.h>
#include <cusampen/cuda/cuda.h>
namespace cusampen{
// Build the CUDA C source for the sample-entropy kernel, baking the template
// length m, tolerance eps, series length, and ApEn/SampEn mode into
// preprocessor constants so NVRTC can fully unroll the inner loops.
const std::string get_kernel_source(int m, float eps, int length, int apen){
// count self match: ApEn mode starts the match counters at 0 (self match
// included); SampEn mode starts them at -1 (self match excluded).
if (apen){
apen = 0;
} else {
apen = -1;
}
std::string source = " \n\
#define M " + std::to_string(m) + " \n\
#define EPS " + std::to_string(eps) + "f \n\
#define LENGTH " + std::to_string(length) + " \n\
#define APEN " + std::to_string(apen) + " \n\
 \n\
__device__ __forceinline__ \n\
int is_equal(float a, float b, float eps) \n\
{ \n\
 return fabsf(a - b) <= eps ? 1 : 0; \n\
} \n\
 \n\
extern \"C\" __global__ void sampen(float *x, \n\
 int *mcounts, \n\
 int *mpocounts){ \n\
 int index = blockIdx.x * blockDim.x + threadIdx.x; \n\
 \n\
 // we can compare for both M and M + 1 sizes \n\
 // to use less load/store operations \n\
 if (index <= LENGTH - M){ \n\
 float base[M + 1]; \n\
 float values[M + 1]; \n\
 int mcounter = APEN; \n\
 int mpocounter = APEN; \n\
 int state; \n\
 \n\
 // fetch M and M + 1 base vectors from memory at index \n\
#pragma unroll (M + 1) \n\
 for (int i = 0; i < M + 1; ++i){ \n\
 base[i] = x[i + index]; \n\
 values[i] = x[i]; \n\
 } \n\
 \n\
 for (int i = 0; i < LENGTH - M; ++i){ \n\
 state = 1; \n\
#pragma unroll (M) \n\
 for (int j = 0; j < M; ++j){ \n\
 state *= is_equal(base[j], values[j], EPS); \n\
 values[j] = values[j + 1]; \n\
 } \n\
 mcounter += state; \n\
 state *= is_equal(base[M], values[M], EPS); \n\
 values[M] = x[M + i + 1]; \n\
 mpocounter += state; \n\
 } \n\
 \n\
 state = 1; \n\
#pragma unroll (M) \n\
 for (int j = 0; j < M; ++j){ \n\
 state *= is_equal(base[j], values[j], EPS); \n\
 } \n\
 mcounter += state; \n\
 mcounts[index] = mcounter; \n\
 mpocounts[index] = mpocounter; \n\
 } \n\
} \n\
";
//printf("%s\n", source.c_str());
return source;
}
// JIT-compile the generated kernel source with NVRTC via the cuda::program
// wrapper and return the compiled result (PTX/log, per cuda::program::compile).
const std::string compile(const std::string &source){
  cuda::program prog(source);
  // Default target is compute_50; alternative option sets kept for reference.
  const char *options[] = {"-use_fast_math ", "-restrict", "-I.", "-arch=compute_50", "-std=c++14"};
  //const char *options[] = {"-use_fast_math ", "-restrict", "-I.", "-arch=compute_35", "-std=c++11"};
  //const char *options[] = {"-use_fast_math ", "-restrict", "-I.", "-arch=compute_75", "-std=c++14"};
  // Derive the option count from the array itself so it cannot drift from
  // the list above (previously a hard-coded 5).
  return prog.compile(options, sizeof(options) / sizeof(options[0]));
}
// Sum the first n elements of x. OpenMP splits the iterations across threads
// and combines the per-thread partial sums via the reduction clause; with
// OpenMP disabled the pragma is ignored and this is a plain serial loop.
int sum(int *x, int n){
  int total = 0;
  #pragma omp parallel for simd reduction(+:total)
  for (int idx = 0; idx < n; ++idx)
    total += x[idx];
  return total;
}
}
#endif
|
GB_binop__remainder_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__remainder_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__remainder_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__remainder_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__remainder_fp64)
// C=scalar+B GB (_bind1st__remainder_fp64)
// C=scalar+B' GB (_bind1st_tran__remainder_fp64)
// C=A+scalar GB (_bind2nd__remainder_fp64)
// C=A'+scalar GB (_bind2nd_tran__remainder_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = remainder (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = remainder (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_REMAINDER || GxB_NO_FP64 || GxB_NO_REMAINDER_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: C += A+B with all three matrices dense is not generated for
// the remainder operator (the generator names it "(none)" and compiles it out).
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.  The
// "+" here is this file's operator, cij = remainder (aij, bij).  All of the
// row-by-row work is in the included template; nthreads bounds parallelism.
GrB_Info GB (_Cdense_ewise3_noaccum__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// compiled out entirely when this operator/type pair is disabled via GB_control.h
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// remainder operator as the accumulator.  B_ek_slicing/B_ntasks/B_nthreads
// describe how B's entries have been partitioned across tasks.
GrB_Info GB (_Cdense_accumB__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b (passed type-erased via p_bwork) into
// every entry of the dense matrix C, using the remainder operator.
GrB_Info GB (_Cdense_accumb__remainder_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable; generator boilerplate kept so all branches return
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: C = A*D column scaling is not generated for the remainder
// operator (named "(none)" by the generator and compiled out).
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: C = D*B row scaling is not generated for the remainder
// operator (named "(none)" by the generator and compiled out).
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with cij = remainder (aij, bij)
// applied where A and B intersect.  C_sparsity selects the output format;
// C_to_M/C_to_A/C_to_B map C's vectors back to M, A, and B; TaskList carries
// the parallel schedule built by the caller.
GrB_Info GB (_AaddB__remainder_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces are allocated inside the template and released below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C
// is sparse or hypersparse, with cij = remainder (aij, bij).
GrB_Info GB (_AemultB_08__remainder_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  Because remainder(x,y) is not commutative and has no
// pre-flipped variant, a flipxy request is honored by compiling the template
// twice with GB_FLIPPED toggled.
GrB_Info GB (_AemultB_02__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full, with cij = remainder (aij, bij).
GrB_Info GB (_AemultB_04__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap,
// with cij = remainder (aij, bij).
GrB_Info GB (_AemultB_bitmap__remainder_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = remainder (x, Bx): apply the operator with a scalar bound to the first
// argument.  Bb is B's bitmap (NULL for full matrices); entries absent from
// the bitmap are skipped.  Cx and Bx may alias, which is safe because each
// Cx[p] is written only from Bx[p].
GrB_Info GB (_bind1st__remainder_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = remainder (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = remainder (Ax, y): apply the operator with a scalar bound to the
// second argument.  Ab is A's bitmap (NULL for full matrices); entries absent
// from the bitmap are skipped.  Cx and Ax may alias safely (Cx[p] depends
// only on Ax[p]).
GrB_Info GB (_bind2nd__remainder_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = remainder (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it computes one output entry
// cij = remainder (x, aij), with the scalar x captured from the enclosing
// function scope.  (No comments may be placed inside the macro continuation.)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = remainder (x, aij) ; \
}
// C = remainder (x, A'): transpose A while applying the operator with the
// scalar bound to the first argument.
GrB_Info GB (_bind1st_tran__remainder_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it computes one output entry
// cij = remainder (aij, y), with the scalar y captured from the enclosing
// function scope.  (No comments may be placed inside the macro continuation.)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = remainder (aij, y) ; \
}
// C = remainder (A', y): transpose A while applying the operator with the
// scalar bound to the second argument.
GrB_Info GB (_bind2nd_tran__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickWand Deprecated Methods %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define PixelViewId "PixelView"
#define ThrowWandException(severity,tag,context) \
{ \
(void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \
tag,"`%s'",context); \
return(MagickFalse); \
}
/*
Typedef declarations.
*/
struct _PixelView
{
size_t
id;
char
name[MaxTextExtent];
ExceptionInfo
*exception;
MagickWand
*wand;
CacheView
*view;
RectangleInfo
region;
size_t
number_threads;
PixelWand
***pixel_wands;
MagickBooleanType
debug;
size_t
signature;
};
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickAverageImages() average a set of images.
%
% The format of the MagickAverageImages method is:
%
% MagickWand *MagickAverageImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
/*
  Build a new MagickWand that shares the caller's image_info/quantize_info
  settings (cloned) and takes ownership of the supplied image list `images`
  (assigned directly, not cloned).  Any pending exception on `wand` is copied
  into the clone.  Allocation failure is fatal (ThrowWandFatalException).
*/
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
Image *images)
{
MagickWand
*clone_wand;

assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
if (clone_wand == (MagickWand *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
images->filename);
/* zero first so every unset field is NULL/0 */
(void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand));
clone_wand->id=AcquireWandId();
(void) FormatMagickString(clone_wand->name,MaxTextExtent,"%s-%.20g",
MagickWandId,(double) clone_wand->id);
clone_wand->exception=AcquireExceptionInfo();
InheritException(clone_wand->exception,wand->exception);
clone_wand->image_info=CloneImageInfo(wand->image_info);
clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
/* ownership transfer: the clone now holds the caller-supplied list */
clone_wand->images=images;
clone_wand->debug=IsEventLogging();
if (clone_wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
clone_wand->signature=WandSignature;
return(clone_wand);
}
/*
  Average the wand's image sequence into a single image (mean of each pixel
  across the sequence) and return it wrapped in a new wand.  Returns NULL if
  the wand holds no images or the evaluation fails; details of a failure are
  left in wand->exception.
*/
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
Image
*average_image;

assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
average_image=EvaluateImages(wand->images,MeanEvaluateOperator,
wand->exception);
if (average_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,average_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelView() makes a copy of the specified pixel view.
%
% The format of the ClonePixelView method is:
%
% PixelView *ClonePixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/*
  Make a deep copy of a pixel view: fresh id/name/exception, a cloned cache
  view, and per-thread clones of the pixel wand arrays.

  BUG FIX: the original never allocated clone_view->pixel_wands before the
  per-thread loop; ResetMagickMemory() leaves that pointer NULL, so the first
  clone_view->pixel_wands[i]= assignment dereferenced NULL.  Allocate the
  thread table (number_threads entries) before filling it.

  NOTE(review): clone_view->wand is left NULL as in the original; confirm no
  caller reads the wand member of a cloned view.
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatMagickString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /* allocate the per-thread table before writing its slots */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelView() deallocates memory associated with a pixel view.
%
% The format of the DestroyPixelView method is:
%
% PixelView *DestroyPixelView(PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o number_wands: the number of pixel wands.
%
% o number_threads: number of threads.
%
*/
/*
  Release the per-thread pixel-wand arrays and then the thread table itself.
  Always returns NULL so callers can overwrite their pointer in one statement.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  ssize_t
    t;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (t=0; t < (ssize_t) number_threads; t++)
  {
    if (pixel_wands[t] == (PixelWand **) NULL)
      continue;
    /* DestroyPixelWands frees the array and returns NULL */
    pixel_wands[t]=DestroyPixelWands(pixel_wands[t],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/*
  Deallocate a pixel view: the per-thread wand arrays, the cache view, and the
  exception, then the view struct itself.  The signature is inverted before
  the free so stale pointers fail the signature asserts.  Returns NULL.
*/
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
pixel_view->region.width,pixel_view->number_threads);
pixel_view->view=DestroyCacheView(pixel_view->view);
pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
pixel_view->signature=(~WandSignature);
RelinquishWandId(pixel_view->id);
pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferPixelViewIterator() iterates over three pixel views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel region is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination pixel view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferPixelViewIterator method is:
%
% MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
% PixelView *duplex,PixelView *destination,
% DuplexTransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o duplex: the duplex pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source, duplex, and destination views in lockstep, one scanline
  per iteration, invoking the user's transfer callback for each row.  Source
  and duplex rows use virtual pixels (offsets outside the canvas are allowed);
  destination rows are read, handed to the callback, and written back.
  Returns MagickFalse if any row access or the callback fails.

  FIXES:
  - The row loop ran y from region.y to region.height instead of to
    region.y+region.height, silently dropping rows whenever the view had a
    non-zero y offset.
  - On a failed destination sync the exception was inherited from
    source->view even though the sync was on destination->view.
  - Added NULL/signature asserts for duplex and destination, matching the
    source asserts (all three views are dereferenced unconditionally).
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  assert(duplex != (PixelView *) NULL);
  assert(duplex->signature == WandSignature);
  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /* the destination is written, so it must be DirectClass */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) (source->region.y+
       source->region.height); y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* load the source row into the per-thread pixel wands */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    /* load the duplex row */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],duplex_indexes[x]);
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],duplex_indexes[x]);
    /* load the destination row (authentic pixels: written back below) */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          destination_indexes[x]);
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],destination_indexes[x]);
    /* user callback: may mutate the destination wands */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the (possibly modified) wands back into the destination row */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        destination_indexes[x]=PixelGetBlackQuantum(
          destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* the sync failed on the destination view, so report its exception */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a pixel view.
%
% The format of the GetPixelViewException method is:
%
% char *GetPixelViewException(const PixelWand *pixel_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel pixel_view.
%
% o severity: the severity of the error is returned here.
%
*/
/*
  Return a newly allocated, localized description of the view's pending
  exception ("reason (description)"), and store its severity in *severity.
  The string is empty if no reason is set; the caller owns the returned
  buffer and must free it (allocation failure is fatal).
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
ExceptionType *severity)
{
char
*description;

assert(pixel_view != (const PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
if (pixel_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
assert(severity != (ExceptionType *) NULL);
*severity=pixel_view->exception->severity;
description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
pixel_view->name);
*description='\0';
if (pixel_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->reason),
MaxTextExtent);
if (pixel_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MaxTextExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->description),
MaxTextExtent);
(void) ConcatenateMagickString(description,")",MaxTextExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w H e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewHeight() returns the pixel view height.
%
% The format of the GetPixelViewHeight method is:
%
% size_t GetPixelViewHeight(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: height (in rows) of the view's region. */
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.height);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewIterator() iterates over the pixel view in parallel and calls
% your get method for each scanline of the view. The pixel region is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetPixelViewIterator method is:
%
% MagickBooleanType GetPixelViewIterator(PixelView *source,
% GetPixelViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source view in parallel, one scanline per iteration, loading
  each row into the per-thread pixel wands and invoking the user's get
  callback.  Rows use virtual pixels, so offsets outside the canvas are
  allowed; any pixel updates made by the callback are ignored.  Returns
  MagickFalse if any row access or the callback fails.

  FIX: the row loop ran y from region.y to region.height instead of to
  region.y+region.height, silently dropping rows whenever the view had a
  non-zero y offset.
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) (source->region.y+
       source->region.height); y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* expose the row to the callback via the per-thread wand array */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewPixels() returns the pixel view pixel_wands.
%
% The format of the GetPixelViewPixels method is:
%
% PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/*
  Accessor: the calling thread's pixel-wand array for this view (one wand per
  column of the current row).  Intended for use inside view-iterator
  callbacks, where the OpenMP thread id selects the row being processed.
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
const int
id = GetOpenMPThreadId();

assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWand() returns the magick wand associated with the pixel view.
%
% The format of the GetPixelViewWand method is:
%
% MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  /* Validate the container before exposing its owning wand. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return pixel_view->wand;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W i d t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWidth() returns the pixel view width.
%
% The format of the GetPixelViewWidth method is:
%
% size_t GetPixelViewWidth(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  /* Width of the region this view iterates over. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return pixel_view->region.width;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w X %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewX() returns the pixel view x offset.
%
% The format of the GetPixelViewX method is:
%
% ssize_t GetPixelViewX(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  /* Horizontal offset of the region this view iterates over. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return pixel_view->region.x;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w Y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewY() returns the pixel view y offset.
%
% The format of the GetPixelViewY method is:
%
% ssize_t GetPixelViewY(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  /* Vertical offset of the region this view iterates over. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return pixel_view->region.y;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPixelView() returns MagickTrue if the parameter is verified as a pixel
% view container.
%
% The format of the IsPixelView method is:
%
% MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  /*
    A valid pixel view is non-NULL, carries the wand signature, and has a
    name that begins with the PixelViewId prefix.
  */
  if ((pixel_view == (const PixelView *) NULL) ||
      (pixel_view->signature != WandSignature))
    return(MagickFalse);
  if (LocaleNCompare(pixel_view->name,PixelViewId,strlen(PixelViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickClipPathImage() clips along the named paths from the 8BIM profile, if
% present. Later operations take effect inside the path. Id may be a number
% if preceded with #, to work on a numbered path, e.g., "#1" to use the first
% path.
%
% The format of the MagickClipPathImage method is:
%
% MagickBooleanType MagickClipPathImage(MagickWand *wand,
% const char *pathname,const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
  const char *pathname,const MagickBooleanType inside)
{
  MagickBooleanType
    status;

  /* Deprecated alias: delegate to MagickClipImagePath(). */
  status=MagickClipImagePath(wand,pathname,inside);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetFillAlpha() returns the alpha used when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawGetFillAlpha method is:
%
% double DrawGetFillAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated alias: fill "alpha" is read via the opacity accessor. */
  return DrawGetFillOpacity(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
% The format of the DrawGetStrokeAlpha method is:
%
% double DrawGetStrokeAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated alias: stroke "alpha" is read via the opacity accessor. */
  return DrawGetStrokeOpacity(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P e e k G r a p h i c W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPeekGraphicWand() returns the current drawing wand.
%
% The format of the PeekDrawingWand method is:
%
% DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated alias: delegate to PeekDrawingWand(). */
  return PeekDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P o p G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPopGraphicContext() destroys the current drawing wand and returns to the
% previously pushed drawing wand. Multiple drawing wands may exist. It is an
% error to attempt to pop more drawing wands than have been pushed, and it is
% proper form to pop all drawing wands which have been pushed.
%
% The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias: discard PopDrawingWand()'s status (void interface). */
  (void) PopDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P u s h G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPushGraphicContext() clones the current drawing wand to create a new
% drawing wand. The original drawing wand(s) may be returned to by
% invoking PopDrawingWand(). The drawing wands are stored on a drawing wand
% stack. For every Pop there must have already been an equivalent Push.
%
% The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias: discard PushDrawingWand()'s status (void interface). */
  (void) PushDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetFillAlpha() sets the alpha to use when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawSetFillAlpha method is:
%
% void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated alias: fill "alpha" is stored via the opacity setter. */
  DrawSetFillOpacity(wand,fill_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
% The format of the DrawSetStrokeAlpha method is:
%
% void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o stroke_alpha: stroke alpha. The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated alias: stroke "alpha" is stored via the opacity setter. */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickColorFloodfillImage method is:
%
% MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
% const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;
  MagickBooleanType
    status;
  PixelPacket
    target;
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Carry the requested fill color into a scratch DrawInfo for the fill. */
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Seed the match target from the pixel at the starting location; the
    coordinates are wrapped into the image bounds with modulo.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  /* A border color, when supplied, replaces the seed as the match target. */
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  /* NOTE(review): mutates the image's fuzz setting as a side effect. */
  wand->images->fuzz=fuzz;
  /* FillToBorder fills until the border color; otherwise match the target. */
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickDescribeImage() identifies an image by printing its attributes to the
% file. Attributes include the image width, height, size, and others.
%
% The format of the MagickDescribeImage method is:
%
%      char *MagickDescribeImage(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated alias: delegate to MagickIdentifyImage(). */
  return MagickIdentifyImage(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k F l a t t e n I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickFlattenImages() merges a sequence of images. This is useful for
% combining Photoshop layers into a single image.
%
% The format of the MagickFlattenImages method is:
%
% MagickWand *MagickFlattenImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *merged;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* An empty wand yields NULL here rather than throwing (deprecated API). */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  merged=FlattenImages(wand->images,wand->exception);
  if (merged == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Wrap the flattened result in a fresh wand cloned from this one. */
  return(CloneMagickWandFromImages(wand,merged));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageAttribute() returns a value associated with the specified
% property. Use MagickRelinquishMemory() to free the value when you are
% finished with it.
%
% The format of the MagickGetImageAttribute method is:
%
% char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated alias: delegate to MagickGetImageProperty(). */
  return MagickGetImageProperty(wand,property);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageIndex() returns the index of the current image.
%
% The format of the MagickGetImageIndex method is:
%
% ssize_t MagickGetImageIndex(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated alias: delegate to MagickGetIteratorIndex(). */
  return MagickGetIteratorIndex(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageChannelExtrema() gets the extrema for one or more image
% channels.
%
% The format of the MagickGetImageChannelExtrema method is:
%
% MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
% const ChannelType channel,size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Extrema are written through minima/maxima; pass the status straight up. */
  return(GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageExtrema() gets the extrema for the image.
%
% The format of the MagickGetImageExtrema method is:
%
% MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
% size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Extrema are written through minima/maxima; pass the status straight up. */
  return(GetImageExtrema(wand->images,minima,maxima,wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e M a t t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageMatte() returns MagickTrue if the image has a matte channel
% otherwise MagickFalse.
%
% The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The current image's matte flag doubles as the boolean result. */
  return wand->images->matte;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImagePixels() extracts pixel data from an image and returns it to
% you. The method returns MagickTrue on success otherwise MagickFalse if an
% error is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickGetImagePixels method is:
%
% MagickBooleanType MagickGetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Deprecated alias: delegate to MagickExportImagePixels(). */
  return MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageSize() returns the image length in bytes.
%
% The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The image length in bytes is the size of its blob. */
  return GetBlobSize(wand->images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMapImage() replaces the colors of an image with the closest color
% from a reference image.
%
% The format of the MagickMapImage method is:
%
% MagickBooleanType MagickMapImage(MagickWand *wand,
% const MagickWand *map_wand,const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o map: the map wand.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  /*
    Validate the map wand as well: it is dereferenced below, and the
    original code reached map_wand->images without any NULL check.
  */
  assert(map_wand != (MagickWand *) NULL);
  assert(map_wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Replace each pixel with the closest color from the reference image. */
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMatteFloodfillImage() changes the transparency value of any pixel that
% matches target and is an immediate neighbor. If the method
% FillToBorderMethod is specified, the transparency value is changed for any
% neighbor pixel that does not match the bordercolor member of image.
%
% The format of the MagickMatteFloodfillImage method is:
%
% MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
% const double alpha,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;
  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    The original code cloned a DrawInfo here and destroyed it without ever
    using it; that dead allocation has been removed.

    Seed the match target from the pixel at the starting location (the
    coordinates are wrapped into the image bounds with modulo); a border
    color, when supplied, replaces the seed as the match target.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  /* NOTE(review): mutates the image's fuzz setting as a side effect. */
  wand->images->fuzz=fuzz;
  /* Convert alpha (1.0 = opaque) to quantum opacity (0 = opaque). */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The format of the MagickMedianFilterImage method is:
%
% MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *filtered;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered=MedianFilterImage(wand->images,radius,wand->exception);
  if (filtered == (Image *) NULL)
    return(MagickFalse);
  /* Swap the filtered result into the wand's image list in place. */
  ReplaceImageInList(&wand->images,filtered);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMinimumImages() returns the minimum intensity of an image sequence.
%
% The format of the MagickMinimumImages method is:
%
% MagickWand *MagickMinimumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *result;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* An empty wand yields NULL here rather than throwing (deprecated API). */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  result=EvaluateImages(wand->images,MinEvaluateOperator,wand->exception);
  if (result == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Wrap the minimum-intensity image in a fresh wand cloned from this one. */
  return(CloneMagickWandFromImages(wand,result));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickModeImage() makes each pixel the 'predominate color' of the
% neighborhood of the specified radius.
%
% The format of the MagickModeImage method is:
%
% MagickBooleanType MagickModeImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *filtered;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered=ModeImage(wand->images,radius,wand->exception);
  if (filtered == (Image *) NULL)
    return(MagickFalse);
  /* Swap the filtered result into the wand's image list in place. */
  ReplaceImageInList(&wand->images,filtered);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMosaicImages() inlays an image sequence to form a single coherent
% picture. It returns a wand with each image in the sequence composited at
% the location defined by the page offset of the image.
%
% The format of the MagickMosaicImages method is:
%
% MagickWand *MagickMosaicImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *composited;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* An empty wand yields NULL here rather than throwing (deprecated API). */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  composited=MosaicImages(wand->images,wand->exception);
  if (composited == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Wrap the mosaic image in a fresh wand cloned from this one. */
  return(CloneMagickWandFromImages(wand,composited));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickOpaqueImage method is:
%
% MagickBooleanType MagickOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Deprecated alias: delegate to MagickPaintOpaqueImage(). */
  return MagickPaintOpaqueImage(wand,target,fill,fuzz);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickPaintFloodfillImage method is:
%
% MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
% const ChannelType channel,const PixelWand *fill,const double fuzz,
% const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated wrapper: flood-fill from (x,y) with fill on the selected
    channels; the trailing MagickFalse means "match target" rather than
    "invert the match".
  */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickPaintOpaqueImage method is:
%
% MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
% MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
% const ChannelType channel,const PixelWand *target,
% const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: operate on the default channel set.
  */
  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated wrapper: repaint pixels matching target with fill on the
    selected channels; the trailing MagickFalse means "match target" rather
    than "invert the match".
  */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickPaintTransparentImage method is:
%
% MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: set the opacity of pixels matching target; the
    trailing MagickFalse means "match target" rather than "invert the match".
  */
  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRecolorImage() apply color transformation to an image. The method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the MagickRecolorImage method is:
%
% MagickBooleanType MagickRecolorImage(MagickWand *wand,
% const size_t order,const double *color_matrix)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o order: the number of columns and rows in the color matrix.
%
% o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *recolor_image;

  /*
    Apply an order-by-order color transformation matrix to the current image
    and replace it in the wand's image list.  Returns MagickFalse if the
    matrix is NULL or the transform fails; throws if the wand has no images.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  recolor_image=RecolorImage(wand->images,order,color_matrix,wand->exception);
  if (recolor_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,recolor_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickReduceNoiseImage() smooths the contours of an image while still
% preserving edge information. The algorithm works by replacing each pixel
% with its neighbor closest in value. A neighbor is defined by radius. Use
% a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the MagickReduceNoiseImage method is:
%
% MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *smoothed_image;

  /*
    Smooth image contours while preserving edge information, replacing each
    pixel with its closest neighbor within the given radius (0 lets the
    algorithm pick a radius).  The current image is replaced by the result.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  smoothed_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (smoothed_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,smoothed_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMaximumImages() returns the maximum intensity of an image sequence.
%
% The format of the MagickMaximumImages method is:
%
% MagickWand *MagickMaximumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *evaluate_image;

  /*
    Reduce the image sequence to a single image holding the per-pixel
    maximum, returned as a new wand; NULL if the wand holds no images or
    the evaluation fails.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  evaluate_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (evaluate_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,evaluate_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageAttribute() associates a property with an image.
%
% The format of the MagickSetImageAttribute method is:
%
% MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
% const char *property,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
% o value: the value.
%
*/
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
  const char *property,const char *value)
{
  /*
    Associate a property (name/value pair) with the current image.

    Fix: every sibling method in this file validates the wand before
    dereferencing it; the original passed wand->images (possibly NULL for an
    empty wand) straight into SetImageProperty().  Validate the wand and
    return MagickFalse when there is no image to attach the property to.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->images == (Image *) NULL)
    return(MagickFalse);
  return(SetImageProperty(wand->images,property,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageIndex() set the current image to the position of the list
% specified with the index parameter.
%
% The format of the MagickSetImageIndex method is:
%
% MagickBooleanType MagickSetImageIndex(MagickWand *wand,
% const ssize_t index)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o index: the scene number.
%
*/
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
  const ssize_t index)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: position the wand's image iterator at scene `index`.
  */
  status=MagickSetIteratorIndex(wand,index);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k S e t I m a g e O p t i o n                                   %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageOption() associates one or more options with a particular
%  image format (e.g. MagickSetImageOption(wand,"jpeg","preserve","yes")).
%
% The format of the MagickSetImageOption method is:
%
% MagickBooleanType MagickSetImageOption(MagickWand *wand,
% const char *format,const char *key,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o format: the image format.
%
% o key: The key.
%
% o value: The value.
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    image_option[MaxTextExtent];

  /*
    Compose a "format:key=value" string and record it as an image option on
    the wand's image info.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  (void) FormatMagickString(image_option,MaxTextExtent,"%s:%s=%s",format,key,
    value);
  return(DefineImageOption(wand->image_info,image_option));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickTransparentImage method is:
%
% MagickBooleanType MagickTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: set the opacity of pixels matching target to alpha
    within the given fuzz tolerance.
  */
  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e g i o n O f I n t e r e s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRegionOfInterestImage() extracts a region of the image and returns it
% as a new wand.
%
% The format of the MagickRegionOfInterestImage method is:
%
% MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
% const size_t width,const size_t height,const ssize_t x,
% const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o width: the region width.
%
% o height: the region height.
%
% o x: the region x offset.
%
% o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  MagickWand
    *region_wand;

  /*
    Deprecated wrapper: extract the width-by-height region at (x,y) and
    return it as a new wand.
  */
  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at the
%  location you specify.  The method returns MagickTrue on success otherwise
%  MagickFalse if an error is encountered.  The pixel data can be either char,
% short int, int, ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickSetImagePixels method is:
%
% MagickBooleanType MagickSetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% const void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter of a region
% of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
% or DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: import the caller's pixel buffer, laid out per `map`
    and `storage`, into the image region at (x,y).
  */
  status=MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k W r i t e I m a g e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickWriteImageBlob() implements direct to memory image formats. It
% returns the image as a blob and its length. Use MagickSetFormat() to
% set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
% Use MagickRelinquishMemory() to free the blob when you are done with it.
%
% The format of the MagickWriteImageBlob method is:
%
% unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  unsigned char
    *blob;

  /*
    Deprecated wrapper: serialize the image to an in-memory blob; the caller
    releases it with MagickRelinquishMemory().
  */
  blob=MagickGetImageBlob(wand,length);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelView() returns a pixel view required for all other methods in the
% Pixel View API.
%
% The format of the NewPixelView method is:
%
% PixelView *NewPixelView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
const size_t number_threads)
{
PixelWand
***pixel_wands;
register ssize_t
i;
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
}
return(pixel_wands);
}
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  /*
    Allocate a pixel view spanning the entire canvas of the wand's current
    image, with one row of pixel wands per OpenMP worker thread.  Throws a
    fatal wand exception if any allocation fails.

    Consistency fix: the original asserted wand->signature against
    MagickSignature while every other wand check in this file (e.g.
    MagickMosaicImages, MagickRecolorImage) tests WandSignature.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatMagickString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  /* one wand per column, one row of wands per thread */
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelViewRegion() returns a pixel view required for all other methods
% in the Pixel View API.
%
% The format of the NewPixelViewRegion method is:
%
% PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
%    o x,y,width,height: These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  /*
    Allocate a pixel view restricted to the width-by-height region at (x,y)
    of the wand's current image.  Throws a fatal wand exception if any
    allocation fails.
  */
  assert(wand != (MagickWand *) NULL);
  /* consistency: the rest of this file tests WandSignature, not
     MagickSignature */
  assert(wand->signature == WandSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatMagickString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: the wand must be assigned BEFORE the cache view is acquired.
    The original called AcquireCacheView(pixel_view->wand->images) first,
    but pixel_view->wand was still NULL (the structure was just zeroed),
    dereferencing a null pointer.  NewPixelView() uses the correct order.
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l G e t N e x t R o w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelGetNextRow() returns the next row as an array of pixel wands from the
% pixel iterator.
%
% The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    row_width;

  /*
    Deprecated wrapper: return the next row of pixel wands from the
    iterator; the row width reported by the iterator is discarded.
  */
  return(PixelGetNextIteratorRow(iterator,&row_width));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l I t e r a t o r G e t E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelIteratorGetException() returns the severity, reason, and description of
% any error that occurs when using other methods in this API.
%
% The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  char
    *description;

  /*
    Deprecated wrapper: report the severity and description of any error
    recorded on the iterator.
  */
  description=PixelGetIteratorException(iterator,severity);
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelViewIterator() iterates over the pixel view in parallel and calls
% your set method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetPixelViewIterator method is:
%
% MagickBooleanType SetPixelViewIterator(PixelView *destination,
% SetPixelViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the pixel view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Iterate scanlines of the view in parallel.  For each row the user
    callback is expected to fill destination->pixel_wands[id][0..width),
    after which the wand contents are synced back into the image.
    NOTE(review): the loop runs y from region.y to region.height, not to
    region.y+region.height — correct for full-canvas views (region.y == 0)
    but it looks off for offset regions; confirm against callers.
  */
  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* pixels are written directly, so a PseudoClass image must be promoted */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's row of wands */

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip the rest of the work */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* let the callback populate this row's pixel wands */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy wand values back into the authentic pixel buffer */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        indexes[x]=PixelGetBlackQuantum(destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferPixelViewIterator() iterates over two pixel views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% region is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination pixel view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferPixelViewIterator method is:
%
% MagickBooleanType TransferPixelViewIterator(PixelView *source,
% PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Iterate over the source and destination views in parallel: each row is
    read from the source, handed to the user transfer callback, and the
    resulting destination wands are synced back to the destination image.
  */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  /* fix: destination was dereferenced below without any validation */
  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's row of wands */

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip the rest of the work */
    /* virtual pixels allow the source region to extend past the canvas */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      NOTE(review): the destination wands are seeded from the SOURCE row
      (pixels/indexes), not from destination_pixels; preserved as-is since
      the callback is expected to overwrite them.
    */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],indexes[x]);
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],indexes[x]);
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the callback's results back into the destination pixel buffer */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        destination_indexes[x]=PixelGetBlackQuantum(
          destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: the failed sync is on the destination view, so inherit
          the exception from destination->view (the original read it from
          source->view; SetPixelViewIterator uses the destination view in
          the identical situation).
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
% your update method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdatePixelViewIterator method is:
%
% MagickBooleanType UpdatePixelViewIterator(PixelView *source,
% UpdatePixelViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterates over each scanline of the pixel view (in parallel when OpenMP is
  enabled), loads the pixels into per-thread pixel wands, invokes the user's
  update callback, and writes the wand contents back to the image.
  Returns MagickFalse if any scanline fails or the callback reports failure.
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag "PixelView/Update"
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdatePixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
/* in-place pixel updates require DirectClass storage */
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
/* NOTE(review): the loop starts at region.y but its bound is region.height
   alone (not region.y+region.height); for region.y > 0 this visits fewer
   rows than the region spans -- confirm intended semantics. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict pixels;
/* another scanline already failed: skip remaining work for this row */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
source->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
/* load the scanline into this thread's pixel wands */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
/* user callback mutates the wands in place */
if (update(source,context) == MagickFalse)
status=MagickFalse;
/* write the (possibly modified) wand contents back to the scanline */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
indexes[x]=PixelGetBlackQuantum(source->pixel_wands[id][x]);
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
#endif
|
sparselevmarq.h | #ifndef ucoslam_SparseLevMarq_H
#define ucoslam_SparseLevMarq_H
#include <Eigen/Sparse>
#include <functional>
#include <iostream>
#include <cmath>
#include <omp.h>
#include <ctime>
#include <cstring>
#include <vector>
#include <chrono>
#include <iomanip>
namespace ucoslam{
//Sparse Levenberg-Marquardt method for general problems
//Inspired in
//@MISC\{IMM2004-03215,
// author = "K. Madsen and H. B. Nielsen and O. Tingleff",
// title = "Methods for Non-Linear Least Squares Problems (2nd ed.)",
// year = "2004",
// pages = "60",
// publisher = "Informatics and Mathematical Modelling, Technical University of Denmark, {DTU}",
// address = "Richard Petersens Plads, Building 321, {DK-}2800 Kgs. Lyngby",
// url = "http://www.ltu.se/cms_fs/1.51590!/nonlinear_least_squares.pdf"
//}
template<typename T>
class SparseLevMarq{
public:
    // Optimization hyper-parameters.
    struct Params{
        Params(){}
        Params(int _maxIters,T _minError,T _min_step_error_diff=0, T _min_average_step_error_diff=0.001 ,T _tau=1 ,T _der_epsilon=1e-3){
            maxIters=_maxIters;
            minError=_minError;
            min_step_error_diff=_min_step_error_diff;
            // bug fix: this member was previously assigned from
            // _min_step_error_diff, silently ignoring the caller's value
            min_average_step_error_diff=_min_average_step_error_diff;
            tau=_tau;
            der_epsilon=_der_epsilon;
        }
        int maxIters=100;        // maximum number of iterations
        T minError=1e-5;         // minimum error; below this, optimization stops
        T min_step_error_diff=0; // if error reduction between iterations is below this, optimization stops
        T min_average_step_error_diff=0.001; // stop if per-residual error reduction falls below this
        T tau=1 ;                // how near the initial solution is estimated to be to the real one; 1 means very far
        T der_epsilon=1e-3;      // step employed for numerical differentiation
        bool cal_dev_parallel=true; // compute derivatives in parallel; if so, the error function must be reentrant
        bool use_omp=true;
        bool verbose=false;
    };
    typedef Eigen::Matrix<T,Eigen::Dynamic,1> eVector;
    typedef std::function<void(const eVector &, eVector &)> F_z_x;
    typedef std::function<void(const eVector &, Eigen::SparseMatrix<T> &)> F_z_J;
    SparseLevMarq();
    /**
     * @brief setParams
     * @param maxIters maximum number of iterations of the algorithm
     * @param minError to stop the algorithm before reaching the max iterations
     * @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop.
     * @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first
     * @param der_epsilon increment to calculate the derivate of the evaluation function
     * step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations.
     */
    void setParams(int maxIters,T minError,T min_step_error_diff=0,T tau=1 ,T der_epsilon=1e-3);
    void setParams(const Params &p);
    /**
     * @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t
     * @param z function params 1xP to be estimated. input-output. Contains the result of the optimization
     * @param f_z_x evaluation function f(z)=x
     *          first parameter  : z  : input. Data is in T precision as a row vector (1xp)
     *          second parameter : x  : output. Data must be returned in T
     * @param f_J computes the jacobian of f(z)
     *          first parameter  : z  : input. Data is in T precision as a row vector (1xp)
     *          second parameter : J  : output. Data must be returned in T
     * @return final error
     */
    T solve( eVector &z, F_z_x , F_z_J)throw (std::exception);
    /// Step by step solve mode
    /**
     * @brief init initializes the search engine
     * @param z
     */
    void init(eVector &z, F_z_x )throw (std::exception);
    /**
     * @brief step gives a step of the search
     * @param f_z_x error evaluation function
     * @param f_z_J Jacobian function
     * @return error of current solution
     */
    bool step( F_z_x f_z_x , F_z_J f_z_J)throw (std::exception);
    bool step( F_z_x f_z_x)throw (std::exception);
    /**
     * @brief getCurrentSolution returns the current solution
     * @param z output
     * @return error of the solution
     */
    T getCurrentSolution(eVector &z)throw (std::exception);
    /**
     * @brief getBestSolution sets in z the best solution up to this moment
     * @param z output
     * @return error of the solution
     */
    T getBestSolution(eVector &z)throw (std::exception);
    /** Automatic jacobian estimation
     * @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t
     * @param z function params 1xP to be estimated. input-output. Contains the result of the optimization
     * @param f_z_x evaluation function f(z)=x
     *          first parameter  : z  : input. Data is in T precision as a row vector (1xp)
     *          second parameter : x  : output. Data must be returned in T
     * @return final error
     */
    T solve( eVector &z, F_z_x )throw (std::exception);
    // sets a callback function invoked after each step
    void setStepCallBackFunc(std::function<void(const eVector &)> callback){_step_callback=callback;}
    // sets a function that indicates when the algorithm must stop. Returns true if it must stop and false otherwise
    void setStopFunction( std::function<bool(const eVector &)> stop_function){_stopFunction=stop_function;}
    void calcDerivates_omp(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x);
    void calcDerivates(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x);
    Params _params;
private:
    //--------
    eVector curr_z,x64;                 // current solution and current residual vector
    T currErr,prevErr,minErr ;          // squared-residual errors
    Eigen::SparseMatrix<T> I,J;         // identity (for damping) and Jacobian
    T mu,v;                             // LM damping factor and its growth multiplier
    std::function<void(const eVector &)> _step_callback;
    std::function<bool(const eVector &)> _stopFunction;
    void add_missing_diagonal_elements( Eigen::SparseMatrix<T> &M)throw (std::exception);
    void get_diagonal_elements_refs_and_add( Eigen::SparseMatrix<T> &M,std::vector<T*> &d_refs,T add_val)throw (std::exception);
    void mult(const Eigen::SparseMatrix<T> &lhs, const Eigen::SparseMatrix<T> &rhs,Eigen::SparseMatrix<T> &res);
};
// Default constructor: all parameters keep the defaults declared in Params.
template<typename T>
SparseLevMarq<T>::SparseLevMarq(){
}
/**
* @brief setParams
* @param maxIters maximum number of iterations of the algorithm
* @param minError to stop the algorithm before reaching the max iterations
* @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop.
* @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first
* @param der_epsilon increment to calculate the derivate of the evaluation function
* step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations.
*/
// Replaces the whole parameter set at once (see Params for field meanings).
template<typename T>
void SparseLevMarq<T>::setParams(const Params &p){
_params=p;
}
/*
  Numerically estimates the sparse Jacobian sJ of f_z_x at z by central
  differences, parallelizing over the parameters with OpenMP.
  @param z     point at which the Jacobian is evaluated
  @param sJ    output sparse Jacobian (one column per parameter)
  @param f_z_x error evaluation function; must be reentrant
*/
template<typename T>
void SparseLevMarq<T>::calcDerivates_omp(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x)
{
    // One triplet list per thread so iterations never contend on a container.
    std::vector< std::vector<Eigen::Triplet<T> > > sp_triplets(omp_get_max_threads());
#pragma omp parallel for
    for (int i=0;i<z.rows();i++) {
        // Central difference: J(:,i) ~ (f(z+eps*e_i) - f(z-eps*e_i)) / (2*eps)
        eVector zp(z),zm(z);
        zp(i)+=_params.der_epsilon;
        zm(i)-=_params.der_epsilon;
        eVector xp,xm;
        f_z_x( zp,xp);
        f_z_x( zm,xm);
        eVector dif=(xp-xm)/(2.f*_params.der_epsilon);
        // keep only the numerically non-zero elements
        int tidx=omp_get_thread_num();
        for(int r=0;r<dif.rows();r++)
            if (fabs(dif(r))>1e-4)
                sp_triplets[tidx].push_back(Eigen::Triplet<T> (r,i,dif(r)));
    }
    // Join the per-thread lists.  Bug fix: the previous memcpy-based join
    // evaluated &vec[0] on possibly-empty vectors, which is undefined
    // behavior; it also copied each per-thread vector by value when sizing.
    size_t n=0;
    for(const auto &s:sp_triplets) n+=s.size();
    std::vector<Eigen::Triplet<T> > sp_tripletsAll;
    sp_tripletsAll.reserve(n);
    for(const auto &s:sp_triplets)
        sp_tripletsAll.insert(sp_tripletsAll.end(), s.begin(), s.end());
    sJ.setFromTriplets(sp_tripletsAll.begin(),sp_tripletsAll.end());
}
/*
  Sequentially estimates the sparse Jacobian sJ of f_z_x at z by central
  differences.
  @param z     point at which the Jacobian is evaluated
  @param sJ    output sparse Jacobian (one column per parameter)
  @param f_z_x error evaluation function
*/
template<typename T>
void SparseLevMarq<T>:: calcDerivates(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x)
{
    std::vector<Eigen::Triplet<T> > entries;
    for (int col=0; col<z.rows(); col++) {
        // Perturb parameter 'col' in both directions.
        eVector zPlus(z), zMinus(z);
        zPlus(col)  += _params.der_epsilon;
        zMinus(col) -= _params.der_epsilon;
        eVector fPlus, fMinus;
        f_z_x(zPlus, fPlus);
        f_z_x(zMinus, fMinus);
        // Central difference quotient for column 'col' of the Jacobian.
        eVector deriv = (fPlus - fMinus) / (2.f * _params.der_epsilon);
        // Record only the numerically non-zero derivatives.
        for (int row=0; row<deriv.rows(); row++)
            if (fabs(deriv(row)) > 1e-4)
                entries.push_back(Eigen::Triplet<T>(row, col, deriv(row)));
    }
    sJ.setFromTriplets(entries.begin(), entries.end());
}
/*
  Solves with an automatically estimated Jacobian: picks the parallel or
  sequential numerical-derivative routine and delegates to the full solve().
*/
template<typename T>
T SparseLevMarq<T>:: solve( eVector &z, F_z_x f_z_x)throw (std::exception){
    F_z_J jacobian;
    if (_params.cal_dev_parallel && _params.use_omp)
        jacobian = [this, f_z_x](const eVector &zz, Eigen::SparseMatrix<T> &sJ) {
            calcDerivates_omp(zz, sJ, f_z_x);
        };
    else
        jacobian = [this, f_z_x](const eVector &zz, Eigen::SparseMatrix<T> &sJ) {
            calcDerivates(zz, sJ, f_z_x);
        };
    return solve(z, f_z_x, jacobian);
}
/*
  Performs one step with an automatically estimated Jacobian: picks the
  parallel or sequential derivative routine and delegates to the full step().
*/
template<typename T>
bool SparseLevMarq<T>:: step( F_z_x f_z_x)throw (std::exception){
    F_z_J jacobian;
    if (_params.cal_dev_parallel && _params.use_omp)
        jacobian = [this, f_z_x](const eVector &zz, Eigen::SparseMatrix<T> &sJ) {
            calcDerivates_omp(zz, sJ, f_z_x);
        };
    else
        jacobian = [this, f_z_x](const eVector &zz, Eigen::SparseMatrix<T> &sJ) {
            calcDerivates(zz, sJ, f_z_x);
        };
    return step(f_z_x, jacobian);
}
/*
  Initializes the step-by-step search: caches the starting point, evaluates
  the initial residual, and resets the damping state used by step().
  @param z     initial solution (1xP)
  @param f_z_x error evaluation function
*/
template<typename T>
void SparseLevMarq<T>::init(eVector &z, F_z_x f_z_x )throw (std::exception){
    curr_z=z;
    I.resize(z.rows(),z.rows());
    I.setIdentity();
    f_z_x(curr_z,x64);
    // initial squared-residual error
    minErr=currErr=prevErr=x64.cwiseProduct(x64).sum();
    J.resize(x64.rows(),z.rows());
    mu=-1;   // negative mu tells step() to bootstrap the damping factor
    // bug fix: v was never initialized; step() reads it (mu=mu*v) when the
    // very first trial step is rejected, which was an uninitialized read.
    v=2;
}
/*
  Adds 'add_val' to every diagonal coefficient of M and stores in 'refs' a
  raw pointer to each diagonal coefficient, so later calls can adjust the
  damping term in place without re-traversing the matrix.
  NOTE(review): the stored pointers stay valid only while M's sparsity
  pattern is unchanged, and every diagonal entry must already exist
  (see add_missing_diagonal_elements) -- confirm callers respect this.
*/
template<typename T>
void SparseLevMarq<T>::get_diagonal_elements_refs_and_add( Eigen::SparseMatrix<T> &M,std::vector<T*> &refs,T add_val)throw (std::exception){
refs.resize(M.cols());
//now, get their references and add mu
for (int k=0; k<M.outerSize(); ++k)
for ( typename Eigen::SparseMatrix<T>::InnerIterator it(M,k); it; ++it)
if (it.row()== it.col()) {refs[it.row()]= &it.valueRef(); *refs[it.row()]+=add_val;}
}
//parallel sparse matrix multiplication
//modified by rafael muñoz salinas (rmsalinas@uco.es) to make it parallel
/*
  Parallel sparse matrix product: res = lhs * rhs.
  Each column of the result is accumulated independently (one OpenMP loop
  iteration per column) into a per-thread map of row->value, then all
  per-thread results are inserted unordered into a column-major matrix and
  normalized via a row-major round trip.
*/
template<typename T>
void SparseLevMarq<T>::mult(const Eigen::SparseMatrix<T> &lhs, const Eigen::SparseMatrix<T> &rhs,Eigen::SparseMatrix<T> &res)
{
// make sure to call innerSize/outerSize since we fake the storage order.
uint32_t rows = lhs.innerSize();
uint32_t cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
typedef typename std::map<uint32_t,T> RowVal;
typedef typename std::pair<uint32_t,RowVal> Col_RowVal; //pair col-rowval
typedef typename std::vector< Col_RowVal> Col_RowValSet;
// one result container per thread: no synchronization needed in the loop
std::vector<Col_RowValSet>omp_container(omp_get_max_threads());
// we compute each column of the result, in parallel after the other
#pragma omp parallel for
for (uint32_t j=0; j<cols; ++j)
{
int tid=omp_get_thread_num();
omp_container[tid].push_back( std::make_pair(j,RowVal()) );
RowVal &row_val=omp_container[tid].back().second;
// res(:,j) = sum_k lhs(:,k) * rhs(k,j) over the nonzeros of rhs(:,j)
for (typename Eigen::SparseMatrix<T>::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
{
T y = rhsIt.value();
uint32_t k = rhsIt.index();
//add all indices
for (typename Eigen::SparseMatrix<T>::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
{
uint32_t i = lhsIt.index();
T x = lhsIt.value();
// accumulate x*y into row i of this column
auto iter=row_val.find(i);
if (iter==row_val.end()) row_val.insert(std::make_pair(i,x*y));
else iter->second+=x*y;
}
}
}
//finally, unordered insertion
// unordered insertion
typedef Eigen::SparseMatrix<T,Eigen::RowMajor,int32_t> RowMajorMatrix;
typedef Eigen::SparseMatrix<T,Eigen::ColMajor,int32_t> ColMajorMatrix;
ColMajorMatrix resCol(rows,cols);
resCol.reserve( lhs.nonZeros() + rhs.nonZeros() );
for(auto & omp_thread:omp_container)
for(auto c_r_v:omp_thread){//take each thread results
int j=c_r_v.first;//column
resCol.startVec(j);
for(auto r_v:c_r_v.second)//for each column element, add it
resCol.insertBackByOuterInnerUnordered(j,r_v.first) = r_v.second;
}
resCol.finalize();
// converting to row-major and back sorts the inner indices of each column
RowMajorMatrix resRow(resCol);
res = resRow;
}
/*
  Ensures every diagonal entry of M exists in the sparsity pattern by
  inserting explicit zeros where missing, so the diagonal can later be
  referenced and updated in place.
*/
template<typename T>
void SparseLevMarq<T>::add_missing_diagonal_elements(Eigen::SparseMatrix<T> &M)throw (std::exception){
    // Mark which diagonal entries are already present.
    std::vector<bool> hasDiag(M.rows(), false);
    for (int col=0; col<M.outerSize(); ++col)
        for (typename Eigen::SparseMatrix<T>::InnerIterator entry(M,col); entry; ++entry)
            if (entry.row() == entry.col())
                hasDiag[entry.row()] = true;
    // Insert an explicit zero for each missing one.
    for (size_t d=0; d<hasDiag.size(); d++)
        if (!hasDiag[d])
            M.insert(d,d) = 0;
}
//template<typename T>
//uint64_t signature(Eigen::SparseMatrix<T> &sm){
// uint64_t sum=0;
// for (int k=0; k<sm.outerSize(); ++k)
// for (typename Eigen::SparseMatrix<T>::InnerIterator it(sm,k); it; ++it)
// sum+=(it.value()*10000) + it.row() + it.col() +it.index();
// return sum;
//}
#define splm_get_time(a,b) std::chrono::duration_cast<std::chrono::duration<T>>(a-b).count()
/*
  Performs one Levenberg-Marquardt iteration: builds the Jacobian, forms the
  damped normal equations (JtJ + mu*diag) delta = -Jt*f, and retries with a
  larger damping factor (up to 5 times) until a step reduces the error.
  Returns true if a step was accepted (curr_z/currErr updated).
*/
template<typename T>
bool SparseLevMarq<T>::step( F_z_x f_z_x, F_z_J f_J)throw (std::exception){
auto t1= std::chrono::high_resolution_clock::now();
f_J(curr_z,J);
auto t2= std::chrono::high_resolution_clock::now();
Eigen::SparseMatrix<T> Jt=J.transpose();
auto t22= std::chrono::high_resolution_clock::now();
Eigen::SparseMatrix<T> JtJ;
if (_params.use_omp)
mult(Jt,J, JtJ);//parallel sparse matrix multiplication
else JtJ=Jt*J;
auto t3= std::chrono::high_resolution_clock::now();
// right-hand side of the normal equations: B = -Jt * f(z)
eVector B=-Jt*x64;
auto t4= std::chrono::high_resolution_clock::now();
// bootstrap mu from the largest diagonal of JtJ scaled by tau
if(mu<0){//first time only
T maxv=std::numeric_limits<T>::lowest();
for (int k=0; k<JtJ.outerSize(); ++k)
for (typename Eigen::SparseMatrix<T>::InnerIterator it(JtJ,k); it; ++it)
if (it.row()== it.col())
if (it.value()>maxv)
maxv=it.value();
mu=maxv*_params.tau;
}
T gain=0,prev_mu=0;
std::vector<T*> refs;
int ntries=0;
bool isStepAccepted=false;
auto t6=std::chrono::high_resolution_clock::now(),t5=std::chrono::high_resolution_clock::now();;
do{
//add dumping factor to JtJ.
#if 1 //very efficient in any case, but particularly if initial dump does not produce improvement and must reenter
// first pass caches pointers to the diagonal; later passes only patch
// the diagonal by mu-prev_mu instead of rebuilding JtJ+mu*I
if(refs.size()==0){//first time into the do
add_missing_diagonal_elements(JtJ);
get_diagonal_elements_refs_and_add(JtJ,refs,mu);
}
else for(size_t i=0;i<refs.size();i++) *refs[i]+= mu-prev_mu;//update mu
prev_mu=mu;
Eigen::SimplicialLDLT<Eigen::SparseMatrix<T> > chol(JtJ); // performs a Cholesky
#else //less efficient, but easier to understand
Eigen::SparseMatrix<T> A=JtJ+I*mu;
Eigen::SimplicialLDLT<Eigen::SparseMatrix<T> > chol(A); // performs a Cholesky
#endif
t5= std::chrono::high_resolution_clock::now();
eVector delta= chol.solve(B);
t6= std::chrono::high_resolution_clock::now();
eVector estimated_z=curr_z+delta;
//compute error
f_z_x(estimated_z,x64);
auto err=x64.cwiseProduct(x64).sum();
// gain ratio: actual error change vs. change predicted by the local model
auto L=0.5*delta.transpose()*((mu*delta) - B);
gain= (err-prevErr)/ L(0,0) ;
//get gain
if (gain>0 && ((err-prevErr)<0)){
// accepted: shrink mu and adopt the new solution
mu=mu*std::max(T(0.33),T(1.-pow(2*gain-1,3)));
v=2.f;
currErr=err;
curr_z=estimated_z;
isStepAccepted=true;
}
// rejected: grow mu. NOTE(review): if the very first trial step of the
// very first call is rejected, v is read here before any visible
// assignment -- confirm it is initialized elsewhere.
else{ mu=mu*v; v=v*5;}
}while( gain<=0 && ntries++<5 && !isStepAccepted);
if (_params.verbose) std::cout<<std::setprecision(5) <<"Curr Error="<<currErr<<" AErr(prev-curr)="<<(prevErr-currErr)/x64.rows()<<" gain="<<gain<<" dumping factor="<<mu<<std::endl;
if (_params.verbose) {std::cerr<<" J="<<splm_get_time(t2,t1)<<" transpose="<< splm_get_time(t22,t2)<<" Jt*J="<< splm_get_time(t3,t22)<<" B="<< splm_get_time(t4,t3) <<" chol="<< splm_get_time(t6,t5) <<std::endl;
// std::cerr<<"solve="<<T(t4-t3)/T(CLOCKS_PER_SEC)<<std::endl;
}
return isStepAccepted;
}
/*
  Copies the current (last accepted) solution into z and returns its
  squared-residual error.
*/
template<typename T>
T SparseLevMarq<T>:: getCurrentSolution(eVector &z)throw (std::exception){
z=curr_z;
return currErr;
}
/*
  Runs the full optimization loop: initializes the search, then repeatedly
  calls step() until one of the stop criteria fires (user stop function,
  max iterations, error thresholds, rejected step, or error increase).
  On return, z holds the final solution and the final error is returned.
*/
template<typename T>
T SparseLevMarq<T>::solve( eVector &z, F_z_x f_z_x, F_z_J f_J)throw (std::exception){
prevErr=std::numeric_limits<T>::max();
init(z,f_z_x);
// with a user stop function, maxIters and error thresholds are bypassed
if( _stopFunction){
do{
step(f_z_x,f_J);
if (_step_callback) _step_callback(curr_z);
}while(!_stopFunction(curr_z));
}
else{
//intial error estimation
int mustExit=0;
for ( int i = 0; i < _params.maxIters && !mustExit; i++ ) {
if (_params.verbose)std::cerr<<"iteration "<<i<<"/"<<_params.maxIters<< "  ";
bool isStepAccepted=step(f_z_x,f_J);
//check if we must exit
if ( currErr<_params.minError ) mustExit=1;
// stop on negligible absolute or per-residual improvement, or rejected step
if( fabs(prevErr -currErr)<=_params.min_step_error_diff || fabs((prevErr-currErr)/x64.rows())<=_params.min_average_step_error_diff || !isStepAccepted) mustExit=2;
//exit if error increment
if (currErr>prevErr )mustExit=3;
// if ( (prevErr-currErr) < 1e-5 ) mustExit=true;
if (_step_callback) _step_callback(curr_z);
prevErr=currErr;
}
// std::cout<<"Exit code="<<mustExit<<std::endl;
}
z=curr_z;
return currErr;
}
}
#endif
|
GB_unop__identity_fc64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc64_fp32
// op(A') function: GB_unop_tran__identity_fc64_fp32
// C type: GxB_FC64_t
// A type: float
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with typecast to every entry of A:
// Cx [p] = GxB_CMPLX ((double) Ax [p], 0).  Cx and Ax may be aliased.
GrB_Info GB_unop_apply__identity_fc64_fp32
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of entries
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
// typecast float -> double-complex with zero imaginary part
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!Ab [p]) continue ;
float aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast float -> double-complex, and apply
// the identity operator.  The actual work is in the included template.
GrB_Info GB_unop_tran__identity_fc64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // per-task workspaces for the transpose
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_uint32
// op(A') function: GB_tran__minv_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the MINV (multiplicative inverse) operator to every entry of A:
// Cx [p] = 1. / (double) Ax [p].  Cx and Ax may be aliased.
// NOTE: aij == 0 yields +Inf, since the uint32 is cast to double first.
GrB_Info GB_unop__minv_fp64_uint32
(
double *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz, // number of entries
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = 1. / (double) Ax [p], via the GB_GETA/GB_CASTING/GB_OP macros
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint32 -> double, and apply the
// MINV operator.  The actual work is in the included template.
GrB_Info GB_tran__minv_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts for the transpose
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across slices
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
actividad4.c |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "ctimer.h"
#include <omp.h>
/*
 * Generates n_vectores random vectors (lengths < tam_max) and computes the
 * mean and standard deviation of each one in parallel using OpenMP tasks.
 * Results are written to the binary file "media_desvt".
 */
int main( int argc, char *argv[] ) {
    int v, i;
    if( argc<2 ) {
        printf("Usage: %s n_vectores [tam_max] \n",argv[0]);
        return 1;
    }
    int n_vectores;
    sscanf(argv[1],"%d",&n_vectores);
    int tam_max = 10000;
    if( argc>2 ) {
        sscanf(argv[2],"%d",&tam_max);
    }
    /* Vector v has tam[v] entries drawn from (-tam[v], tam[v]). */
    double **M = (double **) malloc (n_vectores*sizeof(double*));
    int *tam = (int *) malloc (n_vectores*sizeof(int));
    for( v=0; v<n_vectores; v++ ) {
        tam[v] = rand()%tam_max;
        M[v] = (double *) malloc (tam[v]*sizeof(double));
        for( i = 0; i<tam[v]; i++ ) {
            M[v][i] = (double) rand()/RAND_MAX * 2.0*tam[v] - 1.0*tam[v];
        }
    }
    double elapsed, ucpu, scpu;
    ctimer(&elapsed,&ucpu,&scpu);
    double *media = (double *) malloc (n_vectores*sizeof(double));
    double *desvt = (double *) malloc (n_vectores*sizeof(double));
    /*********************************************************/
    /*            BEGINNING OF THE CODE TO INCLUDE           */
    double parc;
    /* Mean of each vector: one task per vector.
     * Bug fix: the accumulation used '=' instead of '+=', so only the last
     * element was summed. */
    #pragma omp parallel
    {
        #pragma omp single
        {
            for( v=0; v<n_vectores; v++ ) {
                #pragma omp task firstprivate(v) shared(M,media,tam) private(parc,i)
                {
                    parc = 0;
                    for( i = 0; i<tam[v]; i++ ) {
                        parc += M[v][i];
                    }
                    /* tam[v] can be 0 (rand()%tam_max); avoid 0/0. */
                    media[v] = (tam[v]>0) ? parc/tam[v] : 0.0;
                }
            }
        }
    }
    /* Standard deviation of each vector, using the means computed above.
     * Bug fixes: the original wrapped the whole loop in a single task (no
     * parallelism, and 'v' was captured before the loop initialized it),
     * and again used '=' instead of '+='. */
    #pragma omp parallel
    {
        #pragma omp single
        {
            for( v=0; v<n_vectores; v++ ) {
                #pragma omp task firstprivate(v) shared(M,media,desvt,tam) private(parc,i)
                {
                    parc = 0;
                    for( i = 0; i<tam[v]; i++ ) {
                        parc += pow((M[v][i]-media[v]),2);
                    }
                    desvt[v] = (tam[v]>0) ? sqrt(parc/tam[v]) : 0.0;
                }
            }
        }
    }
    /*               END OF THE CODE TO INCLUDE              */
    /*********************************************************/
    ctimer(&elapsed,&ucpu,&scpu);
    printf("Tiempo = %f segundos\n",elapsed);
    /* Persist results; check fopen so a write failure does not crash. */
    FILE *fp;
    fp = fopen("media_desvt","wb");
    if (fp != NULL) {
        fwrite (media, sizeof(double), n_vectores, fp);
        fwrite (desvt, sizeof(double), n_vectores, fp);
        fclose(fp);
    }
    free(desvt);
    free(media);
    free(tam);
    for( i = 0; i<n_vectores; i++ ) {
        free(M[i]);
    }
    free(M);
    return 0;
}
|
transformations.c | #include <stdlib.h> // Malloc and other functions
#include <math.h> // Mathematical functions
#include <stdbool.h>
#include "transformations.h"
//==============================================================================
#define NUM_THREAD 2
//==============================================================================
// Local Functions Prototype
/*
* Function to calculate and return interpolated values
*/
dataType interpolated(dataType k_t, dataType i_t, dataType j_t, int top, int bottom, int left, int right, int begin, int end, dataType ** imageDataPtr, size_t imageWidth);
/*
* Transform Function for imageDataPtr
*/
void transform3DImage(dataType ** sourceDataPtr, dataType ** transformPointsPtr, Point3D translation, Point3D scaling, Point3D rotation, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType bgValue, dataType centroid[3], bool parallelize)
{
int k, i, j;
dataType k_a, i_a, j_a; // Affine indices
//==============================================================================
// Rotation Angles to radians
//T theta = (rotation.y * M_PI) / 180, psi = (rotation.z * M_PI) / 180, phi = (rotation.x * M_PI) / 180;
dataType theta = (rotation.y), psi = (rotation.z), phi = (rotation.x);
//==============================================================================
// Center Points
dataType cz = centroid[2], cx = centroid[0], cy = centroid[1];
//==============================================================================
// Dimension variable
// dimWidth = X*Y
size_t dimWidth = imageLength * imageWidth;
//==============================================================================
// Temporary parameters
dataType tmpX, tmpY, tmpZ, tmp;
// Transformed
dataType k_t, i_t, j_t; // Transformed indices
//==============================================================================
int bottom;
int top;
// X
int left;
int right;
// Y
int begin;
int end;
//==============================================================================
dataType sz = scaling.z, sy = scaling.y, sx = scaling.x;
dataType tz = translation.z, ty = translation.y, tx = translation.x;
//==============================================================================
size_t x;
//==============================================================================
if (parallelize)
{
// OpenMP
omp_set_dynamic(0); // Disable dynamic adjustment of threads
//omp_set_num_threads(omp_num_procs()); // Request as many threads as you have processors
omp_set_num_threads(NUM_THREAD); // Request as many threads as you have processors
#pragma omp parallel
{
#pragma omp for private(k, i, j, k_a, i_a, j_a, x, k_t, i_t, j_t, tmpX, tmpY, tmpZ, bottom, top, left, right, begin, end, tmp) schedule(static) nowait
for (k = 0; k < imageHeight; k++)
{
k_a = k - cz; // Move to origin Z
// Apply scaling
k_a = k_a / sz;
for (i = 0; i < imageLength; i++)
{
i_a = i - cx; // Move to origin x
// Apply scaling
i_a = i_a / sx;
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
x = x_new(i, j, imageLength);
//==============================================================================
j_a = j - cy; // Move to origin Y
// Apply scaling
j_a = j_a / sy;
//==============================================================================
coordinate_rotate(k_a, i_a, j_a, theta, psi, phi, &k_t, &i_t, &j_t);
//==============================================================================
// Move back to centroid
tmpX = i_t + cx;
tmpY = j_t + cy;
tmpZ = k_t + cz;
//==============================================================================
// Add translation
i_t = tmpX - tx;
j_t = tmpY - ty;
k_t = tmpZ - tz;
//==============================================================================
// Use Interpolation to get the values
// Locations for Tri-linear Interpolation
// Z
bottom = (int)floor(k_t);
top = bottom + 1;
// X
left = (int)floor(i_t);
right = left + 1;
// Y
begin = (int)floor(j_t);
end = begin + 1;
//==============================================================================
// Check if within limits
if (bottom >= 0 && top < imageHeight && left >= 0 && right < imageLength && begin >= 0 && end < imageWidth)
{
//==============================================================================
tmp = interpolated(k_t, i_t, j_t, top, bottom, left, right, begin, end, sourceDataPtr, imageLength);
//==============================================================================
transformPointsPtr[k][x] = tmp;
//==============================================================================
}
else
{
//==============================================================================
transformPointsPtr[k][x] = bgValue; // Background value
//==============================================================================
}
}
}
}
}
}
else
{
//==============================================================================
// Sequential
for (k = 0; k < imageHeight; k++)
{
k_a = k - cz; // Move to origin Z
// Apply scaling
k_a = k_a / sz;
for (i = 0; i < imageLength; i++)
{
i_a = i - cx; // Move to origin x
// Apply scaling
i_a = i_a / sx;
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
x = x_new(i, j, imageLength);
//==============================================================================
j_a = j - cy; // Move to origin Y
// Apply scaling
j_a = j_a / sy;
//==============================================================================
coordinate_rotate(k_a, i_a, j_a, theta, psi, phi, &k_t, &i_t, &j_t);
//==============================================================================
// Move back to centroid
tmpX = i_t + cx;
tmpY = j_t + cy;
tmpZ = k_t + cz;
//==============================================================================
// Add translation
i_t = tmpX - tx;
j_t = tmpY - ty;
k_t = tmpZ - tz;
//==============================================================================
// Use Interpolation to get the values
// Locations for Tri-linear Interpolation
// Z
bottom = (int)floor(k_t);
top = bottom + 1;
// X
left = (int)floor(i_t);
right = left + 1;
// Y
begin = (int)floor(j_t);
end = begin + 1;
//==============================================================================
// Check if within limits
if (bottom >= 0 && top < imageHeight && left >= 0 && right < imageLength && begin >= 0 && end < imageWidth)
{
//==============================================================================
tmp = interpolated(k_t, i_t, j_t, top, bottom, left, right, begin, end, sourceDataPtr, imageLength);
//==============================================================================
transformPointsPtr[k][x] = tmp;
//==============================================================================
}
else
{
//==============================================================================
transformPointsPtr[k][x] = bgValue; // Background value
//==============================================================================
}
}
}
}
}
//==============================================================================
}
//==============================================================================
/*
 * transformInverse3DImage
 * Fills transformPointsPtr with the inverse affine transform of sourceDataPtr.
 * For every output voxel (k, i, j): shift to the centroid origin, scale
 * (multiply) by `scaling`, apply the inverse rotation, shift back to the
 * centroid and add `translation`.  The source value at the resulting
 * (fractional) location is obtained by tri-linear interpolation; voxels that
 * map outside the volume receive bgValue.
 *
 * sourceDataPtr      - input volume, one pointer per Z slice of
 *                      imageLength * imageWidth samples
 * transformPointsPtr - output volume, same layout as the input
 * translation        - translation vector, added after rotation
 * scaling            - per-axis scale factors (multiplied in; the forward
 *                      transform divides)
 * rotation           - rotation angles, used as given (the degree->radian
 *                      conversion below is intentionally disabled)
 * imageHeight/Length/Width - volume dimensions along Z, X, Y
 * bgValue            - value written for out-of-bounds samples
 * centroid           - rotation/scaling center, ordered {cx, cy, cz}
 * parallelize        - when true, the outer Z loop runs under OpenMP with
 *                      NUM_THREAD threads
 *
 * NOTE: the parallel and sequential branches are intentionally identical
 * except for the OpenMP pragmas.  The previously declared but unused
 * `dimWidth` local has been removed.
 */
void transformInverse3DImage(dataType ** sourceDataPtr, dataType ** transformPointsPtr, Point3D translation, Point3D scaling, Point3D rotation, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType bgValue, dataType centroid[3], bool parallelize)
{
    int k, i, j;
    dataType k_a, i_a, j_a; // Affine (origin-centered, scaled) indices
    //==============================================================================
    // Rotation Angles to radians (conversion disabled on purpose)
    //T theta = (rotation.y * M_PI) / 180, psi = (rotation.z * M_PI) / 180, phi = (rotation.x * M_PI) / 180;
    dataType theta = (rotation.y), psi = (rotation.z), phi = (rotation.x);
    //==============================================================================
    // Center Points (centroid array is ordered {cx, cy, cz})
    dataType cz = centroid[2], cx = centroid[0], cy = centroid[1];
    //==============================================================================
    // Temporary parameters for the centroid shift and the interpolated sample
    dataType tmpX, tmpY, tmpZ, tmp;
    // Transformed (source-space) indices
    dataType k_t, i_t, j_t;
    //==============================================================================
    // Tri-linear interpolation corner indices: Z
    int bottom;
    int top;
    // X
    int left;
    int right;
    // Y
    int begin;
    int end;
    //==============================================================================
    dataType sz = scaling.z, sy = scaling.y, sx = scaling.x;
    dataType tz = translation.z, ty = translation.y, tx = translation.x;
    //==============================================================================
    // Flattened (i, j) -> row-major index inside one Z slice
    size_t x;
    //==============================================================================
    if (parallelize)
    {
        //printf("Running Parallelized on %d threads\n", NUM_THREAD);
        // OpenMP
        omp_set_dynamic(0); // Disable dynamic adjustment of threads
        //omp_set_num_threads(omp_num_procs()); // Request as many threads as you have processors
        omp_set_num_threads(NUM_THREAD); // Request as many threads as you have processors
#pragma omp parallel
        {
#pragma omp for private(k, i, j, k_a, i_a, j_a, x, k_t, i_t, j_t, tmpX, tmpY, tmpZ, bottom, top, left, right, begin, end, tmp) schedule(static) nowait
            for (k = 0; k < imageHeight; k++)
            {
                k_a = k - cz; // Move to origin Z
                // Apply scaling
                k_a = k_a * sz;
                for (i = 0; i < imageLength; i++)
                {
                    i_a = i - cx; // Move to origin X
                    // Apply scaling
                    i_a = i_a * sx;
                    for (j = 0; j < imageWidth; j++)
                    {
                        // 2D to 1D representation for i, j
                        x = x_new(i, j, imageLength);
                        j_a = j - cy; // Move to origin Y
                        // Apply scaling
                        j_a = j_a * sy;
                        // Apply inverse rotation
                        i_t = x_rotateInv(k_a, i_a, j_a, theta, psi, phi);
                        j_t = y_rotateInv(k_a, i_a, j_a, theta, psi, phi);
                        k_t = z_rotateInv(k_a, i_a, j_a, theta, psi, phi);
                        // Move back to centroid
                        tmpX = i_t + cx;
                        tmpY = j_t + cy;
                        tmpZ = k_t + cz;
                        // Add translation
                        i_t = tmpX + tx;
                        j_t = tmpY + ty;
                        k_t = tmpZ + tz;
                        // Corner locations for tri-linear interpolation
                        // Z
                        bottom = (int)floor(k_t);
                        top = bottom + 1;
                        // X
                        left = (int)floor(i_t);
                        right = left + 1;
                        // Y
                        begin = (int)floor(j_t);
                        end = begin + 1;
                        // Check if all eight corners are within the volume
                        if (bottom >= 0 && top < imageHeight && left >= 0 && right < imageLength && begin >= 0 && end < imageWidth)
                        {
                            tmp = interpolated(k_t, i_t, j_t, top, bottom, left, right, begin, end, sourceDataPtr, imageLength);
                            transformPointsPtr[k][x] = tmp;
                        }
                        else
                        {
                            transformPointsPtr[k][x] = bgValue; // Background value
                        }
                    }
                }
            }
        }
    }
    else
    {
        // Sequential - same body as the parallel branch above
        //printf("Running Sequential code \n");
        for (k = 0; k < imageHeight; k++)
        {
            k_a = k - cz; // Move to origin Z
            // Apply scaling
            k_a = k_a * sz;
            for (i = 0; i < imageLength; i++)
            {
                i_a = i - cx; // Move to origin X
                // Apply scaling
                i_a = i_a * sx;
                for (j = 0; j < imageWidth; j++)
                {
                    // 2D to 1D representation for i, j
                    x = x_new(i, j, imageLength);
                    j_a = j - cy; // Move to origin Y
                    // Apply scaling
                    j_a = j_a * sy;
                    // Apply inverse rotation
                    i_t = x_rotateInv(k_a, i_a, j_a, theta, psi, phi);
                    j_t = y_rotateInv(k_a, i_a, j_a, theta, psi, phi);
                    k_t = z_rotateInv(k_a, i_a, j_a, theta, psi, phi);
                    // Move back to centroid
                    tmpX = i_t + cx;
                    tmpY = j_t + cy;
                    tmpZ = k_t + cz;
                    // Add translation
                    i_t = tmpX + tx;
                    j_t = tmpY + ty;
                    k_t = tmpZ + tz;
                    // Corner locations for tri-linear interpolation
                    // Z
                    bottom = (int)floor(k_t);
                    top = bottom + 1;
                    // X
                    left = (int)floor(i_t);
                    right = left + 1;
                    // Y
                    begin = (int)floor(j_t);
                    end = begin + 1;
                    // Check if all eight corners are within the volume
                    if (bottom >= 0 && top < imageHeight && left >= 0 && right < imageLength && begin >= 0 && end < imageWidth)
                    {
                        tmp = interpolated(k_t, i_t, j_t, top, bottom, left, right, begin, end, sourceDataPtr, imageLength);
                        transformPointsPtr[k][x] = tmp;
                    }
                    else
                    {
                        transformPointsPtr[k][x] = bgValue; // Background value
                    }
                }
            }
        }
    }
}
//==============================================================================
/*
* Rotated indices
*/
/*
 * coordinate_rotate
 * Rotates the point (z, x, y) by the Euler angles (theta, psi, phi) and
 * stores the rotated Z, X and Y components in *k_t, *i_t and *j_t.
 * The three output expressions are the same rows produced by z_rotate,
 * x_rotate and y_rotate; the shared trigonometric products are computed
 * once here so a caller needing all three components pays for the
 * transcendental calls only once.
 */
void coordinate_rotate(dataType z, dataType x, dataType y, dataType theta, dataType psi, dataType phi, dataType * k_t, dataType * i_t, dataType * j_t)
{
    // Pairwise products that appear directly as matrix entries
    dataType cosPsiCosTheta = (dataType)(cos(psi)*cos(theta)), cosPhiCosPsi = (dataType)(cos(phi)*cos(psi)), cosPhiCosTheta = (dataType)(cos(phi)*cos(theta));
    dataType sinPhiSinTheta = (dataType)(sin(phi)*sin(theta)), sinPhiSinPsi = (dataType)(sin(phi)*sin(psi)), sinThetaSinPsi = (dataType)(sin(theta)*sin(psi));
    // Single-angle terms
    dataType sinTheta = (dataType)sin(theta), sinPsi = (dataType)sin(psi), sinPhi = (dataType)sin(phi);
    dataType cosTheta = (dataType)cos(theta), cosPsi = (dataType)cos(psi), cosPhi = (dataType)cos(phi);
    // Terms for the X (i) row
    dataType cosThetaSinPsi = cosTheta * sinPsi;
    // Terms for the Y (j) row:
    // ((cos(phi)*sin(psi) + sin(phi)*sin(theta)*cos(psi))*x
    //  + (cos(phi)*cos(psi) + sin(phi)*sin(theta)*(-sin(psi)))*y
    //  + ((-sin(phi))*cos(theta))*z)
    dataType negSinPsi = -1 * sinPsi;
    dataType negSinPhi = -1 * sinPhi;
    dataType cosPhiSinPsi = cosPhi * sinPsi;
    dataType sinPhiSinThetaCosPsi = sinPhi * sinTheta * cosPsi;
    dataType sinPhiSinThetaNegSinPsi = sinPhiSinTheta * negSinPsi;
    dataType negSinPhiCosTheta = negSinPhi * cosTheta;
    // Terms for the Z (k) row:
    // ((sin(phi)*sin(psi) + cos(phi)*(-sin(theta))*cos(psi))*x
    //  + (sin(phi)*cos(psi) + cos(phi)*sin(theta)*sin(psi))*y
    //  + (cos(phi)*cos(theta))*z)
    dataType negSinTheta = -1 * sinTheta;
    dataType cosPhiNegSinThetaCosPsi = cosPhi * negSinTheta * cosPsi;
    dataType sinPhiCosPsi = sinPhi * cosPsi;
    dataType cosPhiSinThetaSinPsi = cosPhi * sinThetaSinPsi;
    // X component (matches x_rotate)
    *i_t = x * cosPsiCosTheta - y * cosThetaSinPsi + z * sinTheta;
    // Y component (matches y_rotate)
    *j_t = ((cosPhiSinPsi + sinPhiSinThetaCosPsi)*(x)+(cosPhiCosPsi + sinPhiSinThetaNegSinPsi)*(y)+(negSinPhiCosTheta)*(z));
    // Z component (matches z_rotate)
    *k_t = ((sinPhiSinPsi + cosPhiNegSinThetaCosPsi)*(x)+(sinPhiCosPsi + cosPhiSinThetaSinPsi)*(y)+(cosPhiCosTheta)*(z));
}
/*
 * x_rotate
 * Forward-rotation X component for the point (z, x, y); note this row
 * does not depend on phi, hence the shorter parameter list.
 */
dataType x_rotate(dataType z, dataType x, dataType y, dataType theta, dataType psi)
{
    double ct = cos(theta), st = sin(theta);
    double cp = cos(psi), sp = sin(psi);
    return (dataType)(x * cp * ct - y * ct * sp + z * st);
}
// Inverse
/*
 * x_rotateInv
 * Inverse-rotation X component for the point (z, x, y).
 */
dataType x_rotateInv(dataType z, dataType x, dataType y, dataType theta, dataType psi, dataType phi)
{
    double ct = cos(theta), st = sin(theta);
    double cp = cos(psi), sp = sin(psi);
    double cf = cos(phi), sf = sin(phi);
    return (dataType)((ct * cp) * (x) + (cf * sp + sf * st * cp) * (y) + (sf * sp + cf * (-st) * cp) * (z));
}
//==============================================================================
/*
 * y_rotate
 * Forward-rotation Y component for the point (z, x, y).
 */
dataType y_rotate(dataType z, dataType x, dataType y, dataType theta, dataType psi, dataType phi)
{
    double ct = cos(theta), st = sin(theta);
    double cp = cos(psi), sp = sin(psi);
    double cf = cos(phi), sf = sin(phi);
    return (dataType)((cf * sp + sf * st * cp) * (x) + (cf * cp + sf * st * (-sp)) * (y) + ((-sf) * ct) * (z));
}
//==============================================================================
/*
 * y_rotateInv
 * Inverse-rotation Y component for the point (z, x, y).
 */
dataType y_rotateInv(dataType z, dataType x, dataType y, dataType theta, dataType psi, dataType phi)
{
    double ct = cos(theta), st = sin(theta);
    double cp = cos(psi), sp = sin(psi);
    double cf = cos(phi), sf = sin(phi);
    return (dataType)((ct * (-sp)) * (x) + (cf * cp + sf * st * (-sp)) * (y) + (sf * cp + cf * st * sp) * (z));
}
//==============================================================================
/*
 * z_rotate
 * Forward-rotation Z component for the point (z, x, y).
 */
dataType z_rotate(dataType z, dataType x, dataType y, dataType theta, dataType psi, dataType phi)
{
    double ct = cos(theta), st = sin(theta);
    double cp = cos(psi), sp = sin(psi);
    double cf = cos(phi), sf = sin(phi);
    return (dataType)((sf * sp + cf * (-st) * cp) * (x) + (sf * cp + cf * st * sp) * (y) + (cf * ct) * (z));
}
//==============================================================================
/*
 * z_rotateInv
 * Inverse-rotation Z component for the point (z, x, y); psi is accepted
 * for signature symmetry with the other rotation helpers but unused here.
 */
dataType z_rotateInv(dataType z, dataType x, dataType y, dataType theta, dataType psi, dataType phi)
{
    double ct = cos(theta), st = sin(theta);
    double cf = cos(phi), sf = sin(phi);
    return (dataType)((st) * (x) + ((-sf) * ct) * (y) + (cf * ct) * (z));
}
//==============================================================================
/*
 * interpolated
 * Samples the volume at the fractional location (k_t, i_t, j_t) by
 * tri-linear interpolation between the eight surrounding voxels.
 * top/bottom bound the Z slices, left/right bound X and begin/end bound Y;
 * the caller is responsible for having validated all six as in-range.
 */
dataType interpolated(dataType k_t, dataType i_t, dataType j_t, int top, int bottom, int left, int right, int begin, int end, dataType ** imageDataPtr, size_t imageLength)
{
    dataType *topSlice = imageDataPtr[top];
    dataType *bottomSlice = imageDataPtr[bottom];
    // Four corners on the "bottom" Z slice
    dataType c001 = bottomSlice[x_new(left, begin, imageLength)];
    dataType c101 = bottomSlice[x_new(right, begin, imageLength)];
    dataType c011 = bottomSlice[x_new(left, end, imageLength)];
    dataType c111 = bottomSlice[x_new(right, end, imageLength)];
    // Four corners on the "top" Z slice
    dataType c000 = topSlice[x_new(left, begin, imageLength)];
    dataType c100 = topSlice[x_new(right, begin, imageLength)];
    dataType c010 = topSlice[x_new(left, end, imageLength)];
    dataType c110 = topSlice[x_new(right, end, imageLength)];
    return trilinearInterpolation(i_t, (dataType)left, (dataType)right, j_t, (dataType)begin, (dataType)end, c000, c001, c010, c011, c100, c101, c110, c111, (dataType)k_t, (dataType)bottom, (dataType)top);
}
//============================================================================== |
AVX-512Fsearch.c | #include "AVX-512Fsearch.h"
// Host search using AVX-512 instructions and Score Profile technique
/*
 * search_avx512f_ap
 * Host-side Smith-Waterman database search over 16-lane AVX-512 integer
 * vectors.  Each query is aligned against every vectorized database
 * sequence chunk; database chunks are processed in blocks of block_width
 * columns so the working set stays cache resident.  Queries shorter than
 * query_length_threshold use the Query Profile technique (substitution
 * scores pre-gathered per query residue); longer ones build a Score
 * Profile per database block.  Final best scores land in `scores`
 * (one __m512i per query/chunk pair) and the elapsed wall time in
 * *workTime.
 *
 * NOTE(review): the alignment recurrence below depends on exact buffer
 * swap/update ordering across blocks; code is intentionally unchanged,
 * comments only.
 */
void search_avx512f_ap (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned long int query_sequences_count, unsigned int * query_disp,
    char profile, unsigned short int query_length_threshold,
    char * vect_db_sequences, unsigned short int * vect_db_sequences_lengths, unsigned short int * vect_db_nbbs, unsigned long int vect_db_sequences_count, unsigned long int * vect_db_sequences_disp,
    __m512i * submat, int open_gap, int extend_gap, int n_threads, int block_width, __m512i * scores, double * workTime){

    long int i, j, k, qp_count, sp_count;
    double tick;
    char *a;
    unsigned int * a_disp, queryProfiles_length;
    unsigned long int * b_disp = NULL;
    unsigned short int * m, *n, *nbbs, query_sequences_max_length;
    char *b;
    __m512i * queryProfiles;

    // Short aliases: a/m/a_disp describe the queries, b/n/nbbs/b_disp the
    // vectorized database
    a = query_sequences;
    m = query_sequences_lengths;
    a_disp = query_disp;
    // lengths are sorted ascending, so the last entry is the maximum
    query_sequences_max_length = query_sequences_lengths[query_sequences_count-1];
    b = vect_db_sequences;
    n = vect_db_sequences_lengths;
    nbbs = vect_db_nbbs;
    b_disp = vect_db_sequences_disp;

    // Force a single technique when explicitly requested
    if (profile == QUERY_PROFILE)
        query_length_threshold = query_sequences_max_length+1;
    else
        if (profile == SCORE_PROFILE)
            query_length_threshold = 0;

    // calculate number of query sequences that are processed with query and score profile
    i = 0;
    while ((i < query_sequences_count) && (query_sequences_lengths[i] < query_length_threshold))
        i++;
    qp_count = i;
    sp_count = query_sequences_count-qp_count;

    // allocate memory for query profiles (if correspond); two __m512i
    // (lo/hi permute tables) per query residue
    if (qp_count > 0)
        queryProfiles = (__m512i *)_mm_malloc((a_disp[qp_count])*2*sizeof(__m512i),MEMALIGN);

    tick = dwalltime();

    #pragma omp parallel default(none) shared(block_width, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, \
        vect_db_sequences_count, open_gap, extend_gap, query_sequences_max_length, qp_count, sp_count, \
        queryProfiles, query_length_threshold) num_threads(n_threads)
    {
        char * ptr_a;
        __m512i *row1, *row2, *maxCol, *maxRow, *lastCol, *tmp, *ptr_scores, *bIndexes, *queryProfile, * scoreProfile, *ptr_scoreProfile1, *ptr_scoreProfile2;
        __declspec(align(16)) __m128i* ptr_b, *ptr_b_block;
        __declspec(align(MEMALIGN)) __m512i vzero = _mm512_setzero_epi32(), score, previous, current1, current2, aux1, auxLastCol;
        __declspec(align(MEMALIGN)) __m512i vextend_gap = _mm512_set1_epi32(extend_gap), vopen_extend_gap = _mm512_set1_epi32(open_gap+extend_gap);
        __declspec(align(MEMALIGN)) __m512i v16 = _mm512_set1_epi32(16), submat_hi1, submat_lo1, submat_hi2, submat_lo2, bValues, maxRow1, maxRow2;
        __mmask16 * masks, mask;
        unsigned int i, j, ii, jj, k, disp, dim1, dim2, nbb;
        unsigned long int t, s, q;

        // allocate per-thread auxiliary DP buffers
        row1 = (__m512i *) _mm_malloc((block_width+1)*sizeof(__m512i),MEMALIGN);
        row2 = (__m512i *) _mm_malloc((block_width+1)*sizeof(__m512i),MEMALIGN);
        maxCol = (__m512i *) _mm_malloc((block_width+1)*sizeof(__m512i),MEMALIGN);
        maxRow = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i),MEMALIGN);
        lastCol = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i),MEMALIGN);

        // allocate memory for SP (if correspond)
        if (query_sequences_max_length >= query_length_threshold)
            scoreProfile = (__m512i *) _mm_malloc(SUBMAT_ROWS*block_width*sizeof(__m512i), MEMALIGN);

        // build query profiles (if correspond)
        if (qp_count > 0) {
            // alloc memory for indexes
            bIndexes = (__m512i *) _mm_malloc((block_width)*sizeof(__m512i),MEMALIGN);
            masks = (__mmask16 *) _mm_malloc((block_width)*sizeof(__mmask16),MEMALIGN);
            // gather the lo/hi substitution rows for every query residue
            #pragma omp for schedule(dynamic)
            for (i=0; i< a_disp[qp_count] ; i++) {
                queryProfiles[i*2] = submat[a[i]*2];
                queryProfiles[i*2+1] = submat[a[i]*2+1];
            }
        }

        // calculate chunk alignments using query profile technique;
        // iterate the flattened (query, chunk) space largest-first
        #pragma omp for schedule(dynamic) nowait
        for (t=0; t< qp_count*vect_db_sequences_count; t++) {
            q = (qp_count-1) - (t % qp_count);
            s = (vect_db_sequences_count-1) - (t / qp_count);
            queryProfile = queryProfiles + a_disp[q]*2;
            ptr_b = (__m128i*)(b + b_disp[s]);
            ptr_scores = scores + (q*vect_db_sequences_count+s);
            // init buffers
            #pragma unroll(UNROLL_COUNT)
            for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used
            #pragma unroll(UNROLL_COUNT)
            for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32();
            // set score to 0
            score = _mm512_setzero_epi32();
            // calculate number of blocks
            nbb = nbbs[s];
            for (k=0; k < nbb; k++){
                // calculate dim1: columns in this block (last block may be short)
                disp = k*block_width;
                dim1 = (block_width < n[s]-disp ? block_width : n[s]-disp);
                // calculate dim2
                dim2 = dim1 / DB_SEQ_LEN_MULT;
                // init buffers
                #pragma unroll(UNROLL_COUNT)
                for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used
                #pragma unroll(UNROLL_COUNT)
                for (i=0; i<dim1 ; i++ ) row1[i] = _mm512_setzero_epi32();
                auxLastCol = _mm512_setzero_epi32();
                // get bIndexes: widen db residues to 32 bit and remember which
                // lanes need the hi half of the substitution row
                ptr_b_block = ptr_b + disp;
                #pragma unroll(UNROLL_COUNT)
                for (i=0; i<dim1 ; i++ ) {
                    bIndexes[i] = _mm512_cvtepi8_epi32(ptr_b_block[i]);
                    masks[i] = _mm512_cmpge_epi32_mask(bIndexes[i],v16);
                }
                // process two query rows per iteration
                for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){
                    // update row[0] with lastCol[i-1]
                    row1[0] = lastCol[i];
                    previous = lastCol[i+1];
                    // load submat values corresponding to current a residue
                    submat_lo1 = (queryProfile[i*2]);
                    submat_hi1 = (queryProfile[i*2+1]);
                    submat_lo2 = (queryProfile[(i+1)*2]);
                    submat_hi2 = (queryProfile[(i+1)*2+1]);
                    // store maxRow in auxiliaries
                    maxRow1 = maxRow[i];
                    maxRow2 = maxRow[i+1];
                    for (ii=0; ii<dim2 ; ii++) {
                        #pragma unroll(DB_SEQ_LEN_MULT)
                        for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) {
                            // calculate the diagonal value
                            aux1 = _mm512_permutevar_epi32(bIndexes[j-1], submat_lo1);
                            aux1 = _mm512_mask_permutevar_epi32(aux1, masks[j-1], bIndexes[j-1], submat_hi1);
                            current1 = _mm512_add_epi32(row1[j-1], aux1);
                            // calculate current1 max value
                            current1 = _mm512_max_epi32(current1, maxRow1);
                            current1 = _mm512_max_epi32(current1, maxCol[j]);
                            current1 = _mm512_max_epi32(current1, vzero);
                            // update maxRow and maxCol
                            maxRow1 = _mm512_sub_epi32(maxRow1, vextend_gap);
                            maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap);
                            aux1 = _mm512_sub_epi32(current1, vopen_extend_gap);
                            maxRow1 = _mm512_max_epi32(maxRow1, aux1);
                            maxCol[j] = _mm512_max_epi32(maxCol[j], aux1);
                            // update max score
                            score = _mm512_max_epi32(score,current1);
                            // calculate the diagonal value
                            aux1 = _mm512_permutevar_epi32(bIndexes[j-1], submat_lo2);
                            aux1 = _mm512_mask_permutevar_epi32(aux1, masks[j-1], bIndexes[j-1], submat_hi2);
                            current2 = _mm512_add_epi32(previous, aux1);
                            // update previous
                            previous = current1;
                            // calculate current2 max value
                            current2 = _mm512_max_epi32(current2, maxRow2);
                            current2 = _mm512_max_epi32(current2, maxCol[j]);
                            current2 = _mm512_max_epi32(current2, vzero);
                            // update maxRow and maxCol
                            maxRow2 = _mm512_sub_epi32(maxRow2, vextend_gap);
                            maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap);
                            aux1 = _mm512_sub_epi32(current2, vopen_extend_gap);
                            maxRow2 = _mm512_max_epi32(maxRow2, aux1);
                            maxCol[j] = _mm512_max_epi32(maxCol[j], aux1);
                            // update row buffer
                            row2[j] = current2;
                            // update max score
                            score = _mm512_max_epi32(score,current2);
                        }
                    }
                    // carry boundary state into the next column block
                    if (k != nbb-1) {
                        // update maxRow
                        maxRow[i] = maxRow1;
                        maxRow[i+1] = maxRow2;
                        // update lastCol
                        lastCol[i] = auxLastCol;
                        lastCol[i+1] = current1;
                        auxLastCol = current2;
                    }
                    // swap buffers
                    tmp = row1;
                    row1 = row2;
                    row2 = tmp;
                }
            }
            // store max value
            _mm512_store_epi32(ptr_scores, score);
        }

        // calculate chunk alignments using score profile technique
        #pragma omp for schedule(dynamic) nowait
        for (t=0; t< sp_count*vect_db_sequences_count; t++) {
            q = qp_count + (sp_count-1) - (t % sp_count);
            s = (vect_db_sequences_count-1) - (t / sp_count);
            ptr_a = a + a_disp[q];
            ptr_b = (__m128i*)(b + b_disp[s]);
            ptr_scores = scores + (q*vect_db_sequences_count+s);
            // init buffers
            #pragma unroll(UNROLL_COUNT)
            for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used
            #pragma unroll(UNROLL_COUNT)
            for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32();
            // set score to 0
            score = _mm512_setzero_epi32();
            // calculate number of blocks
            nbb = nbbs[s];
            for (k=0; k < nbb; k++){
                // calculate dim1
                disp = k*block_width;
                dim1 = (block_width < n[s]-disp ? block_width : n[s]-disp);
                // calculate dim2
                dim2 = dim1 / DB_SEQ_LEN_MULT;
                // init buffers
                #pragma unroll(UNROLL_COUNT)
                for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used
                #pragma unroll(UNROLL_COUNT)
                for (i=0; i<dim1 ; i++ ) row1[i] = _mm512_setzero_epi32();
                auxLastCol = _mm512_setzero_epi32();
                // build score profile: one substitution-score vector per
                // (residue row, block column)
                ptr_b_block = ptr_b + disp;
                for (i=0; i< dim1 ;i++ ) {
                    bValues = _mm512_cvtepi8_epi32(ptr_b_block[i]);
                    mask = _mm512_cmpge_epi32_mask(bValues,v16);
                    ptr_scoreProfile1 = scoreProfile + i;
                    #pragma unroll
                    for (j=0; j< SUBMAT_ROWS; j++) {
                        aux1 = _mm512_permutevar_epi32(bValues, (submat[j*2]));
                        ptr_scoreProfile1[j*dim1] = _mm512_mask_permutevar_epi32(aux1, mask, bValues, (submat[j*2+1]));
                    }
                }
                // process two query rows per iteration
                for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){
                    // update row[0] with lastCol[i-1]
                    row1[0] = lastCol[i];
                    previous = lastCol[i+1];
                    // calculate score profile displacement
                    ptr_scoreProfile1 = scoreProfile+ptr_a[i]*dim1;
                    ptr_scoreProfile2 = scoreProfile+ptr_a[i+1]*dim1;
                    // store maxRow in auxiliaries
                    maxRow1 = maxRow[i];
                    maxRow2 = maxRow[i+1];
                    for (ii=0; ii<dim2 ; ii++) {
                        #pragma unroll(DB_SEQ_LEN_MULT)
                        for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) {
                            // calculate the diagonal value
                            current1 = _mm512_add_epi32(row1[j-1], (ptr_scoreProfile1[j-1]));
                            // calculate current1 max value
                            current1 = _mm512_max_epi32(current1, maxRow1);
                            current1 = _mm512_max_epi32(current1, maxCol[j]);
                            current1 = _mm512_max_epi32(current1, vzero);
                            // update maxRow and maxCol
                            maxRow1 = _mm512_sub_epi32(maxRow1, vextend_gap);
                            maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap);
                            aux1 = _mm512_sub_epi32(current1, vopen_extend_gap);
                            maxRow1 = _mm512_max_epi32(maxRow1, aux1);
                            maxCol[j] = _mm512_max_epi32(maxCol[j], aux1);
                            // update max score
                            score = _mm512_max_epi32(score,current1);
                            // calculate the diagonal value
                            current2 = _mm512_add_epi32(previous, (ptr_scoreProfile2[j-1]));
                            // update previous
                            previous = current1;
                            // calculate current2 max value
                            current2 = _mm512_max_epi32(current2, maxRow2);
                            current2 = _mm512_max_epi32(current2, maxCol[j]);
                            current2 = _mm512_max_epi32(current2, vzero);
                            // update maxRow and maxCol
                            maxRow2 = _mm512_sub_epi32(maxRow2, vextend_gap);
                            maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap);
                            aux1 = _mm512_sub_epi32(current2, vopen_extend_gap);
                            maxRow2 = _mm512_max_epi32(maxRow2, aux1);
                            maxCol[j] = _mm512_max_epi32(maxCol[j], aux1);
                            // update row buffer
                            row2[j] = current2;
                            // update max score
                            score = _mm512_max_epi32(score,current2);
                        }
                    }
                    // carry boundary state into the next column block
                    if (k != nbb-1) {
                        // update maxRow
                        maxRow[i] = maxRow1;
                        maxRow[i+1] = maxRow2;
                        // update lastCol
                        lastCol[i] = auxLastCol;
                        lastCol[i+1] = current1;
                        auxLastCol = current2;
                    }
                    // swap buffers
                    tmp = row1;
                    row1 = row2;
                    row2 = tmp;
                }
            }
            // store max value
            _mm512_store_epi32(ptr_scores, score);
        }

        // release per-thread buffers
        _mm_free(row1);_mm_free(row2); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol);
        if (qp_count > 0) { _mm_free(bIndexes); _mm_free(masks); }
        if (sp_count > 0) _mm_free(scoreProfile);
    }

    *workTime = dwalltime()-tick;

    if (qp_count > 0) _mm_free(queryProfiles);
}
|
tutorial_region.c | /*
* Copyright (c) 2015, 2016, 2017, 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdint.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "geopm.h"
#include "tutorial_region.h"
#ifdef TUTORIAL_ENABLE_MKL
#include "mkl.h"
#endif
/*
 * Blocks the caller for big_o seconds using clock_nanosleep() on
 * CLOCK_REALTIME.  A big_o of 0.0 is a no-op.  When do_report is
 * non-zero, the requested duration is printed first.  Returns 0 on
 * success or the clock_nanosleep() error number.
 */
int tutorial_sleep(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        /* Split the duration into whole seconds and remaining nanoseconds */
        time_t whole_sec = (time_t)(big_o);
        long frac_nsec = (long)((big_o - whole_sec) * 1E9);
        struct timespec seconds = {whole_sec, frac_nsec};
        if (do_report) {
            printf("Sleeping for %e seconds\n", big_o);
            fflush(stdout);
        }
        err = clock_nanosleep(CLOCK_REALTIME, 0, &seconds, NULL);
    }
    return err;
}
/*
 * Executes one dense double-precision matrix multiply (DGEMM) whose
 * problem size scales with big_o: the square matrix dimension is
 * (4e9 * big_o)^(1/3).  A and B are filled with pseudo-random values in
 * [0, 1]; C = alpha*A*B + beta*C.  big_o of 0.0 is a no-op.  When
 * do_report is non-zero, the matrix size is printed before the multiply.
 * Returns 0 on success or the posix_memalign() error code.
 */
int tutorial_dgemm(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
        int pad_size = 64;
        /* pad_size extra columns keep each row cache-line aligned */
        size_t mem_size = sizeof(double) * (matrix_size * (matrix_size + pad_size));
        char transa = 'n';
        char transb = 'n';
        int M = matrix_size;
        int N = matrix_size;
        int K = matrix_size;
        int LDA = matrix_size + pad_size / sizeof(double);
        int LDB = matrix_size + pad_size / sizeof(double);
        int LDC = matrix_size + pad_size / sizeof(double);
        double alpha = 2.0;
        double beta = 3.0;
        double *A = NULL;
        double *B = NULL;
        double *C = NULL;
        err = posix_memalign((void **)&A, pad_size, mem_size);
        if (!err) {
            err = posix_memalign((void **)&B, pad_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&C, pad_size, mem_size);
        }
        if (!err) {
#pragma omp parallel for
            for (int i = 0; i < mem_size / sizeof(double); ++i) {
                /* Cast before dividing: the previous integer division
                 * random() / RAND_MAX truncated nearly every value to 0. */
                A[i] = (double)random() / RAND_MAX;
                B[i] = (double)random() / RAND_MAX;
            }
            if (do_report) {
                printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
                fflush(stdout);
            }
            dgemm(&transa, &transb, &M, &N, &K, &alpha,
                  A, &LDA, B, &LDB, &beta, C, &LDC);
        }
        /* Free unconditionally (pointers start NULL; free(NULL) is a no-op)
         * so A/B no longer leak when a later allocation fails. */
        free(C);
        free(B);
        free(A);
    }
    return err;
}
/*
 * Executes one STREAM triad a[i] = b[i] + scalar * c[i] over vectors of
 * big_o * 5e8 elements.  big_o of 0.0 is a no-op.  When do_report is
 * non-zero, the vector length is printed first.  Returns 0 on success or
 * the posix_memalign() error code.
 */
int tutorial_stream(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        size_t cline_size = 64;
        /* Scale before casting: the previous (size_t)big_o * 500000000
         * truncated big_o to an integer first, so any fractional big_o
         * below 1.0 produced an empty workload. */
        size_t num_stream = (size_t)(big_o * 500000000);
        size_t mem_size = sizeof(double) * num_stream;
        double *a = NULL;
        double *b = NULL;
        double *c = NULL;
        double scalar = 3.0;
        err = posix_memalign((void **)&a, cline_size, mem_size);
        if (!err) {
            err = posix_memalign((void **)&b, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&c, cline_size, mem_size);
        }
        if (!err) {
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; i++) {
                a[i] = 0.0;
                b[i] = 1.0;
                c[i] = 2.0;
            }
            if (do_report) {
                /* %zu: num_stream is size_t; %d was undefined behavior */
                printf("Executing STREAM triad on length %zu vectors.\n", num_stream);
                fflush(stdout);
            }
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; ++i) {
                a[i] = b[i] + scalar * c[i];
            }
        }
        /* Free unconditionally (pointers start NULL) so a/b no longer
         * leak when a later allocation fails. */
        free(c);
        free(b);
        free(a);
    }
    return err;
}
/*
 * Executes one MPI all-to-all exchange whose per-rank message size scales
 * with big_o and shrinks as the rank count grows:
 * num_send = 2^(16*big_o - num_rank/128), at least 1 byte.  big_o of 0.0
 * is a no-op.  Returns 0 on success, a posix_memalign() error code, or an
 * MPI error code.
 */
int tutorial_all2all(double big_o, int do_report)
{
    /* Best case scaling is O(ln(num_send) + num_rank) => */
    /* num_send = exp(big_o_n - factor * num_rank) */
    /* We have somewhat arbitrarily set factor to 1/128 */
    int err = 0;
    if (big_o != 0.0) {
        int num_rank = 0;
        /* Do not redeclare err here: the original's shadowing `int err`
         * meant MPI and allocation failures in this scope were never
         * propagated to the caller (the outer err stayed 0). */
        err = MPI_Comm_size(MPI_COMM_WORLD, &num_rank);
        size_t num_send = (size_t)pow(2.0, 16 * big_o - num_rank / 128.0);
        num_send = num_send ? num_send : 1;
        size_t cline_size = 64;
        char *send_buffer = NULL;
        char *recv_buffer = NULL;
        if (!err) {
            err = posix_memalign((void **)&send_buffer, cline_size,
                                 num_rank * num_send * sizeof(char));
        }
        if (!err) {
            err = posix_memalign((void **)&recv_buffer, cline_size,
                                 num_rank * num_send * sizeof(char));
        }
        if (!err) {
            if (do_report) {
                /* %zu: the byte count is size_t; %d was undefined behavior */
                printf("Executing all2all of %zu byte buffer on %d ranks.\n",
                       num_send * sizeof(char), num_rank);
                fflush(stdout);
            }
            /* MPI counts are int; cast explicitly from size_t */
            err = MPI_Alltoall(send_buffer, (int)num_send, MPI_CHAR, recv_buffer,
                               (int)num_send, MPI_CHAR, MPI_COMM_WORLD);
        }
        if (!err) {
            err = MPI_Barrier(MPI_COMM_WORLD);
        }
        /* Free unconditionally (pointers start NULL) so the buffers no
         * longer leak when an MPI call or allocation fails. */
        free(recv_buffer);
        free(send_buffer);
    }
    return err;
}
/*
 * Same DGEMM workload as tutorial_dgemm(), but the matrices are cached in
 * static storage so repeated calls with the same big_o reuse them instead
 * of reallocating and refilling.  Calling with big_o == 0.0 releases the
 * cached matrices.  Returns 0 on success or the posix_memalign() error
 * code.  Not thread-safe (static cache).
 */
int tutorial_dgemm_static(double big_o, int do_report)
{
    /* Cached problem size and matrices from the previous call */
    static double big_o_last = 0.0;
    static double *A = NULL;
    static double *B = NULL;
    static double *C = NULL;
    int err = 0;
    if (big_o != 0.0) {
        int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
        int pad_size = 64;
        /* pad_size extra columns keep each row cache-line aligned */
        size_t mem_size = sizeof(double) * (matrix_size * (matrix_size + pad_size));
        char transa = 'n';
        char transb = 'n';
        int M = matrix_size;
        int N = matrix_size;
        int K = matrix_size;
        int LDA = matrix_size + pad_size / sizeof(double);
        int LDB = matrix_size + pad_size / sizeof(double);
        int LDC = matrix_size + pad_size / sizeof(double);
        double alpha = 2.0;
        double beta = 3.0;
        if (big_o != big_o_last) {
            big_o_last = big_o;
            if (A) {
                free(C);
                free(B);
                free(A);
                A = NULL;
                B = NULL;
                C = NULL;
            }
            err = posix_memalign((void **)&A, pad_size, mem_size);
            if (!err) {
                err = posix_memalign((void **)&B, pad_size, mem_size);
            }
            if (!err) {
                err = posix_memalign((void **)&C, pad_size, mem_size);
            }
            if (!err) {
#pragma omp parallel for
                for (int i = 0; i < mem_size / sizeof(double); ++i) {
                    /* Cast before dividing: the previous integer division
                     * random() / RAND_MAX truncated nearly every value to 0. */
                    A[i] = (double)random() / RAND_MAX;
                    B[i] = (double)random() / RAND_MAX;
                }
            }
            else {
                /* Allocation failed: reset the cache so a later call can
                 * retry instead of reusing an incomplete buffer set. */
                free(C);
                free(B);
                free(A);
                A = NULL;
                B = NULL;
                C = NULL;
                big_o_last = 0.0;
            }
        }
        if (!err) {
            if (do_report) {
                printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
                fflush(stdout);
            }
            dgemm(&transa, &transb, &M, &N, &K, &alpha,
                  A, &LDA, B, &LDB, &beta, C, &LDC);
        }
    }
    else if (A) {
        /* big_o == 0.0: release the cached matrices */
        free(C);
        free(B);
        free(A);
        A = NULL;
        B = NULL;
        C = NULL;
    }
    return err;
}
#ifdef _OPENMP
/* STREAM triad (a = b + scalar * c) with per-thread geopm progress
 * reporting.  The work is done in 256-element chunks so that
 * geopm_tprof_post() can be called once per completed chunk; the
 * sub-256 remainder is handled by a second loop with no reporting.
 * Always returns 0. */
static int stream_profiled_omp(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c)
{
    const size_t block = 256;                      /* chunk size */
    const size_t num_block = num_stream / block;   /* full chunks */
    const size_t num_remain = num_stream % block;  /* leftover elements */
    int err = 0;
    int num_thread = 1;
    /* First parallel region only discovers the team size; every thread
     * writes the same value. */
#pragma omp parallel
    {
        num_thread = omp_get_num_threads();
    }
#pragma omp parallel
    {
        int thread_idx = omp_get_thread_num();
        /* Register this thread's share of the chunked loop with the
         * profiler before entering the work-shared loop. */
        (void)geopm_tprof_init_loop(num_thread, thread_idx, num_block, 0);
#pragma omp for
        for (size_t i = 0; i < num_block; ++i) {
            for (size_t j = 0; j < block; ++j) {
                a[i * block + j] = b[i * block + j] + scalar * c[i * block + j];
            }
            /* One progress tick per completed chunk. */
            (void)geopm_tprof_post();
        }
        /* Remainder elements; no progress reporting.
         * NOTE(review): region_id is accepted but unused here --
         * presumably kept for signature parity with the serial variant;
         * confirm upstream. */
#pragma omp for
        for (size_t j = 0; j < num_remain; ++j) {
            a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j];
        }
    }
    return err;
}
#endif
/* Serial STREAM triad (a = b + scalar * c) that reports fractional
 * progress to geopm after each completed 256-element chunk.  The
 * leftover elements after the last full chunk are processed without a
 * progress report.  Always returns 0. */
static int stream_profiled_serial(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c)
{
    const size_t chunk = 256;
    const size_t full_chunks = num_stream / chunk;
    const size_t tail = num_stream % chunk;
    /* progress reported as (chunks done) / (total chunks) */
    const double progress_per_chunk = 1.0 / full_chunks;
    size_t idx = 0;
    for (size_t i = 0; i < full_chunks; ++i) {
        for (size_t j = 0; j < chunk; ++j, ++idx) {
            a[idx] = b[idx] + scalar * c[idx];
        }
        geopm_prof_progress(region_id, i * progress_per_chunk);
    }
    /* remainder: fewer than one full chunk, no progress report */
    for (size_t j = 0; j < tail; ++j, ++idx) {
        a[idx] = b[idx] + scalar * c[idx];
    }
    return 0;
}
int tutorial_stream_profiled(double big_o, int do_report)
{
    /* STREAM triad sized by big_o, wrapped in a geopm profiled region.
     * Returns 0 on success or the first geopm / posix_memalign error.
     * big_o == 0.0 is a no-op. */
    int err = 0;
    if (big_o != 0.0) {
        size_t cline_size = 64;    /* cache-line alignment */
        /* Cast the product, not big_o: "(size_t)big_o * 500000000"
         * truncated fractional sizes (e.g. 0.5) to zero work. */
        size_t num_stream = (size_t)(big_o * 500000000);
        size_t mem_size = sizeof(double) * num_stream;
        double *a = NULL;
        double *b = NULL;
        double *c = NULL;
        double scalar = 3.0;
        uint64_t stream_rid = 0;
        err = geopm_prof_region("tutorial_stream",
                                GEOPM_REGION_HINT_MEMORY,
                                &stream_rid);
        if (!err) {
            /* Guarded: previously this overwrote a geopm_prof_region()
             * failure unconditionally. */
            err = posix_memalign((void **)&a, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&b, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&c, cline_size, mem_size);
        }
        if (!err) {
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; i++) {
                a[i] = 0.0;
                b[i] = 1.0;
                c[i] = 2.0;
            }
            if (do_report) {
                /* %zu: num_stream is size_t (was %d). */
                printf("Executing profiled STREAM triad on length %zu vectors.\n", num_stream);
                fflush(stdout);
            }
            err = geopm_prof_enter(stream_rid);
        }
        if (!err) {
#ifdef _OPENMP
            err = stream_profiled_omp(stream_rid, num_stream, scalar, a, b, c);
#else
            err = stream_profiled_serial(stream_rid, num_stream, scalar, a, b, c);
#endif
        }
        if (!err) {
            err = geopm_prof_exit(stream_rid);
        }
        /* Free unconditionally so error paths do not leak;
         * free(NULL) is a no-op. */
        free(c);
        free(b);
        free(a);
    }
    /* Was missing entirely: control fell off the end of a non-void
     * function, which is undefined behavior when the caller uses the
     * result. */
    return err;
}
|
flush-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-gimple" } */
void f1(void)
{
    /* Bare flush: flushes this thread's entire view of memory. */
    #pragma omp flush
}
int x, y, z;
/* Flush with an explicit variable list on both branches; together with
 * f1 this yields the three __sync_synchronize calls the dg-final
 * directive below scans for. */
void f2(_Bool p)
{
    if (p)
    {
        #pragma omp flush (x)
    }
    else
    {
        #pragma omp flush (x, y, z)
    }
}
/* { dg-final { scan-tree-dump-times "__sync_synchronize" 3 "gimple" } } */
|
deconvolution_packnto1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) for RISC-V Vector where the input
// channels are packed packn-wide and the output is plain float (pack1).
// Gather formulation: for every output pixel, accumulate the contribution
// of each kernel tap whose forward stride mapping lands on a valid input
// pixel, reducing the packn lanes to a single float at the end.
static void deconvolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 4; // number of 32-bit lanes (VLEN / 32)
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // span of the dilated kernel in input coordinates
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data; // NULL when bias_data is empty

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // scalar part of the accumulator, seeded with the bias
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // vector accumulator across the packn input lanes
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                // weights for output channel p: channels * maxk blocks of packn
                const float* kptr = (const float*)weight_data_packnto1 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // input row that maps onto output row i through this
                        // tap; skip taps that fall off the grid or between
                        // stride positions
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // packn interleaved lanes at input (sx, sy)
                            const float* sptr = m.row(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // fused multiply-accumulate across lanes
                            vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
                            vfloat32m1_t _w = vle32_v_f32m1(kptr + k * packn, vl);
                            _sum = vfmacc_vv_f32m1(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                // horizontal reduction: fold the vector accumulator and the
                // scalar (bias) part into one float
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y, normalized so tv_usec lands in a sane
 * range.  NOTE: *y is modified in place during normalization (this is
 * the classic GNU libc example's behavior).  Returns 1 when the
 * difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    if (x->tv_usec < y->tv_usec)
    {
        /* Borrow whole seconds from y so x's usec is not smaller. */
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        /* Carry excess microseconds into seconds. */
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Order-1 3D 7-point stencil benchmark driver.
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; two halo planes added.
 * Runs TESTS timed sweeps and reports the minimum time. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Defaults (interior 32^3, 5 time steps): previously Nx..Nt were
     * read uninitialized when too few arguments were given. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 5;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* A[2][Nz][Ny][Nx]: two time levels, ping-ponged via t%2. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 24;
    tile_size[3] = 1024;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize BOTH time levels over the FULL domain.  The original
     * loops started at index 1 and only filled A[0], so the stencil
     * read uninitialized boundary planes (index 0) and, from the second
     * timestep on, uninitialized boundaries of A[1]. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = A[0][i][j][k];
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    /* NOTE(review): omp_get_max_threads is used without #include <omp.h>
     * in this file -- relies on an implicit declaration; confirm the
     * build adds it. */
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* MIN is the macro defined at the top of this file; lowercase
         * "min" was undefined here. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /*    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
*/
    return 0;
}
|
GB_unaryop__identity_int32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int32_int16
// op(A') function: GB_tran__identity_int32_int16
// C type: int32_t
// A type: int16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) Ax [p] for all p: elementwise typecast with the
// identity unary operator (expanded via GB_CAST_OP above).  Returns
// GrB_NO_VALUE when the operator/type combination is compile-time
// disabled, GrB_SUCCESS otherwise.
GrB_Info GB_unop__identity_int32_int16
(
    int32_t *restrict Cx,        // output array, anz entries
    const int16_t *restrict Ax,  // input array, anz entries
    int64_t anz,                 // number of entries to process
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: one independent cast per entry
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int32_t) A': transpose with typecast and identity operator.  The
// actual work is done by the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above in this file.
GrB_Info GB_tran__identity_int32_int16
(
    GrB_Matrix C,                      // output matrix
    const GrB_Matrix A,                // input matrix, transposed into C
    int64_t **Rowcounts,               // per-slice row counts workspace
    GBI_single_iterator Iter,          // iterator over the slices of A
    const int64_t *restrict A_slice,   // slice boundaries of A
    int naslice                        // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bls.c | /* Licensed under a 3-clause BSD style license - see LICENSE.rst */
#include <math.h>
#include <float.h>
#include <stdlib.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifndef INFINITY
#define INFINITY (1.0 / 0.0)
#endif
void compute_objective(
double y_in,
double y_out,
double ivar_in,
double ivar_out,
int obj_flag,
double* objective,
double* log_likelihood,
double* depth,
double* depth_err,
double* depth_snr
) {
if (obj_flag) {
double arg = y_out - y_in;
*log_likelihood = 0.5*ivar_in*arg*arg;
*objective = *log_likelihood;
} else {
*depth = y_out - y_in;
*depth_err = sqrt(1.0 / ivar_in + 1.0 / ivar_out);
*depth_snr = *depth / *depth_err;
*objective = *depth_snr;
}
}
inline double wrap_into (double x, double period)
{
return x - period * floor(x / period);
}
/* Box Least Squares periodogram search.  For each trial period, the
 * data are phase-folded onto a fine grid, converted to cumulative sums,
 * and every (phase, duration) box is scored; the best box per period is
 * written to the output arrays (each of length n_periods).
 * Returns 0 on success, 1/2 for invalid period/duration ranges, and a
 * negative code when workspace allocation fails. */
int run_bls (
    // Inputs
    int N,                   // Length of the time array
    double* t,               // The list of timestamps
    double* y,               // The y measured at ``t``
    double* ivar,            // The inverse variance of the y array
    int n_periods,
    double* periods,         // The period to test in units of ``t``
    int n_durations,         // Length of the durations array
    double* durations,       // The durations to test in units of ``bin_duration``
    int oversample,          // The number of ``bin_duration`` bins in the maximum duration
    int obj_flag,            // A flag indicating the periodogram type
                             //   0 - depth signal-to-noise
                             //   1 - log likelihood
    // Outputs
    double* best_objective,  // The value of the periodogram at maximum
    double* best_depth,      // The estimated depth at maximum
    double* best_depth_err,  // The uncertainty on ``best_depth``
    double* best_duration,   // The best fitting duration in units of ``t``
    double* best_phase,      // The phase of the mid-transit time in units of ``t``
    double* best_depth_snr,  // The signal-to-noise ratio of the depth estimate
    double* best_log_like    // The log likelihood at maximum
) {
    // Start by finding the period and duration ranges
    double max_period = periods[0], min_period = periods[0];
    int k;
    for (k = 1; k < n_periods; ++k) {
        if (periods[k] < min_period) min_period = periods[k];
        if (periods[k] > max_period) max_period = periods[k];
    }
    if (min_period < DBL_EPSILON) return 1;

    double min_duration = durations[0], max_duration = durations[0];
    for (k = 1; k < n_durations; ++k) {
        if (durations[k] < min_duration) min_duration = durations[k];
        if (durations[k] > max_duration) max_duration = durations[k];
    }
    if ((max_duration > min_period) || (min_duration < DBL_EPSILON)) return 2;

    // Compute the durations in terms of bin_duration
    double bin_duration = min_duration / ((double)oversample);
    int max_n_bins = (int)(ceil(max_period / bin_duration)) + oversample;

    // Per-thread workspace sizing.  Query the thread count OUTSIDE a
    // parallel region: the previous version had every thread of a
    // parallel block write ``nthreads`` (a write-write race).
    int nthreads = 1;
#if defined(_OPENMP)
    nthreads = omp_get_max_threads();
#endif
    int blocksize = max_n_bins + 1;

    // Allocate the work arrays
    double* mean_y_0 = (double*)malloc(nthreads*blocksize*sizeof(double));
    if (mean_y_0 == NULL) {
        return -2;
    }
    double* mean_ivar_0 = (double*)malloc(nthreads*blocksize*sizeof(double));
    if (mean_ivar_0 == NULL) {
        free(mean_y_0);
        return -3;
    }

    // Pre-accumulate some factors.
    double min_t = INFINITY;
    double sum_y = 0.0, sum_ivar = 0.0;
    int i;
    // reduction(min:min_t) added: without it the fmin update below is a
    // data race across threads.
#pragma omp parallel for reduction(+:sum_y), reduction(+:sum_ivar), reduction(min:min_t)
    for (i = 0; i < N; ++i) {
        min_t = fmin(min_t, t[i]);
        sum_y += y[i] * ivar[i];
        sum_ivar += ivar[i];
    }

    // Loop over periods and do the search
    int p;
#pragma omp parallel for
    for (p = 0; p < n_periods; ++p) {
#if defined(_OPENMP)
        int ithread = omp_get_thread_num();
#else
        int ithread = 0;
#endif
        int block = blocksize * ithread;   // this thread's workspace slot
        double period = periods[p];
        int n_bins = (int)(ceil(period / bin_duration)) + oversample;
        double* mean_y = mean_y_0 + block;
        double* mean_ivar = mean_ivar_0 + block;

        // This first pass bins the data into a fine-grain grid in phase from zero
        // to period and computes the weighted sum and inverse variance for each
        // bin.
        int n, ind;
        for (n = 0; n < n_bins+1; ++n) {
            mean_y[n] = 0.0;
            mean_ivar[n] = 0.0;
        }
        for (n = 0; n < N; ++n) {
            // (was a shadowing re-declaration of ``ind``)
            ind = (int)(wrap_into(t[n] - min_t, period) / bin_duration) + 1;
            mean_y[ind] += y[n] * ivar[n];
            mean_ivar[ind] += ivar[n];
        }

        // To simplify calculations below, we wrap the binned values around and pad
        // the end of the array with the first ``oversample`` samples.
        for (n = 1, ind = n_bins - oversample; n <= oversample; ++n, ++ind) {
            mean_y[ind] = mean_y[n];
            mean_ivar[ind] = mean_ivar[n];
        }

        // To compute the estimates of the in-transit flux, we need the sum of
        // mean_y and mean_ivar over a given set of transit points. To get this
        // fast, we can compute the cumulative sum and then use differences between
        // points separated by ``duration`` bins. Here we convert the mean arrays
        // to cumulative sums.
        for (n = 1; n <= n_bins; ++n) {
            mean_y[n] += mean_y[n-1];
            mean_ivar[n] += mean_ivar[n-1];
        }

        // Then we loop over phases (in steps of n_bin) and durations and find the
        // best fit value. By looping over durations here, we get to reuse a lot of
        // the computations that we did above.
        double objective, log_like, depth, depth_err, depth_snr;
        best_objective[p] = -INFINITY;
        int kk;
        for (kk = 0; kk < n_durations; ++kk) {
            int dur = (int)(round(durations[kk] / bin_duration));
            int n_max = n_bins-dur;
            for (n = 0; n <= n_max; ++n) {
                // Estimate the in-transit and out-of-transit flux
                double y_in = mean_y[n+dur] - mean_y[n];
                double ivar_in = mean_ivar[n+dur] - mean_ivar[n];
                double y_out = sum_y - y_in;
                double ivar_out = sum_ivar - ivar_in;

                // Skip this model if there are no points in transit
                if ((ivar_in < DBL_EPSILON) || (ivar_out < DBL_EPSILON)) {
                    continue;
                }

                // Normalize to compute the actual value of the flux
                y_in /= ivar_in;
                y_out /= ivar_out;

                // Either compute the log likelihood or the signal-to-noise
                // ratio
                compute_objective(y_in, y_out, ivar_in, ivar_out, obj_flag,
                                  &objective, &log_like, &depth, &depth_err, &depth_snr);

                // If this is the best result seen so far, keep it
                if (y_out >= y_in && objective > best_objective[p]) {
                    best_objective[p] = objective;

                    // Compute the other parameters with the opposite flag
                    compute_objective(y_in, y_out, ivar_in, ivar_out, (obj_flag == 0),
                                      &objective, &log_like, &depth, &depth_err, &depth_snr);

                    best_depth[p]     = depth;
                    best_depth_err[p] = depth_err;
                    best_depth_snr[p] = depth_snr;
                    best_log_like[p]  = log_like;
                    best_duration[p]  = dur * bin_duration;
                    best_phase[p]     = fmod(n*bin_duration + 0.5*best_duration[p] + min_t, period);
                }
            }
        }
    }

    // Clean up
    free(mean_y_0);
    free(mean_ivar_0);

    return 0;
}
|
DRB040-truedepsingleelement-var-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it andor
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http:www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https:github.comLLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Data race pair: a[i]@63:5 vs. a[0]@63:15
*/
#include <stdlib.h>
/* DataRaceBench DRB040 (Cetus-transformed) driver.  The benchmark's
 * documented race pair is a[i] vs. a[0]; in this transformed form the
 * initialization loop (main#0) runs in parallel and the true-dependence
 * loop (main#1) runs serially.  Do NOT "fix" the loop structure -- the
 * structure IS the test case. */
int main(int argc, char * argv[])
{
    int len = 1000;
    int i;
    /* NOTE(review): VLA sized with len == 1000 BEFORE the argv override
     * below; passing argv[1] > 1000 overruns a[] -- confirm intent. */
    int a[len];
    int _ret_val_0;
    if (argc>1)
    {
        len=atoi(argv[1]);
    }
    a[0]=2;
    /* main#0: parallel initialization (Cetus-generated pragmas). */
    #pragma cetus private(i)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i)
    for (i=0; i<len; i ++ )
    {
        a[i]=i;
    }
    /* main#1: serial loop with a true dependence on a[0]. */
    #pragma cetus private(i)
    #pragma loop name main#1
    for (i=0; i<len; i ++ )
    {
        a[i]=(a[i]+a[0]);
    }
    /* main#2: print results.
     * NOTE(review): printf is used but <stdio.h> is not included in
     * this file -- implicit declaration; confirm upstream. */
    #pragma cetus private(i)
    #pragma loop name main#2
    for (i=0; i<len; i ++ )
    {
        printf("%d\n", a[i]);
    }
    _ret_val_0=0;
    return _ret_val_0;
}
|
GB_reduce_each_vector.c | //------------------------------------------------------------------------------
// GB_reduce_each_vector: Tx(j)=reduce(A(:,j)), reduce a matrix to a vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a vector. The kth vector A(:,k) is reduced to the kth
// scalar Tx(k). Each thread computes the reductions on roughly the same number
// of entries, which means that a vector A(:,k) may be reduced by more than one
// thread. The first vector A(:,kfirst) reduced by thread tid may be partial,
// where the prior thread tid-1 (and other prior threads) may also do some of
// the reductions for this same vector A(:,kfirst). The thread tid fully
// reduces all vectors A(:,k) for k in the range kfirst+1 to klast-1. The last
// vector A(:,klast) reduced by thread tid may also be partial. Thread tid+1,
// and following threads, may also do some of the reduces for A(:,klast).
// default: GB_GET_J is a no-op unless the includer defines it
#ifndef GB_GET_J
#define GB_GET_J ;
#endif

{

    // Ah, Ai, asize, avlen, avdim unused for some uses of this template
    #include "GB_unused.h"

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const GB_ATYPE *restrict Ax = A->x ;
    size_t asize = A->type->size ;
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;

    //--------------------------------------------------------------------------
    // workspace for first and last vectors of each slice
    //--------------------------------------------------------------------------

    // Wfirst/Wlast hold the PARTIAL reductions each task computed for the
    // (possibly shared) first and last vectors of its slice; they are
    // stitched together sequentially after the parallel phase.
    // ztype Wfirst [ntasks], Wlast [ntasks] ;
    GB_REDUCTION_WORKSPACE (Wfirst, ntasks) ;
    GB_REDUCTION_WORKSPACE (Wlast , ntasks) ;

    //--------------------------------------------------------------------------
    // reduce each slice
    //--------------------------------------------------------------------------

    // each thread reduces its own part in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {

        // if kfirst > klast then thread tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;

        //----------------------------------------------------------------------
        // reduce vectors kfirst to klast
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) to be reduced by this thread
            //------------------------------------------------------------------

            GB_GET_J ;
            int64_t pA_start, pA_end ;
            GB_get_pA_and_pC (&pA_start, &pA_end, NULL,
                tid, k, kfirst, klast, pstart_slice, NULL, NULL, Ap) ;

            //------------------------------------------------------------------
            // reduce Ax [pA_start ... pA_end-1] to a scalar, if non-empty
            //------------------------------------------------------------------

            if (pA_start < pA_end)
            {

                //--------------------------------------------------------------
                // reduce the vector to the scalar s
                //--------------------------------------------------------------

                // ztype s = (ztype) Ax [pA_start], with typecast
                GB_SCALAR (s) ;
                GB_CAST_ARRAY_TO_SCALAR (s, Ax, pA_start) ;
                for (int64_t p = pA_start+1 ; p < pA_end ; p++)
                {
                    // check for early exit (terminal value of the monoid)
                    GB_BREAK_IF_TERMINAL (s) ;
                    // s += (ztype) Ax [p], with typecast
                    GB_ADD_CAST_ARRAY_TO_SCALAR (s, Ax, p) ;
                }

                //--------------------------------------------------------------
                // save the result s
                //--------------------------------------------------------------

                // First/last vectors may be shared with neighboring tasks,
                // so their partial results go to the workspace; interior
                // vectors are wholly owned and go straight to Tx.
                if (k == kfirst)
                {
                    // Wfirst [tid] = s ; no typecast
                    GB_COPY_SCALAR_TO_ARRAY (Wfirst, tid, s) ;
                }
                else if (k == klast)
                {
                    // Wlast [tid] = s ; no typecast
                    GB_COPY_SCALAR_TO_ARRAY (Wlast, tid, s) ;
                }
                else
                {
                    // Tx [k] = s ; no typecast
                    GB_COPY_SCALAR_TO_ARRAY (Tx, k, s) ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // reduce the first and last vector of each slice using a single thread
    //--------------------------------------------------------------------------

    // This step is sequential, but it takes only O(ntasks) time.  The only
    // case where this could be a problem is if a user-defined operator was
    // a very costly one.

    // kprior tracks the last vector already started in Tx, so we know
    // whether to initialize Tx[k] or accumulate into it.
    int64_t kprior = -1 ;

    for (int tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // sum up the partial result that thread tid computed for kfirst
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;

        if (kfirst <= klast)
        {
            int64_t pA_start = pstart_slice [tid] ;
            int64_t pA_end   = GB_IMIN (Ap [kfirst+1], pstart_slice [tid+1]) ;
            if (pA_start < pA_end)
            {
                if (kprior < kfirst)
                {
                    // This thread is the first one that did work on
                    // A(:,kfirst), so use it to start the reduction.
                    // Tx [kfirst] = Wfirst [tid], no typecast
                    GB_COPY_ARRAY_TO_ARRAY (Tx, kfirst, Wfirst, tid) ;
                }
                else
                {
                    // Tx [kfirst] += Wfirst [tid], no typecast
                    GB_ADD_ARRAY_TO_ARRAY (Tx, kfirst, Wfirst, tid) ;
                }
                kprior = kfirst ;
            }
        }

        //----------------------------------------------------------------------
        // sum up the partial result that thread tid computed for klast
        //----------------------------------------------------------------------

        if (kfirst < klast)
        {
            int64_t pA_start = Ap [klast] ;
            int64_t pA_end   = pstart_slice [tid+1] ;
            if (pA_start < pA_end)
            {
                /* if */ ASSERT (kprior < klast) ;
                {
                    // This thread is the first one that did work on
                    // A(:,klast), so use it to start the reduction.
                    // Tx [klast] = Wlast [tid], no typecast
                    GB_COPY_ARRAY_TO_ARRAY (Tx, klast, Wlast, tid) ;
                }
                /*
                else
                {
                    // If kfirst < klast and A(:,klast is not empty, then this
                    // task is always the first one to do work on A(:,klast),
                    // so this case is never used.
                    ASSERT (GB_DEAD_CODE) ;
                    // Tx [klast] += Wlast [tid], no typecast
                    GB_ADD_ARRAY_TO_ARRAY (Tx, klast, Wlast, tid) ;
                }
                */
                kprior = klast ;
            }
        }
    }
}
|
GB_transpose_bucket.c | //------------------------------------------------------------------------------
// GB_transpose_bucket: transpose and optionally typecast and/or apply operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C = A' or op(A'). Optionally typecasts from A->type to the new type ctype,
// and/or optionally applies a unary operator.
// If an operator z=op(x) is provided, the type of z must be the same as the
// type of C. The type of A must be compatible with the type of of x (A is
// typecasted into the type of x). These conditions must be checked in the
// caller.
// This function is agnostic for the CSR/CSC format of C and A. C_is_csc is
// defined by the caller and assigned to C->is_csc, but otherwise unused.
// A->is_csc is ignored.
// The input can be hypersparse or non-hypersparse. The output C is always
// non-hypersparse, and never shallow. On input, C is a static header.
// If A is m-by-n in CSC format, with e nonzeros, the time and memory taken is
// O(m+n+e) if A is non-hypersparse, or O(m+e) if hypersparse. This is fine if
// most rows and columns of A are non-empty, but can be very costly if A or A'
// is hypersparse. In particular, if A is a non-hypersparse column vector with
// m >> e, the time and memory is O(m), which can be huge. Thus, for
// hypersparse matrices, or for very sparse matrices, the qsort method should
// be used instead (see GB_transpose).
// This method is parallel, but not highly scalable. At most O(e/m) threads
// are used.
#include "GB_transpose.h"
#define GB_FREE_WORKSPACE \
{ \
if (Workspaces != NULL && Workspaces_size != NULL) \
{ \
for (int tid = 0 ; tid < nworkspaces ; tid++) \
{ \
GB_FREE_WORK (&(Workspaces [tid]), Workspaces_size [tid]) ; \
} \
} \
GB_WERK_POP (A_slice, int64_t) ; \
GB_WERK_POP (Workspaces_size, size_t) ; \
GB_WERK_POP (Workspaces, int64_t *) ; \
}
#define GB_FREE_ALL \
{ \
GB_phbix_free (C) ; \
GB_FREE_WORKSPACE ; \
}
//------------------------------------------------------------------------------
// GB_transpose_bucket: C = A' (or op(A')) via row-count buckets
//------------------------------------------------------------------------------
// phase1 counts the entries in each row of A (three strategies, selected by
// nthreads/nworkspaces), computes C->p by cumulative sum, and phase2 scatters
// the entries of A into C using the per-workspace offsets.
GrB_Info GB_transpose_bucket // bucket transpose; typecast and apply op
(
GrB_Matrix C, // output matrix (static header)
const GB_iso_code C_code_iso, // iso code for C
const GrB_Type ctype, // type of output matrix C
const bool C_is_csc, // format of output matrix C
const GrB_Matrix A, // input matrix
// no operator is applied if op is NULL
const GB_Operator op, // unary/idxunop/binop to apply
const GrB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,A) else binop(A,y)
const int nworkspaces, // # of workspaces to use
const int nthreads, // # of threads to use
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (C != NULL) ;
ASSERT (C->static_header) ;
ASSERT_TYPE_OK (ctype, "ctype for transpose", GB0) ;
ASSERT_MATRIX_OK (A, "A input for transpose_bucket", GB0) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
// if op is NULL, then no operator is applied
// This method is only used when A is sparse or hypersparse.
// The full and bitmap cases are handled in GB_transpose.
ASSERT (!GB_IS_FULL (A)) ;
ASSERT (!GB_IS_BITMAP (A)) ;
ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
GB_WERK_DECLARE (A_slice, int64_t) ; // size nthreads+1
GB_WERK_DECLARE (Workspaces, int64_t *) ; // size nworkspaces
GB_WERK_DECLARE (Workspaces_size, size_t) ; // size nworkspaces
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
int64_t anz = GB_nnz (A) ;
int64_t vlen = A->vlen ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
// # of threads to use in the O(vlen) loops below
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nth = GB_nthreads (vlen, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate C: always sparse
//--------------------------------------------------------------------------
// The bucket transpose only works when C is sparse.
// A can be sparse or hypersparse.
// C->p is allocated but not initialized.
GrB_Info info ;
// set C->iso = C_iso OK
bool C_iso = (C_code_iso != GB_NON_ISO) ;
GB_OK (GB_new_bix (&C, true, // sparse, static header
ctype, A->vdim, vlen, GB_Ap_malloc, C_is_csc,
GxB_SPARSE, true, A->hyper_switch, vlen, anz, true, C_iso, Context)) ;
int64_t *restrict Cp = C->p ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// one row-count array of size vlen+1 per workspace
GB_WERK_PUSH (Workspaces, nworkspaces, int64_t *) ;
GB_WERK_PUSH (Workspaces_size, nworkspaces, size_t) ;
if (Workspaces == NULL || Workspaces_size == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
bool ok = true ;
for (int tid = 0 ; tid < nworkspaces ; tid++)
{
Workspaces [tid] = GB_MALLOC_WORK (vlen + 1, int64_t,
&Workspaces_size [tid]) ;
ok = ok && (Workspaces [tid] != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//==========================================================================
// phase1: symbolic analysis
//==========================================================================
// slice the A matrix, perfectly balanced for one task per thread
GB_WERK_PUSH (A_slice, nthreads + 1, int64_t) ;
if (A_slice == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_pslice (A_slice, A->p, A->nvec, nthreads, true) ;
// sum up the row counts and find C->p
if (nthreads == 1)
{
//----------------------------------------------------------------------
// sequential method: A is not sliced
//----------------------------------------------------------------------
// Only requires a single int64 workspace of size vlen for a single
// thread. The resulting C matrix is not jumbled.
// compute the row counts of A. No need to scan the A->p pointers
ASSERT (nworkspaces == 1) ;
int64_t *restrict workspace = Workspaces [0] ;
memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ;
const int64_t *restrict Ai = A->i ;
for (int64_t p = 0 ; p < anz ; p++)
{
int64_t i = Ai [p] ;
workspace [i]++ ;
}
// cumulative sum of the workspace, and copy back into C->p
GB_cumsum (workspace, vlen, &(C->nvec_nonempty), 1, NULL) ;
memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t)) ;
}
else if (nworkspaces == 1)
{
//----------------------------------------------------------------------
// atomic method: A is sliced but workspace is shared
//----------------------------------------------------------------------
// Only requires a single int64 workspace of size vlen, shared by all
// threads. Scales well, but requires atomics. If the # of rows is
// very small and the average row degree is high, this can be very slow
// because of contention on the atomic workspace. Otherwise, it is
// typically faster than the non-atomic method. The resulting C matrix
// is jumbled.
// compute the row counts of A. No need to scan the A->p pointers
int64_t *restrict workspace = Workspaces [0] ;
GB_memset (workspace, 0, (vlen + 1) * sizeof (int64_t), nth) ;
const int64_t *restrict Ai = A->i ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t i = Ai [p] ;
// update workspace [i]++ atomically:
GB_ATOMIC_UPDATE
workspace [i]++ ;
}
C->jumbled = true ; // atomic transpose leaves C jumbled
// cumulative sum of the workspace, and copy back into C->p
GB_cumsum (workspace, vlen, &(C->nvec_nonempty), nth, Context) ;
GB_memcpy (Cp, workspace, (vlen+ 1) * sizeof (int64_t), nth) ;
}
else
{
//----------------------------------------------------------------------
// non-atomic method
//----------------------------------------------------------------------
// compute the row counts of A for each slice, one per thread; This
// method is parallel, but not highly scalable. Each thread requires
// int64 workspace of size vlen, but no atomics are required. The
// resulting C matrix is not jumbled, so this can save work if C needs
// to be unjumbled later.
ASSERT (nworkspaces == nthreads) ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// get the row counts for this slice, of size A->vlen
int64_t *restrict workspace = Workspaces [tid] ;
memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ;
for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
{
// iterate over the entries in A(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// count one more entry in C(i,:) for this slice
int64_t i = Ai [pA] ;
workspace [i]++ ;
}
}
}
// cumulative sum of the workspaces across the slices: after this loop,
// workspace [i] of slice tid holds the # of entries in row i owned by
// slices 0..tid-1, and Cp [i] holds the total count for row i
int64_t i ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (i = 0 ; i < vlen ; i++)
{
int64_t s = 0 ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t *restrict workspace = Workspaces [tid] ;
int64_t c = workspace [i] ;
workspace [i] = s ;
s += c ;
}
Cp [i] = s ;
}
Cp [vlen] = 0 ;
// compute the vector pointers for C
GB_cumsum (Cp, vlen, &(C->nvec_nonempty), nth, Context) ;
// add Cp back to all Workspaces, so each workspace holds the absolute
// starting offset in C->i/C->x for its slice's entries in each row
#pragma omp parallel for num_threads(nth) schedule(static)
for (i = 0 ; i < vlen ; i++)
{
int64_t s = Cp [i] ;
int64_t *restrict workspace = Workspaces [0] ;
workspace [i] = s ;
for (int tid = 1 ; tid < nthreads ; tid++)
{
int64_t *restrict workspace = Workspaces [tid] ;
workspace [i] += s ;
}
}
}
C->magic = GB_MAGIC ;
//==========================================================================
// phase2: transpose A into C
//==========================================================================
// transpose both the pattern and the values
if (op == NULL)
{
// do not apply an operator; optional typecast to C->type
GB_transpose_ix (C, A, Workspaces, A_slice, nworkspaces, nthreads) ;
}
else
{
// apply an operator, C has type op->ztype
GB_transpose_op (C, C_code_iso, op, scalar, binop_bind1st, A,
Workspaces, A_slice, nworkspaces, nthreads) ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
ASSERT_MATRIX_OK (C, "C transpose of A", GB0) ;
ASSERT (C->h == NULL) ;
return (GrB_SUCCESS) ;
}
|
GB_subref_template.c | //------------------------------------------------------------------------------
// GB_subref_template: C = A(I,J), or C = pattern (A(I,J))
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#if defined ( GB_SYMBOLIC )
// symbolic method must tolerate zombies
#define GB_Ai(p) GB_UNFLIP (Ai [p])
#else
// numeric method will not see any zombies
#define GB_Ai(p) Ai [p]
#endif
// to iterate across all entries in a bucket:
// Mark [i] - 1 is the first index inew in bucket i (or -1 if the bucket is
// empty), and Inext [inew] chains to the next index in the same bucket
#define GB_for_each_index_in_bucket(inew,i) \
for (int64_t inew = Mark[i]-1 ; inew >= 0 ; inew = Inext [inew])
// copy values from A(:,kA) to C(:,kC): Cx [pC:pC+len-1] = Ax [pA:pA+len-1].
#if defined ( GB_SYMBOLIC )
// symbolic: Cx holds positions in A, not values
#define GB_COPY_RANGE(pC,pA,len) \
for (int64_t k = 0 ; k < (len) ; k++) \
{ \
Cx [(pC) + k] = (pA) + k ; \
}
#else
#define GB_COPY_RANGE(pC,pA,len) \
memcpy (Cx + (pC)*GB_CSIZE1, Ax + (pA)*GB_CSIZE1, (len) * GB_CSIZE2) ;
#endif
// copy a single value from A(:,kA) to C(:,kC): Cx [pC] = Ax [pA].
#if defined ( GB_SYMBOLIC )
// symbolic: store the position pA itself
#define GB_COPY_ENTRY(pC,pA) \
Cx [pC] = (pA) ;
#else
#define GB_COPY_ENTRY(pC,pA) \
/* Cx [pC] = Ax [pA] */ \
memcpy (Cx + (pC)*GB_CSIZE1, Ax + (pA)*GB_CSIZE1, GB_CSIZE2) ;
#endif
// the type of Cx
#if defined ( GB_SYMBOLIC )
#define GB_CTYPE int64_t
#define GB_CSIZE1 1
#define GB_CSIZE2 (sizeof (int64_t))
#else
// numeric: generic byte copies of A->type->size bytes per entry
#define GB_CTYPE GB_void
#define GB_CSIZE1 asize
#define GB_CSIZE2 asize
// FUTURE: If built-in types are used instead of generic, then GB_COPY_ENTRY
// can become Cx [pC] = Ax [pA]. However, the generic GB_qsort_1b would also
// need to be replaced with type-specific versions for each built-in type. For
// A and C of type double, the #defines would be:
// #define GB_CTYPE double
// #define GB_CSIZE1 1
// #define GB_CSIZE2 (sizeof (double))
#endif
// Template body, compiled in four variants: symbolic vs numeric
// (GB_SYMBOLIC), crossed with phase 1 (count the entries of each C(:,kC))
// vs phase 2 (fill Ci and Cx), selected by GB_PHASE_1_OF_2 / GB_PHASE_2_OF_2.
{
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const int64_t *restrict Ai = A->i ;
const int64_t avlen = A->vlen ;
#if defined ( GB_SYMBOLIC )
const int64_t nzombies = A->nzombies ;
#endif
#if defined ( GB_PHASE_2_OF_2 ) && defined ( GB_NUMERIC )
const GB_CTYPE *restrict Ax = A->x ;
const int64_t asize = A->type->size ;
#endif
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
#if defined ( GB_PHASE_2_OF_2 )
int64_t *restrict Ci = C->i ;
GB_CTYPE *restrict Cx = C->x ;
#endif
//--------------------------------------------------------------------------
// get I
//--------------------------------------------------------------------------
// these values are ignored if Ikind == GB_LIST
int64_t ibegin = Icolon [GxB_BEGIN] ;
int64_t iinc = Icolon [GxB_INC ] ;
// inc = |iinc|, used by case 8 which scans a negative stride backwards
int64_t inc = (iinc < 0) ? (-iinc) : iinc ;
#ifdef GB_DEBUG
int64_t iend = Icolon [GxB_END ] ;
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,kC); phase2: compute C
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
// a negative klast encodes a fine task (and its method; see below)
bool fine_task = (klast < 0) ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
}
// a coarse task accesses all of I for all its vectors
int64_t pI = 0 ;
int64_t pI_end = nI ;
int64_t ilen = nI ;
ASSERT (0 <= kfirst && kfirst <= klast && klast < Cnvec) ;
//----------------------------------------------------------------------
// compute all vectors C(:,kfirst:klast) for this task
//----------------------------------------------------------------------
for (int64_t kC = kfirst ; kC <= klast ; kC++)
{
//------------------------------------------------------------------
// get C(:,kC)
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
// phase1 simply counts the # of entries in C(*,kC).
int64_t clen = 0 ;
#else
// This task computes all or part of C(:,kC), which are the entries
// in Ci,Cx [pC:pC_end-1].
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,kC)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [kC] <= pC && pC <= pC_end && pC_end <= Cp [kC+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task, so this
// task computes all of C(:,kC).
pC = Cp [kC] ;
pC_end = Cp [kC+1] ;
}
int64_t clen = pC_end - pC ;
if (clen == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,kA)
//------------------------------------------------------------------
int64_t pA, pA_end ;
if (fine_task)
{
// a fine task computes a slice of a single vector C(:,kC).
// The task accesses Ai,Ax [pA:pA_end-1], which holds either
// the entire vector A(imin:imax,kA) for method 6, the entire
// dense A(:,kA) for methods 1 and 2, or a slice of the
// A(imin:imax,kA) vector for all other methods.
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// a coarse task computes the entire vector C(:,kC). The task
// accesses all of A(imin:imax,kA), for most methods, or all of
// A(:,kA) for methods 1 and 2. The vector A(*,kA) appears in
// Ai,Ax [pA:pA_end-1].
pA = Ap_start [kC] ;
pA_end = Ap_end [kC] ;
}
int64_t alen = pA_end - pA ;
if (alen == 0) continue ;
//------------------------------------------------------------------
// get I
//------------------------------------------------------------------
if (fine_task)
{
// A fine task accesses I [pI:pI_end-1]. For methods 2 and 6,
// pI:pI_end is a subset of the entire 0:nI-1 list. For all
// other methods, pI = 0 and pI_end = nI, and the task can
// access all of I.
pI = TaskList [taskid].pB ;
pI_end = TaskList [taskid].pB_end ;
ilen = pI_end - pI ;
}
//------------------------------------------------------------------
// determine the method to use
//------------------------------------------------------------------
int method ;
if (fine_task)
{
// The method that the fine task uses for its slice of A(*,kA)
// and C(*,kC) has already been determined by GB_subref_slice,
// and is encoded as the negated klast of the task.
method = (int) (-TaskList [taskid].klast) ;
}
else
{
// determine the method based on A(*,kA) and I
method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI,
(Mark != NULL), need_qsort, iinc, nduplicates) ;
}
//------------------------------------------------------------------
// extract C (:,kC) = A (I,kA): consider all cases
//------------------------------------------------------------------
switch (method)
{
//--------------------------------------------------------------
case 1 : // C(:,kC) = A(:,kA) where A(:,kA) is dense
//--------------------------------------------------------------
// A (:,kA) has not been sliced
ASSERT (Ikind == GB_ALL) ;
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// copy the entire vector and construct indices
#if defined ( GB_PHASE_1_OF_2 )
clen = ilen ;
#else
for (int64_t k = 0 ; k < ilen ; k++)
{
int64_t inew = k + pI ;
ASSERT (inew == GB_ijlist (I, inew, Ikind, Icolon)) ;
ASSERT (inew == GB_Ai (pA + inew)) ;
Ci [pC + k] = inew ;
}
GB_COPY_RANGE (pC, pA + pI, ilen) ;
#endif
break ;
//--------------------------------------------------------------
case 2 : // C(:,kC) = A(I,kA) where A(I,kA) is dense
//--------------------------------------------------------------
// This method handles any kind of list I, but A(:,kA)
// must be dense. A(:,kA) has not been sliced.
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// scan I and get the entry in A(:,kA) via direct lookup
#if defined ( GB_PHASE_1_OF_2 )
clen = ilen ;
#else
for (int64_t k = 0 ; k < ilen ; k++)
{
// C(inew,kC) = A(i,kA), and it always exists.
int64_t inew = k + pI ;
int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
ASSERT (i == GB_Ai (pA + i)) ;
Ci [pC + k] = inew ;
GB_COPY_ENTRY (pC + k, pA + i) ;
}
#endif
break ;
//--------------------------------------------------------------
case 3 : // the list I has a single index, ibegin
//--------------------------------------------------------------
// binary search in GB_subref_phase0 has already found it.
// This can be any Ikind with nI=1: GB_ALL with A->vlen=1,
// GB_RANGE with ibegin==iend, GB_STRIDE such as 0:-1:0
// (with length 1), or a GB_LIST with ni=1.
// Time: 50x faster than MATLAB
ASSERT (!fine_task) ;
ASSERT (alen == 1) ;
ASSERT (nI == 1) ;
ASSERT (GB_Ai (pA) == GB_ijlist (I, 0, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = 1 ;
#else
Ci [pC] = 0 ;
GB_COPY_ENTRY (pC, pA) ;
#endif
break ;
//--------------------------------------------------------------
case 4 : // Ikind is ":", thus C(:,kC) = A (:,kA)
//--------------------------------------------------------------
// Time: 1x MATLAB but low speedup on the Mac. Why?
// Probably memory bound since it is just memcpy's.
ASSERT (Ikind == GB_ALL && ibegin == 0) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = alen ;
#else
#if defined ( GB_SYMBOLIC )
if (nzombies == 0)
{
memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
}
else
{
// with zombies: unflip each index one at a time
for (int64_t k = 0 ; k < alen ; k++)
{
int64_t i = GB_Ai (pA + k) ;
ASSERT (i == GB_ijlist (I, i, Ikind, Icolon)) ;
Ci [pC + k] = i ;
}
}
#else
memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
#endif
GB_COPY_RANGE (pC, pA, alen) ;
#endif
break ;
//--------------------------------------------------------------
case 5 : // Ikind is GB_RANGE = ibegin:iend
//--------------------------------------------------------------
// Time: much faster than MATLAB. Good speedup too.
ASSERT (Ikind == GB_RANGE) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = alen ;
#else
for (int64_t k = 0 ; k < alen ; k++)
{
int64_t i = GB_Ai (pA + k) ;
int64_t inew = i - ibegin ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
Ci [pC + k] = inew ;
}
GB_COPY_RANGE (pC, pA, alen) ;
#endif
break ;
//--------------------------------------------------------------
case 6 : // I is short vs nnz (A (:,kA)), use binary search
//--------------------------------------------------------------
// Time: very slow unless I is very short and A(:,kA) is
// very long.
// This case can handle any kind of I, and A(:,kA) of any
// properties. For a fine task, A(:,kA) has not been
// sliced; I has been sliced instead.
// If the I bucket inverse has not been created, this
// method is the only option. Alternatively, if nI =
// length (I) is << nnz (A (:,kA)), then scanning I and
// doing a binary search of A (:,kA) is faster than doing a
// linear-time search of A(:,kA) and a lookup into the I
// bucket inverse.
// The vector of C is constructed in sorted order, so no
// sort is needed.
// A(:,kA) has not been sliced.
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// scan I, in order, and search for the entry in A(:,kA)
for (int64_t k = 0 ; k < ilen ; k++)
{
// C(inew,kC) = A (i,kA), if it exists.
// i = I [inew] ; or from a colon expression
int64_t inew = k + pI ;
int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
bool found ;
int64_t pleft = pA ;
int64_t pright = pA_end - 1 ;
#if defined ( GB_SYMBOLIC )
bool is_zombie ;
GB_BINARY_ZOMBIE (i, Ai, pleft, pright, found,
nzombies, is_zombie) ;
#else
GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ;
#endif
if (found)
{
ASSERT (i == GB_Ai (pleft)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pleft) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
case 7 : // I is ibegin:iinc:iend with iinc > 1
//--------------------------------------------------------------
// Time: 1 thread: C=A(1:2:n,:) is 3x slower than MATLAB
// but has good speedup. About as fast as MATLAB with
// enough threads.
ASSERT (Ikind == GB_STRIDE && iinc > 1) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present; see if it is in ibegin:iinc:iend
int64_t i = GB_Ai (pA + k) ;
ASSERT (ibegin <= i && i <= iend) ;
i = i - ibegin ;
if (i % iinc == 0)
{
// i is in the sequence ibegin:iinc:iend
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
int64_t inew = i / iinc ;
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//----------------------------------------------------------
case 8 : // I = ibegin:(-iinc):iend, with iinc < -1
//----------------------------------------------------------
// Time: 2x slower than MATLAB for iinc = -2 or -8.
// Good speedup though. Faster than MATLAB for
// large values (iinc = -128).
// scan backwards so C(:,kC) is built in ascending order
ASSERT (Ikind == GB_STRIDE && iinc < -1) ;
for (int64_t k = alen - 1 ; k >= 0 ; k--)
{
// A(i,kA) present; see if it is in ibegin:iinc:iend
int64_t i = GB_Ai (pA + k) ;
ASSERT (iend <= i && i <= ibegin) ;
i = ibegin - i ;
if (i % inc == 0)
{
// i is in the sequence ibegin:iinc:iend
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
int64_t inew = i / inc ;
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//----------------------------------------------------------
case 9 : // I = ibegin:(-1):iend
//----------------------------------------------------------
// Time: much faster than MATLAB. Good speedup.
// every entry is kept; scan backwards for ascending C
ASSERT (Ikind == GB_STRIDE && iinc == -1) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = alen ;
#else
for (int64_t k = alen - 1 ; k >= 0 ; k--)
{
// A(i,kA) is present
int64_t i = GB_Ai (pA + k) ;
int64_t inew = (ibegin - i) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
}
#endif
break ;
//--------------------------------------------------------------
case 10 : // I unsorted, and C needs qsort, duplicates OK
//--------------------------------------------------------------
// Time: with one thread: 2x slower than MATLAB, probably
// because of the qsort. Good speedup however. This used
// if qsort is needed but ndupl == 0. Try a method that
// needs qsort, but no duplicates?
// Case 10 works well when I has many entries and A(:,kA)
// has few entries. C(:,kC) must be sorted after this pass.
ASSERT (Ikind == GB_LIST) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// traverse bucket i for all indices inew where
// i == I [inew] or where i is from a colon expression
GB_for_each_index_in_bucket (inew, i)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
if (!fine_task)
{
// a coarse task owns this entire C(:,kC) vector, so
// the sort can be done now. The sort for vectors
// handled by multiple fine tasks must wait until all
// task are completed, below in the post sort.
pC = Cp [kC] ;
GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
GB_CSIZE2, clen) ;
}
#endif
break ;
//--------------------------------------------------------------
case 11 : // I not contiguous, with duplicates. No qsort needed
//--------------------------------------------------------------
// Case 11 works well when I has many entries and A(:,kA)
// has few entries. It requires that I be sorted on input,
// so that no sort is required for C(:,kC). It is
// otherwise identical to Case 10.
ASSERT (Ikind == GB_LIST) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// traverse bucket i for all indices inew where
// i == I [inew] or where i is from a colon expression
GB_for_each_index_in_bucket (inew, i)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
case 12 : // I not contiguous, no duplicates. No qsort needed.
//--------------------------------------------------------------
// Identical to Case 11, except GB_for_each_index_in_bucket
// just needs to iterate 0 or 1 times. Works well when I
// has many entries and A(:,kA) has few entries.
ASSERT (Ikind == GB_LIST && nduplicates == 0) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// bucket i has at most one index inew such that
// i == I [inew]
int64_t inew = Mark [i] - 1 ;
if (inew >= 0)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
default:;
//--------------------------------------------------------------
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = clen ;
}
else
{
Cp [kC] = clen ;
}
#endif
}
}
//--------------------------------------------------------------------------
// phase2: post sort for any vectors handled by fine tasks with method 10
//--------------------------------------------------------------------------
#if defined ( GB_PHASE_2_OF_2 )
if (post_sort)
{
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
int64_t kC = TaskList [taskid].kfirst ;
bool do_post_sort = (TaskList [taskid].len != 0) ;
if (do_post_sort)
{
// This is the first fine task with method 10 for C(:,kC). The
// vector C(:,kC) must be sorted, since method 10 left it with
// unsorted indices.
int64_t pC = Cp [kC] ;
int64_t clen = Cp [kC+1] - pC ;
GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
GB_CSIZE2, clen) ;
}
}
}
#endif
}
// undefine the template macros so this file can be #include'd again
#undef GB_Ai
#undef GB_for_each_index_in_bucket
#undef GB_COPY_RANGE
#undef GB_COPY_ENTRY
#undef GB_CTYPE
#undef GB_CSIZE1
#undef GB_CSIZE2
|
GB_binop__land_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int64)
// A*D function (colscale): GB (_AxD__land_int64)
// D*A function (rowscale): GB (_DxB__land_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int64)
// C=scalar+B GB (_bind1st__land_int64)
// C=scalar+B' GB (_bind1st_tran__land_int64)
// C=A+scalar GB (_bind2nd__land_int64)
// C=A'+scalar GB (_bind2nd_tran__land_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
// type of A
#define GB_ATYPE \
int64_t
// type of B
#define GB_BTYPE \
int64_t
// type of C
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE: the stray trailing "\" after the 0 has been removed; line splicing
// occurs before comment removal in C, so "0 \" would have swallowed the
// following comment line into the macro body.
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// Cx [p], the pth entry of C
#define GB_CX(p) Cx [p]
// binary operator: z = x LAND y, treating nonzero as true
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// disabled: this kernel is only generated for accumulable operators.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// #include'd template, specialized by the GB_* macros defined above
void GB (_Cdense_ewise3_noaccum__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, via the
// #include'd subassign template; returns GrB_NO_VALUE if this operator
// is disabled at compile time (GB_DISABLE)
GrB_Info GB (_Cdense_accumB__land_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        // (the template reads the local named "bwork")
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): generated boilerplate -- this second return is unreachable
    // because the block above always returns first.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results through this local, named exactly "Cx"
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results through this local, named exactly "Cx"
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces allocated on demand by the template and released
    // by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, entries present in only one input pair with these
    // scalars instead of being copied through unchanged
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__land_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the meta-template selects the masked/unmasked variants internally
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LAND is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__land_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = LAND (x, Bx [p]) for every entry present in B, in
// parallel.  Entries absent from the bitmap Bb are left untouched.
GrB_Info GB (_bind1st__land_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    const int64_t x = (*((int64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap of B
        if (GBB (Bb, p))
        {
            int64_t bij = GBX (Bx, p, false) ;
            Cx [p] = ((x != 0) && (bij != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = LAND (Ax [p], y) for every entry present in A, in
// parallel.  Entries absent from the bitmap Ab are left untouched.
GrB_Info GB (_bind2nd__land_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    const int64_t y = (*((int64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (GBB (Ab, p))
        {
            int64_t aij = GBX (Ax, p, false) ;
            Cx [p] = ((aij != 0) && (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// (consumed by GB_unop_transpose.c below; the names Ax, Cx, and x are
// resolved inside that template)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // NOTE(review): generated boilerplate -- preprocessor-only re-definition,
    // restores GB_ATYPE for code that follows this function.
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// (consumed by GB_unop_transpose.c below)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
edge_vol_int.c | /******************************************************************************
** Copyright (c) 2016-2019, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "edge_proxy_common.h"
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
/*#define EDGE_HP_1G*/
/*#define HANDLE_AMOK*/
#if defined(EDGE_HP_1G) || defined(EDGE_HP_2M)
#include <sys/mman.h>
#include <linux/mman.h>
#endif
/* Allocates nbytes of memory.  With EDGE_HP_1G / EDGE_HP_2M defined, the
 * request is rounded up to whole 1 GiB / 2 MiB huge pages and backed by
 * mmap(MAP_HUGETLB); on mmap failure the process exits.  Otherwise it is a
 * plain aligned allocation.  NOTE: `alignment` is only honored on the
 * default path (huge pages are naturally page-aligned). */
LIBXSMM_INLINE void* edge_hp_malloc( size_t nbytes, size_t alignment ) {
  void* ret_ptr = NULL;
#if defined(EDGE_HP_1G)
  /* round up to a whole number of 1 GiB pages */
  size_t num_large_pages = nbytes / (1073741824L);
  if ( nbytes > num_large_pages*1073741824L ) {
    num_large_pages++;
  }
  nbytes = (size_t) num_large_pages * 1073741824L;
  /* BUGFIX: size_t must be printed with %zu (was %ld, wrong on LLP64) */
  printf("trying to allocate %zu 1G pages\n", num_large_pages);
  /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );*/
  ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );
  if ( ret_ptr == MAP_FAILED ) { /* idiomatic check; MAP_FAILED == (void*)-1 */
    fprintf(stderr,"1G mmap call failed\n");
    exit(1);
  }
#elif defined(EDGE_HP_2M)
  /* round up to a whole number of 2 MiB pages */
  size_t num_large_pages = nbytes / (2097152UL);
  if ( nbytes > num_large_pages*2097152UL ) {
    num_large_pages++;
  }
  nbytes = (size_t) num_large_pages * 2097152UL;
  printf("trying to allocate %zu 2M pages\n", num_large_pages);
  /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );*/
  ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );
  if ( ret_ptr == MAP_FAILED ) {
    fprintf(stderr,"2M mmap call failed\n");
    exit(1);
  }
#else
  ret_ptr = libxsmm_aligned_malloc( nbytes, alignment );
#endif
  return ret_ptr;
}
/* Releases memory obtained from edge_hp_malloc.  The huge-page variants are
 * not yet implemented (would need munmap with the rounded-up size, hence the
 * unused nbytes parameter); the default path frees via libxsmm. */
LIBXSMM_INLINE void edge_hp_free( void* ptr, size_t nbytes ) {
  LIBXSMM_UNUSED( nbytes );
#if defined(EDGE_HP_1G)
  /* to be implemented */
#elif defined(EDGE_HP_2M)
  /* to be implemented */
#else
  libxsmm_free( ptr );
#endif
}
#if defined(__AVX512F__)
/* C = beta*C + A*B where A and C carry 8 contiguous doubles (one AVX-512
 * vector) per logical scalar entry, and B is a plain scalar matrix whose
 * entries are broadcast across the vector lanes.  i_r is unused. */
LIBXSMM_INLINE void matMulFusedAC( unsigned short i_r,
                                   unsigned int i_m,
                                   unsigned int i_n,
                                   unsigned int i_k,
                                   unsigned int i_ldA,
                                   unsigned int i_ldB,
                                   unsigned int i_ldC,
                                   double i_beta,
                                   const double *i_a,
                                   const double *i_b,
                                   double *o_c ) {
  unsigned int m, n, k;
  const __m512d vbeta = _mm512_set1_pd( i_beta );
  LIBXSMM_UNUSED(i_r);
  /* pass 1: pre-scale C by beta (or clear it when beta == 0) */
  for ( m = 0; m < i_m; m++ ) {
    for ( n = 0; n < i_n; n++ ) {
      double* const cp = &(o_c[m*i_ldC*8 + n*8 + 0]);
      if ( i_beta != 0.0 ) {
        _mm512_storeu_pd( cp, _mm512_mul_pd( _mm512_loadu_pd( cp ), vbeta ) );
      } else {
        _mm512_storeu_pd( cp, _mm512_setzero_pd() );
      }
    }
  }
  /* pass 2: accumulate the product, broadcasting scalars of B */
  for ( m = 0; m < i_m; m++ ) {
    for ( n = 0; n < i_n; n++ ) {
      double* const cp = &(o_c[m*i_ldC*8 + n*8 + 0]);
      __m512d acc = _mm512_loadu_pd( cp );
      for ( k = 0; k < i_k; k++ ) {
        const __m512d vb = _mm512_set1_pd( i_b[k*i_ldB + n] );
        acc = _mm512_fmadd_pd( vb, _mm512_loadu_pd( &(i_a[m*i_ldA*8 + k*8 + 0]) ), acc );
      }
      _mm512_storeu_pd( cp, acc );
    }
  }
}
/* C = beta*C + A*B where B and C carry 8 contiguous doubles (one AVX-512
 * vector) per logical scalar entry, and A is a plain scalar matrix whose
 * entries are broadcast across the vector lanes.  i_r is unused. */
LIBXSMM_INLINE void matMulFusedBC( unsigned short i_r,
                                   unsigned int i_m,
                                   unsigned int i_n,
                                   unsigned int i_k,
                                   unsigned int i_ldA,
                                   unsigned int i_ldB,
                                   unsigned int i_ldC,
                                   double i_beta,
                                   const double *i_a,
                                   const double *i_b,
                                   double *o_c ) {
  unsigned int m, n, k;
  const __m512d vbeta = _mm512_set1_pd( i_beta );
  LIBXSMM_UNUSED(i_r);
  /* pass 1: pre-scale C by beta (or clear it when beta == 0) */
  for ( m = 0; m < i_m; m++ ) {
    for ( n = 0; n < i_n; n++ ) {
      double* const cp = &(o_c[m*i_ldC*8 + n*8 + 0]);
      if ( i_beta != 0.0 ) {
        _mm512_storeu_pd( cp, _mm512_mul_pd( _mm512_loadu_pd( cp ), vbeta ) );
      } else {
        _mm512_storeu_pd( cp, _mm512_setzero_pd() );
      }
    }
  }
  /* pass 2: accumulate the product, broadcasting scalars of A */
  for ( m = 0; m < i_m; m++ ) {
    for ( n = 0; n < i_n; n++ ) {
      double* const cp = &(o_c[m*i_ldC*8 + n*8 + 0]);
      __m512d acc = _mm512_loadu_pd( cp );
      for ( k = 0; k < i_k; k++ ) {
        const __m512d va = _mm512_set1_pd( i_a[m*i_ldA + k] );
        acc = _mm512_fmadd_pd( va, _mm512_loadu_pd( &(i_b[k*i_ldB*8 + n*8 + 0]) ), acc );
      }
      _mm512_storeu_pd( cp, acc );
    }
  }
}
#endif
/* Flags "amok" workers: any still-healthy worker whose last runtime exceeds
 * the average runtime of the healthy workers by more than 7% is marked.
 * All per-worker slots are strided by 8 entries (64 bytes -- presumably one
 * cache line per worker to avoid false sharing; TODO confirm).
 * io_amoks[8*i_workers] holds the total count of flagged workers.
 * Assumes at least one worker remains unflagged (divisor is nonzero). */
LIBXSMM_INLINE void amok_detect( const double* i_runtimes, size_t* io_amoks, const size_t i_workers ) {
  double healthy_sum;
  double healthy_avg;
  size_t w;
  /* average runtime over the workers not yet flagged */
  healthy_sum = 0.0;
  for ( w = 0; w < i_workers; w++ ) {
    if ( io_amoks[8*w] == 0 ) {
      healthy_sum += i_runtimes[8*w];
    }
  }
  healthy_avg = healthy_sum/((double)(i_workers-io_amoks[8*i_workers]));
  /* flag every healthy worker that is >7% slower than that average */
  for ( w = 0; w < i_workers; w++ ) {
    if ( (io_amoks[8*w] == 0) && (i_runtimes[8*w] > healthy_avg*1.07) ) {
      io_amoks[8*i_workers]++;
      io_amoks[8*w] = 1;
    }
  }
}
/* Re-partitions i_worksize items evenly (ceil-divided chunks) over the
 * workers not flagged in i_amoks.  A flagged caller receives the empty range
 * [0,0); a healthy caller receives the chunk addressed by its rank among the
 * healthy workers, clamped to i_worksize.  Slots in i_amoks are strided by 8
 * entries, with the flagged-worker count stored at i_amoks[8*i_workers]. */
LIBXSMM_INLINE void amok_balance( const size_t* i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t* io_chunk, size_t* io_mystart, size_t* io_myend ) {
  size_t l_active = i_workers - i_amoks[8*i_workers];
  size_t l_chunk = i_worksize / l_active;
  size_t l_begin;
  size_t l_finish;
  /* ceil-divide so all items are covered */
  if ( i_worksize % l_active != 0 ) {
    l_chunk++;
  }
  if ( i_amoks[8*i_mytid] != 0 ) {
    /* flagged workers get no work */
    l_begin = 0;
    l_finish = 0;
  } else {
    /* rank among healthy workers = tid minus flagged predecessors */
    size_t l_rank = i_mytid;
    size_t l_w;
    for ( l_w = 0; l_w < i_mytid; l_w++ ) {
      if ( i_amoks[8*l_w] != 0 ) {
        l_rank--;
      }
    }
    l_begin  = l_rank * l_chunk;
    l_finish = (l_rank + 1) * l_chunk;
    /* clamp the last (possibly partial) chunk to the work size */
    if ( l_begin  > i_worksize ) l_begin  = i_worksize;
    if ( l_finish > i_worksize ) l_finish = i_worksize;
  }
  *io_chunk   = l_chunk;
  *io_mystart = l_begin;
  *io_myend   = l_finish;
}
int main(int argc, char* argv[])
{
char* mat_a = 0;
unsigned int *mat_a_rowptr, *mat_a_colidx;
unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz;
double* mat_a_values;
libxsmm_dmmfunction a_kernel;
char* mat_b = 0;
unsigned int *mat_b_rowptr, *mat_b_colidx;
unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz;
double* mat_b_values;
libxsmm_dmmfunction b_kernel;
char* mat_c = 0;
unsigned int *mat_c_rowptr, *mat_c_colidx;
unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz;
double* mat_c_values;
libxsmm_dmmfunction c_kernel;
char* mat_st = 0;
unsigned int *mat_st_rowptr, *mat_st_colidx;
unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz;
double* mat_st_values;
libxsmm_dmmfunction st_kernel;
int num_modes = 9;
int num_quants = 9;
size_t num_elems = 0;
size_t num_cfr = 8;
size_t num_reps = 1;
size_t elem_size;
/* OpenMP: signed induction variables */
int i, j;
const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0;
libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star;
const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
const int flags = LIBXSMM_GEMM_FLAGS('N', 'N');
const double alpha = 1, beta = 1;
double flops_vol;
double* q;
double* qt;
double* qs;
double* star;
double* global;
unsigned long long l_start, l_end;
double l_total;
unsigned int l_num_threads;
unsigned int l_star_ent = num_quants*num_quants;
double* l_total_thread;
double* l_cur_thread_time;
double time_max;
double time_min;
double time_avg;
size_t* amoks;
/* read cmd */
if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) {
printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]);
return 0;
}
libxsmm_rng_set_seed(1);
/* some empty lines at the beginning */
printf("\n");
i = 1;
if (argc > (int)i) mat_a = argv[i++];
if (argc > (int)i) mat_b = argv[i++];
if (argc > (int)i) mat_c = argv[i++];
if (argc > (int)i) mat_st = argv[i++];
if (argc > (int)i) num_modes = atoi(argv[i++]);
if (argc > (int)i) num_elems = atoi(argv[i++]);
if (argc > (int)i) num_reps = atoi(argv[i++]);
elem_size = num_modes*num_quants*num_cfr;
#if defined(_OPENMP)
#pragma omp parallel
{
#pragma omp master
{
l_num_threads = omp_get_num_threads();
}
}
#else
l_num_threads = 1;
#endif
l_total_thread = (double*)malloc(8*l_num_threads*sizeof(double));
l_cur_thread_time = (double*)malloc(8*l_num_threads*sizeof(double));
amoks = (size_t*)malloc(8*(l_num_threads+1)*sizeof(size_t));
for ( i = 0; i < 8*((int)l_num_threads+1); i++ ) {
amoks[i] = 0;
}
/* read matrices */
printf("reading sparse matrices... ");
edge_sparse_csr_reader_double( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz );
edge_sparse_csr_reader_double( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz );
edge_sparse_csr_reader_double( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz );
edge_sparse_csr_reader_double( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz );
printf("done!\n\n");
/* generate kernels */
printf("generating code... ");
l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff,
num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch);
l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star,
num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch);
a_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_a_rowptr, mat_a_colidx, (const void*)mat_a_values ).dmm;
b_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_b_rowptr, mat_b_colidx, (const void*)mat_b_values ).dmm;
c_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_c_rowptr, mat_c_colidx, (const void*)mat_c_values ).dmm;
st_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_star, mat_st_rowptr, mat_st_colidx, (const void*)mat_st_values ).dmm;
if ( a_kernel == 0 ) {
printf("a kernel could not be built -> exit!");
exit(-1);
}
if ( b_kernel == 0 ) {
printf("b kernel could not be built -> exit!");
exit(-1);
}
if ( b_kernel == 0 ) {
printf("c kernel could not be built -> exit!");
exit(-1);
}
if ( st_kernel == 0 ) {
printf("st kernel could not be built -> exit!");
exit(-1);
}
printf("done!\n\n");
/* copying code to 1 GB page */
#if 0
#if defined(EDGE_HP_1G) || defined(EDGE_HP_2M)
printf("copying code to 1GB page...\n");
onegcode = (void*)edge_hp_malloc( 5*1024*1024, 2097152 );
memcpy( onegcode, (void*) a_kernel, 1505 );
memcpy( onegcode+(1*1024*1024)+64, (void*) b_kernel, 2892 );
memcpy( onegcode+(2*1024*1024)+128, (void*) c_kernel, 3249 );
memcpy( onegcode+(3*1024*1024)+196, (void*)st_kernel, 11010 );
a_kernel = (libxsmm_dmmfunction)onegcode;
b_kernel = (libxsmm_dmmfunction)(onegcode+(1*1024*1024)+64);
c_kernel = (libxsmm_dmmfunction)(onegcode+(2*1024*1024)+128);
st_kernel = (libxsmm_dmmfunction)(onegcode+(3*1024*1024)+196);
printf("...done\n\n");
#endif
#endif
/* create unknowns and t-unknowns */
printf("allocating and initializing fake data... \n");
/* DoFs */
printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) );
q = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152);
/* tDofs */
printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) );
qt = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152);
/* star matrices */
printf(" star: %f MiB\n", ((double)(num_elems*3*l_star_ent*sizeof(double))) / ( 1024.0*1024.0 ) );
star = (double*)edge_hp_malloc( num_elems*3*l_star_ent*sizeof(double), 2097152);
/* stiffness matrices */
printf("global: %f MiB\n", ((double)(3*num_modes*num_modes*sizeof(double))) / ( 1024.0*1024 ) );
global = (double*)edge_hp_malloc( 3*num_modes*num_modes*sizeof(double), 2097152);
/* per thread scratch */
printf(" t: %f MiB\n", ((double)(l_num_threads*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) );
qs = (double*)edge_hp_malloc( l_num_threads*num_modes*num_quants*num_cfr*sizeof(double), 2097152);
for (i = 0; i < (int)num_elems; i++) {
for (j = 0; j < (int)elem_size; j++) {
q[i*elem_size + j] = libxsmm_rng_f64();
}
}
for (i = 0; i < (int)num_elems; i++) {
for (j = 0; j < (int)elem_size; j++) {
qt[i*elem_size + j] = libxsmm_rng_f64();
}
}
for (i = 0; i < (int)l_num_threads; i++) {
for (j = 0; j < (int)elem_size; j++) {
qs[i*elem_size + j] = libxsmm_rng_f64();
}
}
for (i = 0; i < (int)num_elems; i++) {
for (j = 0; j < (int)mat_st_nnz*3; j++) {
star[(i*3*mat_st_nnz)+j] = libxsmm_rng_f64();
}
}
for (i = 0; i < 3; i++) {
for (j = 0; j < num_modes*num_modes; j++) {
global[(i*num_modes*num_modes)+j] = libxsmm_rng_f64();
}
}
printf("allocation done!\n\n");
printf("running benchmark...\n");
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i, j)
#endif
{
#if defined(_OPENMP)
int mytid = omp_get_thread_num();
#else
int mytid = 0;
#endif
libxsmm_timer_tickint mystart, myend;
#if defined(HANDLE_AMOK)
size_t cur_amoks = 0;
size_t non_amoks = l_num_threads;
#endif
size_t l_el_chunk = 0;
size_t l_el_start = 0;
size_t l_el_end = 0;
/* initial work distribution */
amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end );
for (i = 0; i < (int)num_reps; i++) {
#if defined(HANDLE_AMOK)
/* did we had an amok? */
if (cur_amoks != amoks[8*l_num_threads]) {
cur_amoks = amoks[8*l_num_threads];
non_amoks = l_num_threads - cur_amoks;
/* re-balance work */
amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end );
}
#endif
mystart = libxsmm_timer_tick();
for (j = (int)l_el_start; j < (int)l_el_end; j++) {
#if 1
st_kernel( star+(j*3*mat_st_nnz) , qt+(j*elem_size), qs+(mytid*elem_size) );
a_kernel( qs+(mytid*elem_size), global , q+(j*elem_size) );
st_kernel( star+(j*3*mat_st_nnz)+mat_st_nnz , qt+(j*elem_size), qs+(mytid*elem_size) );
b_kernel( qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) );
st_kernel( star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) );
c_kernel( qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) );
#else
matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) );
matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global, q+(j*elem_size) );
matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+mat_st_nnz, qt+(j*elem_size), qs+(mytid*elem_size) );
matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) );
matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) );
matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) );
#endif
}
myend = libxsmm_timer_tick();
l_cur_thread_time[8*mytid] = libxsmm_timer_duration( mystart, myend );
l_total_thread[8*mytid] += libxsmm_timer_duration( mystart, myend );
#if defined(_OPENMP)
#pragma omp barrier
#endif
#if defined(HANDLE_AMOK)
/* checking for amoks is centralized business */
if (mytid == 0) {
/* amok check */
amok_detect( l_cur_thread_time, amoks, l_num_threads );
}
#if defined(_OPENMP)
#pragma omp barrier
#endif
#endif
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("...done!\n\n");
/* some timing stats */
time_max = 0.0;
time_min = 80000000;
time_avg = 0.0;
for (i = 0; i < (int)l_num_threads; i++) {
if( amoks[8*i] == 0 ) {
if( l_total_thread[8*i] > time_max) time_max = l_total_thread[8*i];
if( l_total_thread[8*i] < time_min) time_min = l_total_thread[8*i];
time_avg += l_total_thread[8*i];
}
}
time_avg = time_avg/((double)(l_num_threads-amoks[8*l_num_threads]));
flops_vol = (double)num_quants * (double)mat_a_nnz * (double)num_cfr * 2.0;
flops_vol += (double)num_quants * (double)mat_b_nnz * (double)num_cfr * 2.0;
flops_vol += (double)num_quants * (double)mat_c_nnz * (double)num_cfr * 2.0;
flops_vol += (double)num_modes * (double)mat_st_nnz * (double)num_cfr * 6.0; /* 3 star matrix mul */
printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8*l_num_threads]);
for ( i = 0; i < (int)l_num_threads; i++ ) {
if ( amoks[8*i] != 0 ) {
printf("%i,", i);
}
}
printf("\n");
printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9));
printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * (double)num_reps) / (l_total * 1024.0*1024.0*1024.0) );
printf("done!\n\n");
/* some empty lines at the end */
printf("\n\n");
return 0;
}
|
GB_binop__times_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fp32)
// A*D function (colscale): GB (_AxD__times_fp32)
// D*A function (rowscale): GB (_DxB__times_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fp32)
// C=scalar+B GB (_bind1st__times_fp32)
// C=scalar+B' GB (_bind1st_tran__times_fp32)
// C=A+scalar GB (_bind2nd__times_fp32)
// C=A'+scalar GB (_bind2nd_tran__times_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
    float
#define GB_BTYPE \
    float
#define GB_CTYPE \
    float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE(review): generated boilerplate -- the trailing backslash below splices
// the following comment line into the macro; comments are removed after
// splicing, so the macro still expands to 0.
#define GB_A_IS_PATTERN \
    0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used (same trailing-backslash quirk as above)
#define GB_B_IS_PATTERN \
    0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
// Cx [p]: the p-th value in C's value array
#define GB_CX(p) Cx [p]
// binary operator: z = x * y (float); indices i,j are ignored
#define GB_BINOP(z,x,y,i,j) \
    z = (x * y) ;
// true if the binop must be flipped (TIMES is commutative, so never)
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_FP32 || GxB_NO_TIMES_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (TIMES is in that list, so this kernel is generated for TIMES_FP32.)
void GB (_Cdense_ewise3_accum__times_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dispatches to the shared template, instantiated via the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__times_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when this operator/type pair is compiled out.
GrB_Info GB (_Cdense_accumB__times_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, where C is dense and b is a scalar; accumulator is TIMES on fp32.
GrB_Info GB (_Cdense_accumb__times_fp32)
(
GrB_Matrix C,               // input/output: dense matrix, accumulated in place
const GB_void *p_bwork,     // points to the scalar b (type float)
const int nthreads          // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns; kept only to
// mirror the structure of the other generated kernels.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using the
// TIMES fp32 operator.  C->x is written directly by the template.
GrB_Info GB (_AxD__times_fp32)
(
GrB_Matrix C,           // output matrix
const GrB_Matrix A,     // input matrix (unmodified)
const GrB_Matrix D,     // diagonal scaling matrix (unmodified)
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using the
// TIMES fp32 operator.  C->x is written directly by the template.
GrB_Info GB (_DxB__times_fp32)
(
GrB_Matrix C,           // output matrix
const GrB_Matrix D,     // diagonal scaling matrix (unmodified)
const GrB_Matrix B,     // input matrix (unmodified)
int nthreads            // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B with op TIMES fp32.
// For eWiseUnion, alpha/beta replace missing entries of A/B respectively.
GrB_Info GB (_AaddB__times_fp32)
(
GrB_Matrix C,                       // output matrix
const int C_sparsity,               // sparsity structure to build for C
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // if true, use the mask structurally
const bool Mask_comp,               // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,           // union (with scalars) vs plain add
const GB_void *alpha_scalar_in,     // only read when is_eWiseUnion
const GB_void *beta_scalar_in,      // only read when is_eWiseUnion
const bool Ch_is_Mh,                // if true, C's hyperlist equals M's
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are deliberately left uninitialized unless is_eWiseUnion;
// the template is expected to read them only in the union case.
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse, with op TIMES fp32.
GrB_Info GB (_AemultB_08__times_fp32)
(
GrB_Matrix C,                       // output matrix
const int C_sparsity,               // sparsity structure to build for C
const int ewise_method,             // which emult variant to apply
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, with op TIMES fp32.  GB_FLIPPED selects fmult(y,x) vs
// fmult(x,y) inside the template when the op lacks a flipped variant.
GrB_Info GB (_AemultB_02__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,                 // sparse/hypersparse input
const GrB_Matrix B,                 // bitmap/full input
const bool flipxy,                  // if true, compute op(y,x)
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, with op TIMES fp32.
GrB_Info GB (_AemultB_04__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,                 // sparse/hypersparse mask
const bool Mask_struct,
const GrB_Matrix A,                 // bitmap/full input
const GrB_Matrix B,                 // bitmap/full input
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, with
// op TIMES fp32.
GrB_Info GB (_AemultB_bitmap__times_fp32)
(
GrB_Matrix C,                       // output matrix (bitmap)
const int ewise_method,             // which emult variant to apply
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for every entry present in B (bind the scalar x as
// the first argument of the TIMES fp32 operator).  Entries absent from the
// bitmap Bb are skipped; Cx and Bx may alias each other.
GrB_Info GB (_bind1st__times_fp32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float *Bx = (float *) Bx_input ;
float x = (*((float *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only touch positions where B has an entry
if (GBB (Bb, p))
{
float bij = GBX (Bx, p, false) ;
Cx [p] = (x * bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for every entry present in A (bind the scalar y as
// the second argument of the TIMES fp32 operator).  Entries absent from the
// bitmap Ab are skipped; Cx and Ax may alias each other.
GrB_Info GB (_bind2nd__times_fp32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only touch positions where A has an entry
if (GBB (Ab, p))
{
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij * y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply TIMES fp32 with x bound first.
GrB_Info GB (_bind1st_tran__times_fp32)
(
GrB_Matrix C,
const GB_void *x_input,             // scalar x (type float)
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this kernel
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply TIMES fp32 with y bound second.
GrB_Info GB (_bind2nd_tran__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,             // scalar y (type float)
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
FunctorsOpenMP.h | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_cont_openmp_internal_FunctorsOpenMP_h
#define vtk_m_cont_openmp_internal_FunctorsOpenMP_h
#include <vtkm/cont/RuntimeDeviceInformation.h>
#include <vtkm/cont/openmp/internal/DeviceAdapterTagOpenMP.h>
#include <vtkm/cont/internal/FunctorsGeneral.h>
#include <vtkm/BinaryOperators.h>
#include <vtkm/BinaryPredicates.h>
#include <vtkm/Pair.h>
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ErrorExecution.h>
#include <omp.h>
#include <algorithm>
#include <type_traits>
#include <vector>
// Wrap all '#pragma omp ...' calls in this macro so we can disable them in
// non-omp builds and avoid a multitude of 'ignoring pragma..." warnings.
#ifdef _OPENMP
#define VTKM_OPENMP_DIRECTIVE_IMPL(fullDir) _Pragma(#fullDir)
#define VTKM_OPENMP_DIRECTIVE(dir) VTKM_OPENMP_DIRECTIVE_IMPL(omp dir)
#else // _OPENMP
#define VTKM_OPENMP_DIRECTIVE(directive)
#endif // _OPENMP
// See "OpenMP data sharing" section of
// https://www.gnu.org/software/gcc/gcc-9/porting_to.html. OpenMP broke
// backwards compatibility regarding const variable handling.
// tl;dr, put all const variables accessed from openmp blocks in a
// VTKM_OPENMP_SHARED_CONST(var1, var2, ...) macro. This will do The Right Thing
// on all gcc.
#if defined(VTKM_GCC) && (__GNUC__ < 9)
#define VTKM_OPENMP_SHARED_CONST(...)
#else
#define VTKM_OPENMP_SHARED_CONST(...) shared(__VA_ARGS__)
#endif
// When defined, supported type / operator combinations will use the OpenMP
// reduction(...) clause. Otherwise, all reductions use the general
// implementation with a manual reduction once the threads complete.
// I don't know how, but the benchmarks currently perform better without the
// specializations.
//#define VTKM_OPENMP_USE_NATIVE_REDUCTION
namespace vtkm
{
namespace cont
{
namespace openmp
{
constexpr static vtkm::Id VTKM_CACHE_LINE_SIZE = 64;
constexpr static vtkm::Id VTKM_PAGE_SIZE = 4096;
// Returns ceil(num/den) for integral types
// Integer ceiling division: smallest whole number of 'den'-sized groups
// needed to hold 'num' items.  Intended for non-negative integral inputs.
template <typename T>
static constexpr T CeilDivide(const T& num, const T& den)
{
  return (num + den - 1) / den;
}
// Computes the number of values per chunk. Note that numChunks + chunkSize may
// exceed numVals, so be sure to check upper limits.
// Computes numChunks and valuesPerChunk for splitting numVals values of
// bytesPerValue bytes each across numThreads threads.  Chunks are sized in
// whole pages.  numChunks * valuesPerChunk may exceed numVals, so callers
// must clamp at the upper end.
static void ComputeChunkSize(const vtkm::Id numVals,
                             const vtkm::Id numThreads,
                             const vtkm::Id chunksPerThread,
                             const vtkm::Id bytesPerValue,
                             vtkm::Id& numChunks,
                             vtkm::Id& valuesPerChunk)
{
  // Work in whole pages so chunks align with page boundaries:
  const vtkm::Id totalBytes = numVals * bytesPerValue;
  const vtkm::Id totalPages = CeilDivide(totalBytes, VTKM_PAGE_SIZE);
  // Only honor chunksPerThread when there are enough pages to go around;
  // otherwise fall back to one chunk per thread:
  const vtkm::Id requestedChunks = numThreads * chunksPerThread;
  numChunks = (totalPages > requestedChunks) ? requestedChunks : numThreads;
  const vtkm::Id pagesPerChunk = CeilDivide(totalPages, numChunks);
  valuesPerChunk = CeilDivide(pagesPerChunk * VTKM_PAGE_SIZE, bytesPerValue);
}
// CleanArrayRef<T>: strips an ArrayPortalValueReference wrapper down to the
// portal's underlying ValueType; any other T is passed through unchanged.
template <typename T>
struct CleanArrayRefImpl
{
using type = T;
};
// Specialization: unwrap the reference proxy returned by array portals.
template <typename PortalType>
struct CleanArrayRefImpl<vtkm::internal::ArrayPortalValueReference<PortalType>>
{
using type = typename PortalType::ValueType;
};
template <typename T>
using CleanArrayRef = typename CleanArrayRefImpl<T>::type;
// Raw copy for the case where input and output value types already match.
// Uses std::copy so its memmove optimization can kick in.
template <typename T, typename U>
static void DoCopy(T src, U dst, vtkm::Id numVals, std::true_type)
{
  if (numVals != 0)
  {
    std::copy(src, src + numVals, dst);
  }
}
// Don't use std::copy when type conversion is required because MSVC.
template <typename InIterT, typename OutIterT>
static void DoCopy(InIterT inIter, OutIterT outIter, vtkm::Id numVals, std::false_type)
{
  using InValueType = CleanArrayRef<typename std::iterator_traits<InIterT>::value_type>;
  using OutValueType = CleanArrayRef<typename std::iterator_traits<OutIterT>::value_type>;
  // Converting first to InValueType and only then to OutValueType is
  // deliberate: *inIter may be an ArrayPortalValueReference, which knows how
  // to convert itself to InValueType but not necessarily to OutValueType.
  for (vtkm::Id i = 0; i < numVals; ++i, ++inIter, ++outIter)
  {
    *outIter = static_cast<OutValueType>(static_cast<InValueType>(*inIter));
  }
}
template <typename InIterT, typename OutIterT>
static void DoCopy(InIterT inIter, OutIterT outIter, vtkm::Id numVals)
{
using InValueType = CleanArrayRef<typename std::iterator_traits<InIterT>::value_type>;
using OutValueType = CleanArrayRef<typename std::iterator_traits<OutIterT>::value_type>;
DoCopy(inIter, outIter, numVals, std::is_same<InValueType, OutValueType>());
}
// Parallel copy of numVals values from inPortal[inStart...] to
// outPortal[outStart...], chunked on page boundaries so each thread can use
// std::copy (and its memmove optimization) on a contiguous run.
template <typename InPortalT, typename OutPortalT>
static void CopyHelper(InPortalT inPortal,
OutPortalT outPortal,
vtkm::Id inStart,
vtkm::Id outStart,
vtkm::Id numVals)
{
using InValueT = typename InPortalT::ValueType;
using OutValueT = typename OutPortalT::ValueType;
constexpr auto isSame = std::is_same<InValueT, OutValueT>();
auto inIter = vtkm::cont::ArrayPortalToIteratorBegin(inPortal) + inStart;
auto outIter = vtkm::cont::ArrayPortalToIteratorBegin(outPortal) + outStart;
vtkm::Id valuesPerChunk;
VTKM_OPENMP_DIRECTIVE(parallel default(none) shared(inIter, outIter, valuesPerChunk, numVals)
VTKM_OPENMP_SHARED_CONST(isSame))
{
// One thread computes the chunking; the implicit barrier at the end of
// the 'single' region publishes valuesPerChunk to all threads.
VTKM_OPENMP_DIRECTIVE(single)
{
// Evenly distribute full pages to all threads. We manually chunk the
// data here so that we can exploit std::copy's memmove optimizations.
vtkm::Id numChunks;
vtkm::Id numThreads;
vtkm::cont::RuntimeDeviceInformation{}
.GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagOpenMP())
.GetThreads(numThreads);
ComputeChunkSize(numVals, numThreads, 8, sizeof(InValueT), numChunks, valuesPerChunk);
}
VTKM_OPENMP_DIRECTIVE(for schedule(static))
for (vtkm::Id i = 0; i < numVals; i += valuesPerChunk)
{
// Clamp the final chunk so we never read/write past numVals:
vtkm::Id chunkSize = std::min(numVals - i, valuesPerChunk);
DoCopy(inIter + i, outIter + i, chunkSize, isSame);
}
}
}
// Helper for a chunked, parallel stream-compaction (CopyIf / StreamCompact).
// Usage: Initialize() once, then CopyIf() per chunk (in parallel), then
// Reduce() serially to pack the per-chunk results and get the output size.
struct CopyIfHelper
{
vtkm::Id NumValues;   // total number of input values
vtkm::Id NumThreads;  // runtime thread count for the OpenMP device
vtkm::Id ValueSize;   // sizeof a value, used for page-based chunking
vtkm::Id NumChunks;   // number of chunks the input is split into
vtkm::Id ChunkSize;   // values per chunk
std::vector<vtkm::Id> EndIds;  // per-chunk end position of kept values
CopyIfHelper() = default;
// Compute the chunking for numValues values of valueSize bytes.
void Initialize(vtkm::Id numValues, vtkm::Id valueSize)
{
this->NumValues = numValues;
vtkm::cont::RuntimeDeviceInformation{}
.GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagOpenMP())
.GetThreads(this->NumThreads);
this->ValueSize = valueSize;
// Evenly distribute pages across the threads. We manually chunk the
// data here so that we can exploit std::copy's memmove optimizations.
ComputeChunkSize(
this->NumValues, this->NumThreads, 8, valueSize, this->NumChunks, this->ChunkSize);
this->EndIds.resize(static_cast<std::size_t>(this->NumChunks));
}
// Compact one chunk: copy values whose stencil passes pred into outIter,
// writing in place starting at the chunk's own start position.  Records
// the chunk's output end in EndIds for the later Reduce step.
template <typename InIterT, typename StencilIterT, typename OutIterT, typename PredicateT>
void CopyIf(InIterT inIter,
StencilIterT stencilIter,
OutIterT outIter,
PredicateT pred,
vtkm::Id chunk)
{
// Clamp both ends to NumValues: the last chunk may be short.
vtkm::Id startPos = std::min(chunk * this->ChunkSize, this->NumValues);
vtkm::Id endPos = std::min((chunk + 1) * this->ChunkSize, this->NumValues);
vtkm::Id outPos = startPos;
for (vtkm::Id inPos = startPos; inPos < endPos; ++inPos)
{
if (pred(stencilIter[inPos]))
{
outIter[outPos++] = inIter[inPos];
}
}
this->EndIds[static_cast<std::size_t>(chunk)] = outPos;
}
// Serially slide each chunk's kept values down so the output is packed
// contiguously.  Returns the total number of values kept.
template <typename OutIterT>
vtkm::Id Reduce(OutIterT data)
{
vtkm::Id endPos = this->EndIds.front();
for (vtkm::Id i = 1; i < this->NumChunks; ++i)
{
vtkm::Id chunkStart = std::min(i * this->ChunkSize, this->NumValues);
vtkm::Id chunkEnd = this->EndIds[static_cast<std::size_t>(i)];
vtkm::Id numValuesToCopy = chunkEnd - chunkStart;
// Skip the copy when the chunk is already in place (chunkStart == endPos):
if (numValuesToCopy > 0 && chunkStart != endPos)
{
std::copy(data + chunkStart, data + chunkEnd, data + endPos);
}
endPos += numValuesToCopy;
}
return endPos;
}
};
#ifdef VTKM_OPENMP_USE_NATIVE_REDUCTION
// OpenMP only declares reduction operations for primitive types. This utility
// detects if a type T is supported.
template <typename T>
struct OpenMPReductionSupported : std::false_type
{
};
// Explicit opt-ins for the primitive types OpenMP reductions handle:
template <>
struct OpenMPReductionSupported<Int8> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt8> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Int16> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt16> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Int32> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt32> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Int64> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt64> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Float32> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Float64> : std::true_type
{
};
#else
// Native reductions disabled: report every type as unsupported so the
// generic manual-reduction path is always used.
template <typename T>
using OpenMPReductionSupported = std::false_type;
#endif // VTKM_OPENMP_USE_NATIVE_REDUCTION
// Parallel reduction driver for the OpenMP device adapter.  Execute() picks
// a generic manual reduction (optionally with a native OpenMP reduction
// specialization when VTKM_OPENMP_USE_NATIVE_REDUCTION is defined).
struct ReduceHelper
{
// std::is_integral, but adapted to see through vecs and pairs.
template <typename T>
struct IsIntegral : public std::is_integral<T>
{
};
template <typename T, vtkm::IdComponent Size>
struct IsIntegral<vtkm::Vec<T, Size>> : public std::is_integral<T>
{
};
template <typename T, typename U>
struct IsIntegral<vtkm::Pair<T, U>>
: public std::integral_constant<bool, std::is_integral<T>::value && std::is_integral<U>::value>
{
};
// Generic implementation: each thread reduces a portion of the data, then
// the per-thread partials are folded into 'init' serially.
template <typename PortalT, typename ReturnType, typename Functor>
static ReturnType Execute(PortalT portal, ReturnType init, Functor functorIn, std::false_type)
{
internal::WrappedBinaryOperator<ReturnType, Functor> f(functorIn);
const vtkm::Id numVals = portal.GetNumberOfValues();
auto data = vtkm::cont::ArrayPortalToIteratorBegin(portal);
bool doParallel = false;
vtkm::Id numThreads = 0;
std::unique_ptr<ReturnType[]> threadData;
VTKM_OPENMP_DIRECTIVE(parallel default(none) firstprivate(f) shared(
data, doParallel, numThreads, threadData) VTKM_OPENMP_SHARED_CONST(numVals))
{
int tid = omp_get_thread_num();
// One thread decides whether a parallel reduction is worthwhile and
// allocates the per-thread partial-result buffer; the implicit barrier
// at the end of 'single' publishes doParallel/threadData to all threads.
VTKM_OPENMP_DIRECTIVE(single)
{
vtkm::cont::RuntimeDeviceInformation{}
.GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagOpenMP())
.GetThreads(numThreads);
// Need at least two values per thread to seed each partial result:
if (numVals >= numThreads * 2)
{
doParallel = true;
threadData.reset(new ReturnType[static_cast<std::size_t>(numThreads)]);
}
}
if (doParallel)
{
// Static dispatch to unroll non-integral types:
const ReturnType localResult = ReduceHelper::DoParallelReduction<ReturnType>(
data, numVals, tid, numThreads, f, IsIntegral<ReturnType>{});
threadData[static_cast<std::size_t>(tid)] = localResult;
}
} // end parallel
if (doParallel)
{
// do the final reduction serially:
for (size_t i = 0; i < static_cast<size_t>(numThreads); ++i)
{
init = f(init, threadData[i]);
}
}
else
{
// Not enough threads. Do the entire reduction in serial:
for (vtkm::Id i = 0; i < numVals; ++i)
{
init = f(init, data[i]);
}
}
return init;
}
// non-integer reduction: unroll loop manually.
// This gives faster code for floats and non-trivial types.
template <typename ReturnType, typename IterType, typename FunctorType>
static ReturnType DoParallelReduction(IterType data,
const vtkm::Id& numVals,
const int& tid,
const int& numThreads,
FunctorType f,
std::false_type /* isIntegral */)
{
// Use the first (numThreads*2) values for initializing:
ReturnType accum = f(data[2 * tid], data[2 * tid + 1]);
// Unrolled region covers [offset, unrollEnd) in steps of 4:
const vtkm::Id offset = numThreads * 2;
const vtkm::Id end = std::max(((numVals / 4) * 4) - 4, offset);
const vtkm::Id unrollEnd = end - ((end - offset) % 4);
vtkm::Id i = offset;
// When initializing the looping iterator to a non integral type, intel compilers will
// convert the iterator type to an unsigned value
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
VTKM_OPENMP_DIRECTIVE(for schedule(static))
for (i = offset; i < unrollEnd; i += 4)
#pragma GCC diagnostic pop
{
const auto t1 = f(data[i], data[i + 1]);
const auto t2 = f(data[i + 2], data[i + 3]);
accum = f(accum, t1);
accum = f(accum, t2);
}
// Let the last thread mop up any remaining values as it would
// have just accessed the adjacent data
if (tid == numThreads - 1)
{
for (i = unrollEnd; i < numVals; ++i)
{
accum = f(accum, data[i]);
}
}
return accum;
}
// Integer reduction: no unrolling. Ints vectorize easily and unrolling can
// hurt performance.
template <typename ReturnType, typename IterType, typename FunctorType>
static ReturnType DoParallelReduction(IterType data,
const vtkm::Id& numVals,
const int& tid,
const int& numThreads,
FunctorType f,
std::true_type /* isIntegral */)
{
// Use the first (numThreads*2) values for initializing:
ReturnType accum = f(data[2 * tid], data[2 * tid + 1]);
// Assign each thread chunks of the remaining values for local reduction
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
VTKM_OPENMP_DIRECTIVE(for schedule(static))
for (vtkm::Id i = numThreads * 2; i < numVals; i++)
#pragma GCC diagnostic pop
{
accum = f(accum, data[i]);
}
return accum;
}
#ifdef VTKM_OPENMP_USE_NATIVE_REDUCTION
// Specialize for vtkm functors with OpenMP special cases:
#define VTKM_OPENMP_SPECIALIZE_REDUCE1(FunctorType, PragmaString) \
template <typename PortalT, typename ReturnType> \
static ReturnType Execute( \
PortalT portal, ReturnType value, FunctorType functorIn, std::true_type) \
{ \
const vtkm::Id numValues = portal.GetNumberOfValues(); \
internal::WrappedBinaryOperator<ReturnType, FunctorType> f(functorIn); \
_Pragma(#PragmaString) for (vtkm::Id i = 0; i < numValues; ++i) \
{ \
value = f(value, portal.Get(i)); \
} \
return value; \
}
// Constructing the pragma string inside the _Pragma call doesn't work so
// we jump through a hoop:
#define VTKM_OPENMP_SPECIALIZE_REDUCE(FunctorType, Operator) \
VTKM_OPENMP_SPECIALIZE_REDUCE1(FunctorType, "omp parallel for reduction(" #Operator ":value)")
// + (Add, Sum)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Add, +)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Sum, +)
// * (Multiply, Product)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Multiply, *)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Product, *)
// - (Subtract)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Subtract, -)
// & (BitwiseAnd)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseAnd, &)
// | (BitwiseOr)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseOr, |)
// ^ (BitwiseXor)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseXor, ^)
// && (LogicalAnd)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::LogicalAnd, &&)
// || (LogicalOr)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::LogicalOr, ||)
// min (Minimum)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Minimum, min)
// max (Maximum)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Maximum, max)
#undef VTKM_OPENMP_SPECIALIZE_REDUCE
#undef VTKM_OPENMP_SPECIALIZE_REDUCE1
#endif // VTKM_OPENMP_USE_NATIVE_REDUCTION
};
// Parallel reduce-by-key: each thread reduces runs of equal consecutive keys
// in its own partition, then the per-thread results are stitched together in
// thread order (merging runs that span a partition boundary).  The output
// arrays are finally shrunk to the number of unique runs.
template <typename KeysInArray,
typename ValuesInArray,
typename KeysOutArray,
typename ValuesOutArray,
typename BinaryFunctor>
void ReduceByKeyHelper(KeysInArray keysInArray,
ValuesInArray valuesInArray,
KeysOutArray keysOutArray,
ValuesOutArray valuesOutArray,
BinaryFunctor functor)
{
using KeyType = typename KeysInArray::ValueType;
using ValueType = typename ValuesInArray::ValueType;
vtkm::cont::Token token;
const vtkm::Id numValues = keysInArray.GetNumberOfValues();
auto keysInPortal = keysInArray.PrepareForInput(DeviceAdapterTagOpenMP(), token);
auto valuesInPortal = valuesInArray.PrepareForInput(DeviceAdapterTagOpenMP(), token);
auto keysIn = vtkm::cont::ArrayPortalToIteratorBegin(keysInPortal);
auto valuesIn = vtkm::cont::ArrayPortalToIteratorBegin(valuesInPortal);
// Outputs are allocated at full size; shrunk after the reduction below.
auto keysOutPortal = keysOutArray.PrepareForOutput(numValues, DeviceAdapterTagOpenMP(), token);
auto valuesOutPortal =
valuesOutArray.PrepareForOutput(numValues, DeviceAdapterTagOpenMP(), token);
auto keysOut = vtkm::cont::ArrayPortalToIteratorBegin(keysOutPortal);
auto valuesOut = vtkm::cont::ArrayPortalToIteratorBegin(valuesOutPortal);
internal::WrappedBinaryOperator<ValueType, BinaryFunctor> f(functor);
vtkm::Id outIdx = 0;
VTKM_OPENMP_DIRECTIVE(parallel default(none) firstprivate(keysIn, valuesIn, keysOut, valuesOut, f)
shared(outIdx) VTKM_OPENMP_SHARED_CONST(numValues))
{
int tid = omp_get_thread_num();
vtkm::Id numThreads = 0;
vtkm::cont::RuntimeDeviceInformation{}
.GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagOpenMP())
.GetThreads(numThreads);
// Determine bounds for this thread's scan operation:
vtkm::Id chunkSize = (numValues + numThreads - 1) / numThreads;
vtkm::Id scanIdx = std::min(tid * chunkSize, numValues);
vtkm::Id scanEnd = std::min(scanIdx + chunkSize, numValues);
auto threadKeysBegin = keysOut + scanIdx;
auto threadValuesBegin = valuesOut + scanIdx;
auto threadKey = threadKeysBegin;
auto threadValue = threadValuesBegin;
// Reduce each thread's partition:
KeyType rangeKey;
ValueType rangeValue;
for (;;)
{
if (scanIdx < scanEnd)
{
rangeKey = keysIn[scanIdx];
rangeValue = valuesIn[scanIdx];
++scanIdx;
// Locate end of current range:
while (scanIdx < scanEnd && static_cast<KeyType>(keysIn[scanIdx]) == rangeKey)
{
rangeValue = f(rangeValue, valuesIn[scanIdx]);
++scanIdx;
}
// Emit the reduced (key, value) pair for this run:
*threadKey = rangeKey;
*threadValue = rangeValue;
++threadKey;
++threadValue;
}
else
{
break;
}
}
if (tid == 0)
{
outIdx = static_cast<vtkm::Id>(threadKey - threadKeysBegin);
}
// Combine the reduction results. Skip tid == 0, since it's already in
// the correct location:
for (int i = 1; i < numThreads; ++i)
{
// This barrier ensures that:
// 1) Threads remain synchronized through this final reduction loop.
// 2) The outIdx variable is initialized by thread 0.
// 3) All threads have reduced their partitions.
VTKM_OPENMP_DIRECTIVE(barrier)
if (tid == i)
{
// Check if the previous thread's last key matches our first:
if (outIdx > 0 && threadKeysBegin < threadKey && keysOut[outIdx - 1] == *threadKeysBegin)
{
valuesOut[outIdx - 1] = f(valuesOut[outIdx - 1], *threadValuesBegin);
++threadKeysBegin;
++threadValuesBegin;
}
// Copy reduced partition to final location (if needed)
if (threadKeysBegin < threadKey && threadKeysBegin != keysOut + outIdx)
{
std::copy(threadKeysBegin, threadKey, keysOut + outIdx);
std::copy(threadValuesBegin, threadValue, valuesOut + outIdx);
}
outIdx += static_cast<vtkm::Id>(threadKey - threadKeysBegin);
} // end tid == i
} // end combine reduction
} // end parallel
token.DetachFromAll();
// Shrink the outputs to the number of unique key runs, preserving contents:
keysOutArray.Allocate(outIdx, vtkm::CopyFlag::On);
valuesOutArray.Allocate(outIdx, vtkm::CopyFlag::On);
}
// Task-based parallel std::unique: the input is recursively split into a
// binary tree of ranges, leaves are uniquified with std::unique, and sibling
// results are merged (dropping a duplicate at the seam) on the way back up.
// Execute() returns the number of unique values left at the front of Data.
template <typename IterT, typename RawPredicateT>
struct UniqueHelper
{
using ValueType = typename std::iterator_traits<IterT>::value_type;
using PredicateT = internal::WrappedBinaryOperator<bool, RawPredicateT>;
// One node of the divide-and-conquer tree.
struct Node
{
vtkm::Id2 InputRange{ -1, -1 };
vtkm::Id2 OutputRange{ -1, -1 };
// Pad the node out to the size of a cache line to prevent false sharing:
static constexpr size_t DataSize = 2 * sizeof(vtkm::Id2);
static constexpr size_t NumCacheLines = CeilDivide<size_t>(DataSize, VTKM_CACHE_LINE_SIZE);
static constexpr size_t PaddingSize = NumCacheLines * VTKM_CACHE_LINE_SIZE - DataSize;
unsigned char Padding[PaddingSize];
};
IterT Data;            // input/output iterator; uniquified in place
vtkm::Id NumValues;    // number of input values
PredicateT Predicate;  // equality predicate
vtkm::Id LeafSize;     // values per leaf; computed in Prepare()
std::vector<Node> Nodes;  // preallocated tree nodes
size_t NextNode;       // bump-allocator index into Nodes
UniqueHelper(IterT iter, vtkm::Id numValues, RawPredicateT pred)
: Data(iter)
, NumValues(numValues)
, Predicate(pred)
, LeafSize(0)
, NextNode(0)
{
}
// Run the uniquification; returns the number of unique values.
vtkm::Id Execute()
{
vtkm::Id outSize = 0;
VTKM_OPENMP_DIRECTIVE(parallel default(shared))
{
VTKM_OPENMP_DIRECTIVE(single)
{
this->Prepare();
// Kick off task-based divide-and-conquer uniquification:
Node* rootNode = this->AllocNode();
rootNode->InputRange = vtkm::Id2(0, this->NumValues);
this->Uniquify(rootNode);
outSize = rootNode->OutputRange[1] - rootNode->OutputRange[0];
}
}
return outSize;
}
private:
// Compute LeafSize and preallocate every node the tree can need.
void Prepare()
{
// Figure out how many values each thread should handle:
vtkm::Id numThreads = 0;
vtkm::cont::RuntimeDeviceInformation{}
.GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagOpenMP())
.GetThreads(numThreads);
vtkm::Id chunksPerThread = 8;
vtkm::Id numChunks;
ComputeChunkSize(
this->NumValues, numThreads, chunksPerThread, sizeof(ValueType), numChunks, this->LeafSize);
// Compute an upper-bound of the number of nodes in the tree:
std::size_t numNodes = static_cast<std::size_t>(numChunks);
while (numChunks > 1)
{
numChunks = (numChunks + 1) / 2;
numNodes += static_cast<std::size_t>(numChunks);
}
this->Nodes.resize(numNodes);
this->NextNode = 0;
}
// Thread-safe bump allocation of the next preallocated node.
Node* AllocNode()
{
size_t nodeIdx;
// GCC emits a false positive "value computed but not used" for this block:
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-value"
VTKM_OPENMP_DIRECTIVE(atomic capture)
{
nodeIdx = this->NextNode;
++this->NextNode;
}
#pragma GCC diagnostic pop
VTKM_ASSERT(nodeIdx < this->Nodes.size());
return &this->Nodes[nodeIdx];
}
bool IsLeaf(const vtkm::Id2& range) { return (range[1] - range[0]) <= this->LeafSize; }
// Not an strict midpoint, but ensures that the first range will always be
// a multiple of the leaf size.
vtkm::Id ComputeMidpoint(const vtkm::Id2& range)
{
const vtkm::Id n = range[1] - range[0];
const vtkm::Id np = this->LeafSize;
return CeilDivide(n / 2, np) * np + range[0];
}
// Recursively uniquify node's InputRange in place, setting OutputRange.
void Uniquify(Node* node)
{
if (!this->IsLeaf(node->InputRange))
{
vtkm::Id midpoint = this->ComputeMidpoint(node->InputRange);
Node* right = this->AllocNode();
Node* left = this->AllocNode();
right->InputRange = vtkm::Id2(midpoint, node->InputRange[1]);
// Intel compilers seem to have trouble following the 'this' pointer
// when launching tasks, resulting in a corrupt task environment.
// Explicitly copying the pointer into a local variable seems to fix this.
auto explicitThis = this;
VTKM_OPENMP_DIRECTIVE(taskgroup)
{
VTKM_OPENMP_DIRECTIVE(task) { explicitThis->Uniquify(right); }
left->InputRange = vtkm::Id2(node->InputRange[0], midpoint);
this->Uniquify(left);
} // end taskgroup. Both sides of the tree will be completed here.
// Combine the ranges in the left side:
// If the last value of the left half equals the first of the right,
// drop the duplicate at the seam:
if (this->Predicate(this->Data[left->OutputRange[1] - 1], this->Data[right->OutputRange[0]]))
{
++right->OutputRange[0];
}
// Slide the right half's unique values down next to the left half's:
vtkm::Id numVals = right->OutputRange[1] - right->OutputRange[0];
DoCopy(this->Data + right->OutputRange[0], this->Data + left->OutputRange[1], numVals);
node->OutputRange[0] = left->OutputRange[0];
node->OutputRange[1] = left->OutputRange[1] + numVals;
}
else
{
// Leaf: serial std::unique over this range.
auto start = this->Data + node->InputRange[0];
auto end = this->Data + node->InputRange[1];
end = std::unique(start, end, this->Predicate);
node->OutputRange[0] = node->InputRange[0];
node->OutputRange[1] = node->InputRange[0] + static_cast<vtkm::Id>(end - start);
}
}
};
}
}
} // end namespace vtkm::cont::openmp
#endif // vtk_m_cont_openmp_internal_FunctorsOpenMP_h
|
NAS_IS.c | //---------------------------------------------------------------------
// program IS
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#if !defined(CLASS_W) && !defined(CLASS_S) && !defined(CLASS_A) && !defined(CLASS_B) && !defined(CLASS_C) && !defined(CLASS_D) && !defined(CLASS_E)
# define CLASS_W
#endif
//----------
// Class S:
//----------
#ifdef CLASS_S
# define TOTAL_KEYS_LOG_2 16
# define MAX_KEY_LOG_2 11
# define NUM_BUCKETS_LOG_2 9
# define CLASS 'S'
#endif
//----------
// Class W:
//----------
#ifdef CLASS_W
# define TOTAL_KEYS_LOG_2 20
# define MAX_KEY_LOG_2 16
# define NUM_BUCKETS_LOG_2 10
# define CLASS 'W'
#endif
//----------
// Class A:
//----------
#ifdef CLASS_A
# define TOTAL_KEYS_LOG_2 23
# define MAX_KEY_LOG_2 19
# define NUM_BUCKETS_LOG_2 10
# define CLASS 'A'
#endif
//----------
// Class B:
//----------
#ifdef CLASS_B
# define TOTAL_KEYS_LOG_2 25
# define MAX_KEY_LOG_2 21
# define NUM_BUCKETS_LOG_2 10
# define CLASS 'B'
#endif
//----------
// Class C:
//----------
#ifdef CLASS_C
# define TOTAL_KEYS_LOG_2 27
# define MAX_KEY_LOG_2 23
# define NUM_BUCKETS_LOG_2 10
# define CLASS 'C'
#endif
//----------
// Class D:
//----------
#ifdef CLASS_D
# define TOTAL_KEYS_LOG_2 31
# define MAX_KEY_LOG_2 27
# define NUM_BUCKETS_LOG_2 10
# define CLASS 'D'
#endif
#if CLASS == 'D'
#define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2)
#else
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#endif
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
#if CLASS == 'D'
typedef long INT_TYPE;
#else
typedef int INT_TYPE;
#endif
typedef struct
{
double real;
double imag;
} dcomplex;
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[MAX_KEY],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE];
#ifdef USE_BUCKETS
INT_TYPE bucket_size[NUM_BUCKETS],
bucket_ptrs[NUM_BUCKETS];
#endif
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427, 17148, 23627, 62548, 4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0, 18, 346, 64917, 65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773, 934767, 875723, 898999, 404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249, 11698, 1039987, 1043896, 1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377, 662041, 5336171, 3642833, 4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104, 17523, 123928, 8288932, 8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869, 812306, 5102857, 18232239, 26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937, 10244, 59149, 33135281, 99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927, 72999161, 74326391, 129606274, 21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147, 882988, 266290, 133997595, 133525895},
D_test_index_array[TEST_ARRAY_SIZE] =
{1317351170, 995930646, 1157283250, 1503301535, 1453734525},
D_test_rank_array[TEST_ARRAY_SIZE] =
{1, 36538729, 1978098519, 2145192618, 2147425337};
/***********************/
/* function prototypes */
/***********************/
double randlc( double *X, double *A );
void full_verify( void );
void c_print_results( char *name, char class, int n1, int n2, int n3, int niter,
double t, double mops, char *optype, int passed_verification);
double start[64], elapsed[64];
double elapsed_time( void );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
void wtime(double *t);
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/*
 * randlc - NPB portable linear congruential generator.
 *
 * Returns a uniformly distributed pseudorandom double in (0, 1) from the
 * recurrence X_{k+1} = A * X_k (mod 2^46).  *X is the seed and is updated
 * in place; *A is the (constant) multiplier.  The 46-bit product is formed
 * exactly in double precision by splitting both operands into 23-bit halves.
 *
 * Fix: KS and the power-of-two constants were plain locals initialized to 0,
 * so the constant-building loops re-ran on every single call.  They are now
 * static (as in the reference NPB sources) and computed exactly once.
 * NOTE: this makes the function non-reentrant, matching the original serial
 * benchmark's usage.
 */
double randlc( double *X, double *A )
{
    static int KS = 0;
    static double R23, R46, T23, T46;   /* 2^-23, 2^-46, 2^23, 2^46 */
    double T1, T2, T3, T4;
    double A1;
    double A2;
    double X1;
    double X2;
    double Z;
    int i, j;
    if (KS == 0)
    {
        /* Build the constants by repeated halving/doubling so the
           values are exact in binary floating point. */
        R23 = 1.0;
        R46 = 1.0;
        T23 = 1.0;
        T46 = 1.0;
        for (i = 1; i <= 23; i++)
        {
            R23 = 0.50 * R23;
            T23 = 2.0 * T23;
        }
        for (i = 1; i <= 46; i++)
        {
            R46 = 0.50 * R46;
            T46 = 2.0 * T46;
        }
        KS = 1;
    }
    /* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */
    T1 = R23 * *A;
    j = T1;
    A1 = j;
    A2 = *A - T23 * A1;
    /* Break X into two parts such that X = 2^23 * X1 + X2, compute
       Z = A1 * X2 + A2 * X1 (mod 2^23), and then
       X = 2^23 * Z + A2 * X2 (mod 2^46). */
    T1 = R23 * *X;
    j = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    T1 = A1 * X2 + A2 * X1;
    j = R23 * T1;
    T2 = j;
    Z = T1 - T23 * T2;
    T3 = T23 * Z + A2 * X2;
    j = R46 * T3;
    T4 = j;
    *X = T3 - T46 * T4;
    return (R46 * *X);
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/* Fill key_array with NUM_KEYS pseudorandom keys in [0, MAX_KEY).
   Each key is MAX_KEY/4 times the sum of four uniform (0,1) draws,
   so keys cluster around MAX_KEY/2 (the NPB IS key distribution). */
void create_seq( double seed, double a )
{
    INT_TYPE idx;
    const INT_TYPE quarter_key = MAX_KEY / 4;
    for (idx = 0; idx < NUM_KEYS; idx++)
    {
        double draw = randlc(&seed, &a)
                    + randlc(&seed, &a)
                    + randlc(&seed, &a)
                    + randlc(&seed, &a);
        key_array[idx] = quarter_key * draw;
    }
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/* Performs the final (untimed) full sort of the key sequence using the rank
   counts left in key_buff_ptr_global by the last rank() call, then checks
   that the result is nondecreasing.  Increments passed_verification on
   success; prints the number of out-of-order keys otherwise. */
void full_verify( void )
{
    INT_TYPE i, j;
    /* Now, finally, sort the keys: */
#ifdef USE_BUCKETS
    /* key_buff2[] already has the proper information, so do nothing */
#else
    /* Copy keys into work array; keys in key_array will be reassigned. */
    /* NOTE(review): firstprivate(key_array) gives every thread a private
       copy of the whole global array just to read it -- shared would
       suffice; verify this was intentional before changing. */
#pragma omp parallel for default(shared) private(i) firstprivate(key_array)
    for ( i = 0; i < NUM_KEYS; i++ )
        key_buff2[i] = key_buff2[i] = key_array[i];
#endif
    /* Counting-sort scatter: key_buff_ptr_global[k] holds the rank (one past
       the last slot) of key k; pre-decrement turns it into the destination
       index.  Must stay sequential: the counters are shared and mutated. */
    for ( i = 0; i < NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];
    /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
#pragma omp parallel for default(shared) private(i) firstprivate(key_array) reduction(+ : j)
    for ( i = 1; i < NUM_KEYS; i++ )
        if ( key_array[i - 1] > key_array[i] )
            j++;
    if ( j != 0 )
    {
        printf( "Full_verify: number of keys out of sort: %ld\n",
                (long)j );
    }
    else
        passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/* One ranking iteration of the IS benchmark: counts the population of every
   key value and prefix-sums the counts so key_buff1[k] becomes the rank of
   key k.  Also runs the partial verification against the per-class expected
   ranks (the expected values are shifted by +/- iteration in a pattern that
   differs per class and test index, per the NPB specification). */
void rank( int iteration )
{
    INT_TYPE i, k;
    INT_TYPE *key_buff_ptr, *key_buff_ptr2;
#ifdef USE_BUCKETS
    int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
    INT_TYPE key;
#endif
    /* Perturb two keys per iteration so each iteration ranks a slightly
       different sequence (prevents trivially cached results). */
    key_array[iteration] = iteration;
    key_array[iteration + MAX_ITERATIONS] = MAX_KEY - iteration;
    /* Determine where the partial verify test keys are, load into */
    /* top of array bucket_size */
    for ( i = 0; i < TEST_ARRAY_SIZE; i++ )
        partial_verify_vals[i] = key_array[test_index_array[i]];
#ifdef USE_BUCKETS
    /* Initialize */
    for ( i = 0; i < NUM_BUCKETS; i++ )
        bucket_size[i] = 0;
    /* Determine the number of keys in each bucket */
    for ( i = 0; i < NUM_KEYS; i++ )
        bucket_size[key_array[i] >> shift]++;
    /* Accumulative bucket sizes are the bucket pointers */
    bucket_ptrs[0] = 0;
    for ( i = 1; i < NUM_BUCKETS; i++ )
        bucket_ptrs[i] = bucket_ptrs[i - 1] + bucket_size[i - 1];
    /* Sort into appropriate bucket */
    for ( i = 0; i < NUM_KEYS; i++ )
    {
        key = key_array[i];
        key_buff2[bucket_ptrs[key >> shift]++] = key;
    }
    key_buff_ptr2 = key_buff2;
#else
    key_buff_ptr2 = key_array;
#endif
    /* Clear the work array */
#pragma omp parallel for default(shared) private(i)
    for ( i = 0; i < MAX_KEY; i++ )
        key_buff1[i] = 0;
    /* Ranking of all keys occurs in this section: */
    key_buff_ptr = key_buff1;
    /* In this section, the keys themselves are used as their
       own indexes to determine how many of each there are: their
       individual population */
    for ( i = 0; i < NUM_KEYS; i++ )
        key_buff_ptr[key_buff_ptr2[i]]++; /* Now they have individual key */
    /* population */
    /* To obtain ranks of each key, successively add the individual key
       population */
    for ( i = 0; i < MAX_KEY - 1; i++ )
        key_buff_ptr[i + 1] += key_buff_ptr[i];
    /* This is the partial verify test section */
    /* Observe that test_rank_array vals are */
    /* shifted differently for different cases */
    for ( i = 0; i < TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i]; /* test vals were put here */
        if ( 0 < k && k <= NUM_KEYS - 1 )
        {
            /* Rank of key k = number of keys strictly smaller than k. */
            INT_TYPE key_rank = key_buff_ptr[k - 1];
            int failed = 0;
            /* CLASS is a compile-time char constant; only one case survives.
               The +/- iteration offsets per index come from the NPB spec. */
            switch ( CLASS )
            {
            case 'S':
                if ( i <= 2 )
                {
                    if ( key_rank != test_rank_array[i] + iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                else
                {
                    if ( key_rank != test_rank_array[i] - iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                break;
            case 'W':
                if ( i < 2 )
                {
                    if ( key_rank != test_rank_array[i] + (iteration - 2) )
                        failed = 1;
                    else
                        passed_verification++;
                }
                else
                {
                    if ( key_rank != test_rank_array[i] - iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                break;
            case 'A':
                if ( i <= 2 )
                {
                    if ( key_rank != test_rank_array[i] + (iteration - 1) )
                        failed = 1;
                    else
                        passed_verification++;
                }
                else
                {
                    if ( key_rank != test_rank_array[i] - (iteration - 1) )
                        failed = 1;
                    else
                        passed_verification++;
                }
                break;
            case 'B':
                if ( i == 1 || i == 2 || i == 4 )
                {
                    if ( key_rank != test_rank_array[i] + iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                else
                {
                    if ( key_rank != test_rank_array[i] - iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                break;
            case 'C':
                if ( i <= 2 )
                {
                    if ( key_rank != test_rank_array[i] + iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                else
                {
                    if ( key_rank != test_rank_array[i] - iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                break;
            case 'D':
                if ( i < 2 )
                {
                    if ( key_rank != test_rank_array[i] + iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                else
                {
                    if ( key_rank != test_rank_array[i] - iteration )
                        failed = 1;
                    else
                        passed_verification++;
                }
                break;
            }
            if ( failed == 1 )
                printf( "Failed partial verification: "
                        "iteration %d, test key %d\n",
                        iteration, (int)i );
        }
    }
    /* Make copies of rank info for use by full_verify: these variables
       in rank are local; making them global slows down the code, probably
       since they cannot be made register by compiler */
    if ( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff_ptr;
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/* Driver for the IS benchmark: selects the verification data for the
   compiled CLASS, generates the key sequence, runs one untimed warm-up
   iteration plus MAX_ITERATIONS timed ranking iterations, fully verifies
   the sort, and prints the standard NPB results block.
   Returns 0 on successful verification, 1 otherwise.
   Fix: removed the unused local 'FILE *fp' (dead variable / warning). */
int main( int argc, char **argv )
{
    int i, iteration;
    double timecounter;
    /* Command-line arguments are not used by this kernel. */
    (void)argc;
    (void)argv;
    /* Initialize timers */
    timer_clear( 0 );
    /* Initialize the verification arrays if a valid class */
    for ( i = 0; i < TEST_ARRAY_SIZE; i++ )
        switch ( CLASS )
        {
        case 'S':
            test_index_array[i] = S_test_index_array[i];
            test_rank_array[i] = S_test_rank_array[i];
            break;
        case 'A':
            test_index_array[i] = A_test_index_array[i];
            test_rank_array[i] = A_test_rank_array[i];
            break;
        case 'W':
            test_index_array[i] = W_test_index_array[i];
            test_rank_array[i] = W_test_rank_array[i];
            break;
        case 'B':
            test_index_array[i] = B_test_index_array[i];
            test_rank_array[i] = B_test_rank_array[i];
            break;
        case 'C':
            test_index_array[i] = C_test_index_array[i];
            test_rank_array[i] = C_test_rank_array[i];
            break;
        case 'D':
            test_index_array[i] = D_test_index_array[i];
            test_rank_array[i] = D_test_rank_array[i];
            break;
        };
    /* Printout initial NPB info */
    printf
    ( "\n\n NAS Parallel Benchmarks (NPB3.3-SER) - IS Benchmark\n\n" );
    printf( " Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );
    /* Generate random number sequence and subsequent keys on all procs */
    create_seq( 314159265.00, /* Random number gen seed */
                1220703125.00 ); /* Random number gen mult */
    /* Do one iteration for free (i.e., untimed) to guarantee initialization of
       all data and code pages and respective tables */
    rank( 1 );
    /* Start verification counter */
    passed_verification = 0;
    if ( CLASS != 'S' ) printf( "\n iteration\n" );
    /* Start timer */
    timer_start( 0 );
    /* This is the main iteration */
    for ( iteration = 1; iteration <= MAX_ITERATIONS; iteration++ )
    {
        if ( CLASS != 'S' ) printf( " %d\n", iteration );
        rank( iteration );
    }
    /* End of timing, obtain maximum time of all processors */
    timer_stop( 0 );
    timecounter = timer_read( 0 );
    /* This tests that keys are in sequence: sorting of last ranked key seq
       occurs here, but is an untimed operation */
    full_verify();
    /* Expected count: 5 partial checks per timed iteration + 1 full verify. */
    if ( passed_verification != 5 * MAX_ITERATIONS + 1 )
        passed_verification = 0;
    c_print_results( "IS",
                     CLASS,
                     (int)(TOTAL_KEYS / 64),
                     64,
                     0,
                     MAX_ITERATIONS,
                     timecounter,
                     ((double) (MAX_ITERATIONS * TOTAL_KEYS))
                     / timecounter / 1000000.,
                     "keys ranked",
                     passed_verification);
    int exitValue = passed_verification ? 0 : 1;
    return exitValue;
}
/**************************/
/* E N D P R O G R A M */
/**************************/
/* Prints the standard NPB results footer: benchmark name, class, problem
   size (n1*n2 when n3 == 0, else n1 x n2 x n3), iteration count, wall time,
   Mop/s, operation type, and the verification status.
   passed_verification: <0 = not performed, 0 = failed, >0 = successful.
   Fix: the SMP branch used 'evalue' without declaring it (build break when
   SMP is defined) and could pass NULL to printf("%s") when the environment
   variable is unset -- both corrected. */
void c_print_results( char *name,
                      char class,
                      int n1,
                      int n2,
                      int n3,
                      int niter,
                      double t,
                      double mops,
                      char *optype,
                      int passed_verification )
{
    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class = %c\n", class );
    if ( n3 == 0 )
    {
        long nn = n1;
        if ( n2 != 0 ) nn *= n2;
        printf( " Size = %12ld\n", nn ); /* as in IS */
    }
    else
        printf( " Size = %4dx%4dx%4d\n", n1, n2, n3 );
    printf( " Iterations = %12d\n", niter );
    printf( " Time in seconds = %12.2f\n", t );
    printf( " Mop/s total = %12.2f\n", mops );
    printf( " Operation type = %24s\n", optype);
    if ( passed_verification < 0 )
        printf( " Verification = NOT PERFORMED\n" );
    else if ( passed_verification )
        printf( " Verification = SUCCESSFUL\n" );
    else
        printf( " Verification = UNSUCCESSFUL\n" );
#ifdef SMP
    {
        char *evalue = getenv( "MP_SET_NUMTHREADS" );
        printf( " MULTICPUS = %s\n", evalue != NULL ? evalue : "(unset)" );
    }
#endif
}
/* Stores the wall-clock time in seconds into *t, measured relative to the
   second of the first call (the base second is latched in a static, so all
   subsequent calls share the same epoch and differences stay precise). */
void wtime(double *t)
{
    static int base_sec = -1;
    struct timeval now;
    gettimeofday(&now, (void *)0);
    if (base_sec < 0)
        base_sec = now.tv_sec;
    *t = (now.tv_sec - base_sec) + 1.0e-6 * now.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Returns the current wall-clock reading from wtime(), in seconds. */
double elapsed_time( void )
{
    double seconds;
    wtime( &seconds );
    return seconds;
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
/* Resets accumulated time for timer slot n (0 <= n < 64). */
void timer_clear( int n )
{
    elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
/* Records the current wall-clock time as the start of timer slot n. */
void timer_start( int n )
{
    start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/* Adds the interval since the matching timer_start(n) to timer slot n's
   accumulated total (timers are cumulative across start/stop pairs). */
void timer_stop( int n )
{
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
/* Returns the total accumulated seconds for timer slot n. */
double timer_read( int n )
{
    return ( elapsed[n] );
}
|
decomp.h | /*!
* Software SPAMS v2.2 - Copyright 2009-2011 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*
*
* \file
* toolbox decomp
*
* by Julien Mairal
* julien.mairal@inria.fr
*
* File decomp.h
* \brief Contains sparse decomposition algorithms
* It requires the toolbox linalg */
#ifndef DECOMP_H
#define DECOMP_H
#include <utils.h>
static char low='l';
static char nonUnit='n';
/* **************************
* Greedy Forward Selection
* **************************/
/// Forward Selection (or Orthogonal matching pursuit)
/// Address the problem of:
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alphai||_0 <= L or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_0
/// s.t. ||\X_i-D\alpha_i||_2^2 <= epsilon
/// This function is
/// * based on Cholesky decompositions
/// * parallel
/// * optimized for a large number of signals (precompute the Gramm matrix
template <typename T>
void omp(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
const int *L, const T* eps, const T* lambda, const bool vecL = false,
const bool vecEps = false, const bool Lambda=false, const int numThreads=-1,
Matrix<T>* path = NULL);
template <typename T>
void omp_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
const int *L, const T* eps, const T* lambda, const bool vecL = false,
const bool vecEps = false, const bool Lambda=false, const int numThreads=-1,
Matrix<T>* path = NULL);
/// Auxiliary function of omp
template <typename T>
void coreORMP(Vector<T>& scores, Vector<T>& norm, Vector<T>& tmp,
Matrix<T>& Un, Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs,
Vector<T>& Rdn, const AbstractMatrix<T>& G, Vector<INTM>& ind,
Vector<T>& RUn, T& normX, const T* eps, const int* L, const T* lambda,
T* path = NULL);
/// Auxiliary function of omp
template <typename T>
void coreORMPB(Vector<T>& RtD, const AbstractMatrix<T>& G, Vector<INTM>& ind,
Vector<T>& coeffs, T& normX, const int L, const T eps, const T lambda = 0);
/// Auxiliary function of omp
/*template <typename T>
void coreORMPWeighted(Vector<T>& scores, Vector<T>& weights, Vector<T>& norm,
Vector<T>& tmp, Matrix<T>& Un, Matrix<T>& Undn, Matrix<T>& Unds,
Matrix<T>& Gs, Vector<T>& Rdn, const AbstractMatrix<T>& G, Vector<INTM>&
ind, Vector<T>& RUn, T& normX, const T eps, const int L, const T lambda);*/
/* **************
* LARS - Lasso
* **************/
/// Defines different types of problem,
/// - constraint on the l1 norm of the coefficients
/// - constraint on the reconstruction error
/// - l1-sparsity penalty
enum constraint_type { L1COEFFS, L2ERROR, PENALTY, SPARSITY, L2ERROR2, PENALTY2,FISTAMODE};
/// Implementation of LARS-Lasso for solving
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alphai||_1 <= constraint or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= T
/// Optionally, the solution might be positive (boolean pos), and a
/// Least-Square can be solved as a post-processing step.
/// L is a maximum number of coefficients.
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a big number of signals (precompute the Gramm matrix
template <typename T>
void lasso(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha,
int L, const T constraint, const T lambda2 = 0, constraint_type mode = PENALTY,
const bool pos = false, const bool ols = false, const int numThreads=-1,
Matrix<T>* path = NULL, const int length_path=-1);
template <typename T>
void lasso(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode = PENALTY,
const bool pos = false, const bool ols = false, const int numThreads=-1,
Matrix<T>* path = NULL, const int length_path=-1);
/// second implementation using matrix inversion lemma
template <typename T>
void lasso2(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint,const T lambda2=0, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1, Matrix<T>* path = NULL, const int length_path=-1);
template <typename T>
void lasso2(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1, Matrix<T>* path = NULL, const int length_path=-1);
/// second implementation using matrix inversion lemma
template <typename T>
void lasso_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
int L, const T constraint,const T lambda2=0, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1);
/// second implementation using matrix inversion lemma
template <typename T>
void lassoReweighted(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode, const bool pos,
const T sigma,
const int numThreads = -1);
/// Auxiliary function for lasso
template <typename T>
void coreLARS(Vector<T>& Rdn, Vector<T>& Xdn, Vector<T>& A,
Vector<T>& u, Vector<T>& sig,
Vector<T>& av, Vector<T>& RUn, Matrix<T>& Un,
Matrix<T>& Unds, Matrix<T>& Gs,
Matrix<T>& Gsa, Matrix<T>& workT, Matrix<T>& R,
const AbstractMatrix<T>& G,T& normX,
Vector<int>& ind,Vector<T>& coeffs,const T constraint,
const bool ols = false,
const bool pos =false,
constraint_type mode = L1COEFFS,
T* path = NULL, int length_path=-1);
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint, const bool pos = false,
T* pr_path = NULL, int length_path = -1);
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
Vector<T>& coeffs, T normX,
const constraint_type mode,
const T constraint, const bool pos = false);
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
const Vector<T>& weights,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint, const bool pos = false);
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
Vector<T>& coeffs, const Vector<T>& weights, T normX,
const constraint_type mode,
const T constraint, const bool pos = false);
/// Auxiliary functoni for coreLARS (Cholesky downdate)
template <typename T>
void downDateLasso(int& j,int& minBasis,T& normX,const bool ols,
const bool pos, Vector<T>& Rdn, INTM* ind,
T* coeffs, Vector<T>& sig, Vector<T>& av,
Vector<T>& Xdn, Vector<T>& RUn,Matrix<T>& Unm, Matrix<T>& Gsm,
Matrix<T>& Gsam, Matrix<T>& Undsm, Matrix<T>& Rm);
/* ************************
* Iterative thresholding
* ************************/
/// Implementation of IST for solving
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= T
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax=500,
const T tol = 0.5, const int numThreads = -1);
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
Matrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax=500,
const T tol = 0.5, const int numThreads=-1);
/// coreIST
template <typename T>
void coreIST(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs,
const T thrs, const int itermax = 500,
const T tol = 0.5);
template <typename T>
void coreISTW(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs, const Vector<T>& weights,
const T thrs, const int itermax = 500,
const T tol = 0.5);
/// coreIST constrained
template <typename T>
void coreISTconstrained(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs,
const T normX2,
const T thrs, const int itermax = 500,
const T tol = 0.5);
/// ist for group Lasso
template <typename T>
void ist_groupLasso(const Matrix<T>* XT, const Matrix<T>& D,
Matrix<T>* alphaT, const int Ngroups,
const T lambda, const constraint_type mode,
const int itermax = 500,
const T tol = 0.5, const int numThreads = -1);
/// Auxiliary function for ist_groupLasso
template <typename T>
void coreGroupIST(const Matrix<T>& G, Matrix<T>& RtD,
Matrix<T>& alphat,
const T thrs,
const int itermax=500,
const T tol = 0.5);
/// Auxiliary function for ist_groupLasso
template <typename T>
void coreGroupISTConstrained(const Matrix<T>& G, Matrix<T>& RtD,
Matrix<T>& alphat, const T normR,
const T eps,
const int itermax=500,
const T tol = 0.5);
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,const Vector<T>& norms,
const Matrix<T>& G,const Matrix<T>& RtD,const Matrix<T>& alphat);
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,
const Matrix<T>& G,const Vector<T>& DtR,const Vector<T>& coeffs,
SpVector<T>& coeffs_tmp);
/* ******************
* Simultaneous OMP
* *****************/
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
const int Ngroups, const int L, const T* pr_eps, const bool adapt=false,
const int numThreads=-1);
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
const int Ngroups, const int L, const T eps, const int numThreads=-1);
template <typename T>
void coreSOMP(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& G,
Matrix<T>& vM,
Vector<INTM>& rv, const int L, const T eps);
/* *********************
* Implementation of OMP
* *********************/
/// Forward Selection (or Orthogonal matching pursuit)
/// Address the problem of:
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alphai||_0 <= L or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_0
/// s.t. ||\X_i-D\alpha_i||_2^2 <= epsilon
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a big number of signals (precompute the Gramm matrix
/// Batched Orthogonal Matching Pursuit driver: sparse-codes every column of
/// X over dictionary D and stores the coefficients in spalpha.
/// pL / peps / pLambda are either scalars or per-signal arrays, selected by
/// the vecL / vecEps / vecLambda flags.  Signals are processed in parallel
/// with OpenMP; each thread owns its own scratch buffers.  If 'path' is
/// non-null, the coefficient path of the FIRST signal only is recorded.
template <typename T>
void omp(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
      const int* pL, const T* peps, const T* pLambda,
      const bool vecL, const bool vecEps,
      const bool vecLambda, const int numThreads, Matrix<T>* path) {
   // Worst-case sparsity bound: the scalar L, or the max over per-signal Ls
   // (scratch matrices must be sized for the largest request).
   int L;
   if (!vecL) {
      L=*pL;
   } else {
      Vector<int> vL(const_cast<int*>(pL),X.n());
      L=vL.maxval();
   }
   spalpha.clear();
   if (L <= 0) return;
   const INTM M = X.n();   // number of signals
   const INTM K = D.n();   // number of dictionary atoms
   // L can never exceed the signal dimension or the dictionary size.
   L = MIN(X.m(),MIN(L,K));
   Matrix<T> vM(L,M);      // coefficient values, one column per signal
   Matrix<INTM> rM(L,M);   // selected atom indices, one column per signal
   // Gram matrix D'D; precomputed up front when the size heuristic says
   // it pays off, otherwise computed lazily by ProdMatrix.
   ProdMatrix<T> G(D, K < 25000 && M > 10);
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread scratch buffers for coreORMP (no sharing across threads).
   Vector<T>* scoresT=new Vector<T>[NUM_THREADS];
   Vector<T>* normT=new Vector<T>[NUM_THREADS];
   Vector<T>* tmpT=new Vector<T>[NUM_THREADS];
   Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
   Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      scoresT[i].resize(K);
      normT[i].resize(K);
      tmpT[i].resize(K);
      RdnT[i].resize(K);
      UnT[i].resize(L,L);
      UnT[i].setZeros();
      UndnT[i].resize(K,L);
      UndsT[i].resize(L,L);
      GsT[i].resize(K,L);
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normX = Xi.nrm2sq();
      // Output columns for this signal: indices (init -1 = unused slot)
      // and coefficient values.
      Vector<INTM> ind;
      rM.refCol(i,ind);
      ind.set(-1);
      Vector<T> RUn;
      vM.refCol(i,RUn);
      // Initial correlations D'x_i.
      Vector<T>& Rdn=RdnT[numT];
      D.multTrans(Xi,Rdn);
      // Per-signal parameters: advance the pointer when vectorized.
      coreORMP(scoresT[numT],normT[numT],tmpT[numT],UnT[numT],UndnT[numT],UndsT[numT],
               GsT[numT],Rdn,G,ind,RUn, normX, vecEps ? peps+i : peps,
               vecL ? pL+i : pL, vecLambda ? pLambda+i : pLambda,
               path && i==0 ? path->rawX() : NULL);
   }
   delete[](scoresT);
   delete[](normT);
   delete[](tmpT);
   delete[](RdnT);
   delete[](UnT);
   delete[](UndnT);
   delete[](UndsT);
   delete[](GsT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// OMP with per-signal observation masks: column i of 'mask' marks which
/// entries of signal X(:,i) are observed.  Fully-observed signals take the
/// fast path with the shared Gram matrix; partially-observed signals build
/// masked copies of D and x_i, a per-signal Gram matrix (regularized by
/// 1e-10 on the diagonal), and rescale eps by the observed fraction.
/// Signals whose mask is all-false are skipped entirely.
template <typename T>
void omp_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
      const int *pL, const T* peps, const T* pLambda, const bool vecL,
      const bool vecEps, const bool vecLambda, const int numThreads,
      Matrix<T>* path) {
   // Worst-case sparsity bound (max over per-signal Ls when vectorized).
   int L;
   if (!vecL) {
      L=*pL;
   } else {
      Vector<int> vL(const_cast<int*>(pL),X.n());
      L=vL.maxval();
   }
   spalpha.clear();
   if (L <= 0) return;
   const int M = X.n();
   const int K = D.n();
   L = MIN(X.m(),MIN(L,K));
   Matrix<T> vM(L,M);      // coefficient values per signal
   Matrix<INTM> rM(L,M);   // selected atom indices per signal
   ProdMatrix<T> G(D, K < 25000 && M > 10);
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread scratch for coreORMP plus mask-specific buffers.
   Vector<T>* scoresT=new Vector<T>[NUM_THREADS];
   Vector<T>* normT=new Vector<T>[NUM_THREADS];
   Vector<T>* tmpT=new Vector<T>[NUM_THREADS];
   Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
   Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   ProdMatrix<T>* GT=new ProdMatrix<T>[NUM_THREADS];   // per-signal Gram (masked path)
   Matrix<T>* DmaskT=new Matrix<T>[NUM_THREADS];       // masked dictionary copy
   Vector<T>* XmaskT=new Vector<T>[NUM_THREADS];       // masked signal copy
   for (int i = 0; i<NUM_THREADS; ++i) {
      DmaskT[i].resize(D.m(),D.n());
      XmaskT[i].resize(X.m());
      scoresT[i].resize(K);
      normT[i].resize(K);
      tmpT[i].resize(K);
      RdnT[i].resize(K);
      UnT[i].resize(L,L);
      UnT[i].setZeros();
      UndnT[i].resize(K,L);
      UndsT[i].resize(L,L);
      GsT[i].resize(K,L);
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      Vector<INTM> ind;
      rM.refCol(i,ind);
      ind.set(-1);
      Vector<T> RUn;
      vM.refCol(i,RUn);
      Vector<bool> maski;
      mask.refCol(i,maski);
      Vector<T>& Rdn=RdnT[numT];
      // Nothing observed: leave this column empty.
      if (maski.allfalse()) continue;
      if (maski.alltrue()) {
         // Fully observed: identical to the unmasked omp() path.
         D.multTrans(Xi,Rdn);
         T normX = Xi.nrm2sq();
         coreORMP(scoresT[numT],normT[numT],tmpT[numT],UnT[numT],UndnT[numT],UndsT[numT],
                  GsT[numT],Rdn,G,ind,RUn, normX, vecEps ? peps+i : peps,
                  vecL ? pL+i : pL, vecLambda ? pLambda+i : pLambda,
                  path && i==0 ? path->rawX() : NULL);
      } else {
         // Restrict D and x_i to the observed rows; the copies shrink the
         // buffers' logical dimensions.
         D.copyMask(DmaskT[numT],maski);
         Xi.copyMask(XmaskT[numT],maski);
         T normX = XmaskT[numT].nrm2sq();
         DmaskT[numT].multTrans(XmaskT[numT],Rdn);
         GT[numT].setMatrices(DmaskT[numT],false);
         // Small diagonal regularization keeps the masked Gram invertible.
         GT[numT].addDiag(T(1e-10));
         // Scale the error tolerance by the fraction of observed entries.
         T eps_mask= (vecEps ? *(peps+i) : *peps)*XmaskT[numT].n()/Xi.n();
         coreORMP(scoresT[numT],normT[numT],tmpT[numT],
                  UnT[numT],UndnT[numT],UndsT[numT],
                  GsT[numT],Rdn,GT[numT],ind,RUn,
                  normX, &eps_mask, vecL ? pL+i : pL,
                  vecLambda ? pLambda+i : pLambda,
                  path && i==0 ? path->rawX() : NULL);
         // Restore the full dimensions that copyMask shrank, so the
         // buffers are ready for the next signal handled by this thread.
         DmaskT[numT].setm(D.m());
         DmaskT[numT].setn(D.n());
         XmaskT[numT].setn(X.m());
      }
   }
   delete[](GT);
   delete[](XmaskT);
   delete[](DmaskT);
   delete[](scoresT);
   delete[](normT);
   delete[](tmpT);
   delete[](RdnT);
   delete[](UnT);
   delete[](UndnT);
   delete[](UndsT);
   delete[](GsT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Auxiliary function of omp
/// Convenience single-signal entry point for coreORMP: allocates the
/// per-call workspaces (sized by the number of atoms K = G.n() and the
/// sparsity bound L), resets the index buffer, and delegates.
template <typename T>
void coreORMPB(Vector<T>& RtD, const AbstractMatrix<T>& G, Vector<INTM>& ind,
      Vector<T>& coeffs, T& normX, const int L, const T eps, const T lambda) {
   const int nAtoms = G.n();
   Vector<T> workScores(nAtoms);
   Vector<T> workNorm(nAtoms);
   Vector<T> workTmp(nAtoms);
   Matrix<T> cholU(L,L);
   Matrix<T> cholUdn(nAtoms,L);
   Matrix<T> cholUds(L,L);
   Matrix<T> gramSel(nAtoms,L);
   ind.set(-1);
   coreORMP(workScores,workNorm,workTmp,cholU,cholUdn,cholUds,gramSel,
            RtD,G,ind,coeffs,normX,&eps,&L,&lambda);
};
/// Auxiliary function of omp
/// Core Order-Recursive Matching Pursuit (ORMP) solver for one signal.
/// Greedily selects up to L atoms; stops early when the residual energy
/// normX falls below eps, when the squared step falls below 2*lambda,
/// or when a candidate atom's residual norm degenerates.
/// On exit, ind holds the selected atom indices (-1 terminated on early
/// degenerate exit) and RUn holds the corresponding coefficients.
/// If path is non-NULL it receives the K x L matrix of intermediate
/// coefficient vectors (one column per iteration).
/// NOTE(review): eps/L/lambda are passed by pointer; only their pointed-to
/// values are read here.
template <typename T>
void coreORMP(Vector<T>& scores, Vector<T>& norm, Vector<T>& tmp, Matrix<T>& Un,
      Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs, Vector<T>& Rdn,
      const AbstractMatrix<T>& G,
      Vector<INTM>& ind, Vector<T>& RUn,
      T& normX, const T* peps, const int* pL, const T* plambda,
      T* path) {
   const T eps = abs<T>(*peps);
   // L is capped by the workspace width so the raw-pointer indexing below
   // never runs past Gs/Un/Undn.
   const int L = MIN(*pL,Gs.n());
   const T lambda=*plambda;
   if ((normX <= eps) || L == 0) return;
   const int K = scores.n();
   scores.copy(Rdn);
   norm.set(T(1.0));
   Un.setZeros();
   // permit unsafe low level access
   T* const prUn = Un.rawX();
   //T* const prUnds = Unds.rawX();
   T* const prUndn = Undn.rawX();
   T* const prGs = Gs.rawX();
   T* const prRUn= RUn.rawX();
   if (path)
      memset(path,0,K*L*sizeof(T));
   int j;
   for (j = 0; j<L; ++j) {
      // Pick the atom with maximal score (squared correlation / residual norm).
      const int currentInd=scores.fmax();
      if (norm[currentInd] < 1e-8) {
         // Degenerate atom: mark termination and stop.
         ind[j]=-1;
         break;
      }
      const T invNorm=T(1.0)/sqrt(norm[currentInd]);
      const T RU=Rdn[currentInd]*invNorm;
      const T delta = RU*RU;
      if (delta < 2*lambda) {
         // Energy gain no longer worth the lambda penalty: stop.
         break;
      }
      RUn[j]=RU;
      normX -= delta;
      ind[j]=currentInd;
      //for (int k = 0; k<j; ++k) prUn[j*L+k]=0.0;
      //prUn[j*L+j]=T(1.0);
      //  for (int k = 0; k<j; ++k) prUnds[k*L+j]=prUndn[k*K+currentInd];
      // MGS algorithm, Update Un
      //      int iter = norm[currentInd] < 0.5 ? 2 : 1;
      //int iter=1;
      //     for (int k = 0; k<iter; ++k) {
      ///      for (int l = 0; l<j; ++l) {
      //      T scal=-cblas_dot<T>(j+1-l,prUn+j*L+l,1,prUnds+l*L+l,1);
      //      T scal = -prUnds[l*L+j];
      //      cblas_axpy<T>(l+1,scal,prUn+l*L,1,prUn+j*L,1);
      //   }
      //  }
      // Update column j of Un (the Cholesky-like factor): copy row currentInd
      // of Undn, apply the upper-triangular transform, then normalize.
      prUn[j*L+j]=-T(1.0);
      cblas_copy<T>(j,prUndn+currentInd,K,prUn+j*L,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,prUn,L,prUn+j*L,1);
      cblas_scal<T>(j+1,-invNorm,prUn+j*L,1);
      if (j == L-1 || (normX <= eps)) {
         // Last iteration: ++j so the final trmv below sees the right count.
         ++j;
         break;
      }
      if (path) {
         // Record the current coefficient vector into column j of path,
         // using the last column as scratch for the back-substitution.
         T* last_path=path+(L-1)*K;
         cblas_copy<T>(j+1,prRUn,1,last_path,1);
         cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
               j+1,prUn,L,last_path,1);
         for (int k = 0; k<=j; ++k) {
            path[j*K+ind[k]]=last_path[k];
         }
      }
      // update the variables Gs, Undn, Unds, Rdn, norm, scores
      Vector<T> Gsj;
      Gs.refCol(j,Gsj);
      G.copyCol(currentInd,Gsj);
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prUn+j*L,1,
            T(0.0),prUndn+j*K,1);
      //  prUnds[j*L+j] = prUndn[j*K+currentInd];
      Vector<T> Undnj;
      Undn.refCol(j,Undnj);
      Rdn.add(Undnj,-RUn[j]);
      tmp.sqr(Undnj);
      norm.sub(tmp);
      scores.sqr(Rdn);
      scores.div(norm);
      // Exclude already-selected atoms from future selection.
      for (int k = 0; k<=j; ++k) scores[ind[k]]=T();
   }
   // compute the final coefficients
   cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
         j,prUn,L,prRUn,1);
   if (path) {
      memset(path+(L-1)*K,0,L*sizeof(T));
      for (int k = 0; k<j; ++k) {
         path[(j-1)*K+ind[k]]=prRUn[k];
      }
   }
};
/// Auxiliary function of omp
/*template <typename T>
void coreORMPWeighted(Vector<T>& scores, Vector<T>& weights, Vector<T>& norm, Vector<T>& tmp, Matrix<T>& Un,
Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs, Vector<T>& Rdn,
const AbstractMatrix<T>& G,
Vector<INTM>& ind, Vector<T>& RUn,
T& normX, const T peps, const int pL, const T plambda) {
const T eps = abs<T>(*peps);
const int L = MIN(*pL,Gs.n());
const T lambda=*plambda;
if ((normX <= eps) || L == 0) return;
const int K = scores.n();
scores.copy(Rdn);
scores.div(weights);
norm.set(T(1.0));
Un.setZeros();
// permit unsafe low level access
T* const prUn = Un.rawX();
T* const prUnds = Unds.rawX();
T* const prUndn = Undn.rawX();
T* const prGs = Gs.rawX();
T* const prRUn= RUn.rawX();
int j;
for (j = 0; j<L; ++j) {
const int currentInd=scores.fmax();
if (norm[currentInd] < 1e-8) {
ind[j]=-1;
break;
}
const T invNorm=T(1.0)/sqrt(norm[currentInd]);
const T RU=Rdn[currentInd]*invNorm;
const T delta = RU*RU;
if (delta < 2*lambda) {
break;
}
RUn[j]=RU;
normX -= delta;
ind[j]=currentInd;
prUn[j*L+j]=-T(1.0);
cblas_copy<T>(j,prUndn+currentInd,K,prUn+j*L,1);
cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,prUn,L,prUn+j*L,1);
cblas_scal<T>(j+1,-invNorm,prUn+j*L,1);
if (j == L-1 || (normX <= eps)) {
++j;
break;
}
// update the variables Gs, Undn, Unds, Rdn, norm, scores
Vector<T> Gsj;
Gs.refCol(j,Gsj);
G.copyCol(currentInd,Gsj);
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prUn+j*L,1,
T(0.0),prUndn+j*K,1);
Vector<T> Undnj;
Undn.refCol(j,Undnj);
Rdn.add(Undnj,-RUn[j]);
tmp.sqr(Undnj);
norm.sub(tmp);
scores.sqr(Rdn);
scores.div(norm);
scores.div(weights);
for (int k = 0; k<=j; ++k) scores[ind[k]]=T();
}
// compute the final coefficients
cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
j,prUn,L,prRUn,1);
};*/
/* **************
* LARS - Lasso
* **************/
/// Implementation of LARS-Lasso for solving
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
///                        s.t. ||\alpha_i||_1 <= constraint or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
///                        s.t. ||X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
///                        ... ||X_i-D\alpha_i||_2^2
/// Optionally, the solution might be positive (boolean pos), and a
/// Least-Square can be solved as a post-processing step.
/// L is a maximum number of coefficients.
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a big number of signals (precomputes the Gram matrix)
template <typename T>
void lasso(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
      int L, const T lambda, const T lambda2, constraint_type mode,
      const bool pos, const bool ols, const int numThreads,
      Matrix<T>* path, const int length_path) {
   /// Entry point taking the raw dictionary: builds the (optionally
   /// precomputed) Gram matrix D'D with a small ridge MAX(lambda2,1e-10)
   /// on its diagonal, the correlation matrix D'X, and delegates to the
   /// Gram-based overload.
   const bool precomputeGram = X.n() > 10 && D.n() < 50000;
   ProdMatrix<T> G(D, precomputeGram);
   ProdMatrix<T> DtX(D,X,false);
   G.addDiag(MAX(lambda2,1e-10));
   lasso(X,G,DtX,spalpha,L,lambda,mode,pos,ols,numThreads,path,length_path);
}
/// Solve a LARS-Lasso problem for every column of X, given the Gram matrix
/// G = D'D and the correlations DtX = D'X. One coreLARS call per signal,
/// parallelized over signals with OpenMP; all per-thread workspaces are
/// allocated up-front so the parallel loop performs no allocation.
/// Results are gathered in dense (values, indices) matrices and converted
/// to the sparse output at the end.
template <typename T>
void lasso(const Data<T>& X, const AbstractMatrix<T>& G,
      const AbstractMatrix<T>& DtX, SpMatrix<T>& spalpha,
      int L, const T lambda, constraint_type mode,
      const bool pos, const bool ols, const int numThreads,
      Matrix<T>* path, const int length_path) {
   spalpha.clear();
   const INTM M = X.n();   // number of signals (columns of X)
   const INTM K = G.n();   // number of dictionary atoms
   Matrix<T> vM;           // coefficient values, one column per signal (L x M)
   Matrix<INTM> rM;        // matching atom indices (L x M)
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   if (path) path->setZeros();
   int NUM_THREADS=init_omp(numThreads);
   //ProdMatrix<T> G(D, K < 25000 && M > 10);
   // Per-thread workspaces for coreLARS (one set per OpenMP thread).
   Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
   Vector<T>* XdnT =new Vector<T>[NUM_THREADS];
   Vector<T>* AT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Vector<T>* sigT=new Vector<T>[NUM_THREADS];
   Vector<T>* avT=new Vector<T>[NUM_THREADS];
   Vector<T>* RUnT = new Vector<T>[NUM_THREADS];
   Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* RT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      RdnT[i].resize(K);
      if (ols) XdnT[i].resize(K);   // Xdn/RUn only needed for the OLS post-step
      AT[i].resize(K);
      uT[i].resize(L);
      sigT[i].resize(L);
      avT[i].resize(L);
      if (ols) RUnT[i].resize(L);
      UnT[i].resize(L,L);
      UnT[i].setZeros();
      UndsT[i].resize(L,L);
      UndsT[i].setZeros();
      GsT[i].resize(K,L);
      GsaT[i].resize(L,L);
      workT[i].resize(K,2);
      RT[i].resize(L,L);
   }
   // Squared L2 norms of all signals, computed once before the loop.
   Vector<T> norms;
   X.norm_2sq_cols(norms);
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T normX = norms[i];
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      coeffs.setZeros();
      Vector<T>& Rdn=RdnT[numT];
      DtX.copyCol(i,Rdn);
      // path is only recorded for the first signal.
      coreLARS(Rdn,XdnT[numT], AT[numT], uT[numT], sigT[numT], avT[numT],
            RUnT[numT], UnT[numT], UndsT[numT], GsT[numT], GsaT[numT],
            workT[numT],RT[numT],G,normX, ind,coeffs,lambda,ols,pos,
            mode,path && i==0 ? path->rawX() : NULL, length_path);
   }
   delete[](RdnT);
   delete[](XdnT);
   delete[](AT);
   delete[](uT);
   delete[](sigT);
   delete[](avT);
   delete[](RUnT);
   delete[](UnT);
   delete[](RT);
   delete[](UndsT);
   delete[](GsT);
   delete[](GsaT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Auxiliary function for lasso
/// Core LARS-Lasso homotopy solver for one signal.
/// Follows the regularization path, adding the most correlated atom
/// (newAtom) or removing an atom whose coefficient crosses zero
/// (downDateLasso), until the stopping criterion of the selected mode
/// (PENALTY / L2ERROR / L1COEFFS) is met, L atoms are active, or
/// length_path iterations have been taken. If pos, coefficients are
/// constrained non-negative. If ols, a final least-squares re-fit on the
/// selected support is performed. If path is non-NULL the coefficient
/// vector after each iteration is written into its columns.
template <typename T>
void coreLARS(Vector<T>& Rdnv, Vector<T>& Xdnv, Vector<T>& Av,
      Vector<T>& uv, Vector<T>& sigv, Vector<T>& avv, Vector<T>& RUnv,
      Matrix<T>& Unm, Matrix<T>& Undsm, Matrix<T>& Gsm,
      Matrix<T>& Gsam, Matrix<T>& workm, Matrix<T>& Rm,
      const AbstractMatrix<T>& Gm,T& normX,
      Vector<INTM>& indv,Vector<T>& coeffsv,const T constraint,
      const bool ols,const bool pos, constraint_type mode,
      T* path, int length_path) {
   if (mode == L2ERROR && normX < constraint) return;
   const int LL = Gsm.n();
   const int K = Gsm.m();
   const int L = MIN(LL,K);
   if (length_path <= 1) length_path=4*L;
   // permit unsafe fast low level access
   T* const Rdn = Rdnv.rawX();
   T* const Xdn = Xdnv.rawX();
   T* const A = Av.rawX();
   T* const u = uv.rawX();
   T* const sig = sigv.rawX();
   //T* const av = avv.rawX();
   T* const RUn = RUnv.rawX();
   T* const Un = Unm.rawX();
   T* const Unds = Undsm.rawX();
   T* const Gs = Gsm.rawX();
   T* const Gsa = Gsam.rawX();
   T* const work = workm.rawX();
   //T* const G = Gm.rawX();
   //T* const R = Rm.rawX();
   INTM* ind = indv.rawX();
   T* coeffs = coeffsv.rawX();
   coeffsv.setZeros();
   indv.set(-1);
   if (ols) Xdnv.copy(Rdnv);
   // First atom: maximal (signed if pos, absolute otherwise) correlation.
   int currentInd= pos ? Rdnv.max() : Rdnv.fmax();
   bool newAtom=true;
   T Cmax = 0;
   int iter=1;
   T thrs = 0.0;
   //  INTM* const ind_orig = ind;
   //  T* const coeffs_orig = coeffs;
   int j;
   for (j = 0; j<L; ++j) {
      if (newAtom) {
         // Add atom currentInd: record its sign, pull its Gram column into
         // Gs/Gsa, and extend the orthogonalization Un/Unds by one column.
         ind[j]=currentInd;
         if (pos) {
            Cmax = Rdn[currentInd];
            sig[j]=1.0;
         } else {
            Cmax = abs<T>(Rdn[currentInd]);
            sig[j] = SIGN(Rdn[currentInd]);
         }
         for (int k = 0; k<=j; ++k) Un[j*L+k]=0.0;
         Un[j*L+j]=1.0;
         Gm.extract_rawCol(currentInd,Gs+K*j);
         for (int k = 0; k<j; ++k) Gs[K*j+ind[k]] *= sig[k];
         if (sig[j] < 0) {
            // Flip signs so the active correlations are all positive.
            Rdn[currentInd]=-Rdn[currentInd];
            if (ols) Xdn[currentInd]=-Xdn[currentInd];
            cblas_scal<T>(K,sig[j],Gs+K*j,1);
            cblas_scal<T>(j+1,sig[j],Gs+currentInd,K);
         }
         cblas_copy<T>(j+1,Gs+currentInd,K,Gsa+j*L,1);
         for (int k = 0; k<j; ++k) Gsa[k*L+j]=Gsa[j*L+k];
         // <d_j,d_i>
         cblas_copy<T>(j,Gsa+j*L,1,Unds+j,L);
         // <U_j final,d_i>
         cblas_trmv<T>(CblasColMajor,CblasUpper,CblasTrans,CblasNonUnit,
               j+1,Un,L,Unds+j,L);
         // norm2
         T norm2=Gsa[j*L+j];
         for (int k = 0; k<j; ++k) norm2 -= Unds[k*L+j]*Unds[k*L+j];
         if (norm2 < 1e-15) {
            // New atom numerically dependent on the active set: abort path.
            ind[j]=-1;
            //     cerr << "bad exit" << endl;
            break;
         }
         //    int iter2 = norm2 < 0.5 ? 2 : 1;
         //    for(int k = 0; k<iter2; ++k) {
         //    for (int l = 0; l<j; ++l) {
         //       T scal=-cblas_dot<T>(j+1-l,Un+j*L+l,1,Unds+l*L+l,1);
         //       cblas_axpy<T>(l+1,scal,Un+l*L,1,Un+j*L,1);
         //    }
         //    }
         Un[j*L+j]=-T(1.0);
         cblas_copy<T>(j,Unds+j,L,Un+j*L,1);
         cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,Un,L,Un+j*L,1);
         /// Un is the orthogonalized vectors in the D basis
         T invNorm=1.0/sqrt(norm2);
         cblas_scal<T>(j+1,-invNorm,Un+j*L,1);
         Unds[j*L+j]=cblas_dot<T>(j+1,Un+j*L,1,Gsa+j*L,1);
      }
      // Equiangular direction u: normalized so every active correlation
      // decreases at the same rate a.
      for (int k = 0; k<=j; ++k) u[k]=T(1.0);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasTrans,CblasNonUnit,
            j+1,Un,L,u,1);
      T a = T(1.0)/cblas_nrm2<T>(j+1,u,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
            j+1,Un,L,u,1);
      cblas_scal<T>(j+1,a,u,1);
      // A = correlations of all atoms with the equiangular direction.
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),Gs,K,u,1,T(0.0),A,1);
      T potentNorm=0.0;
      if (!ols) {
         for (int k = 0; k<=j; ++k) potentNorm += Rdn[ind[k]]*u[k];
      }
      // Smallest step at which an inactive atom joins the active set.
      if (pos) {
         for (int k = 0; k<K; ++k) {
            T diff = a-A[k];
            work[k]= diff <= 0 ? INFINITY : (Cmax-Rdn[k])/diff;
         }
         for (int k = 0; k<=j; ++k) {
            work[ind[k]]=INFINITY;
         }
         for (int k = 0; k<K; ++k)
            if (work[k] <=0) work[k]=INFINITY;
         currentInd =cblas_iamin<T>(K,work,1);
      } else {
         // Two candidate steps per atom (positive and negative correlation).
         memset(work,0,2*K*sizeof(T));
         for (int k = 0; k<=j; ++k) {
            const int index=2*ind[k];
            work[index]=INFINITY;
            work[index+1]=INFINITY;
         }
         for (int k = 0; k<K; ++k) {
            const int index=2*k;
            if (!work[index]) {
               const T diff1=a-A[k];
               work[index]= diff1 <= 0 ? INFINITY : (Cmax-Rdn[k])/diff1;
               const T diff2=a+A[k];
               work[index+1]=diff2 <= 0 ? INFINITY : (Cmax+Rdn[k])/diff2;
            }
         }
         currentInd =cblas_iamin<T>(2*K,work,1);
      }
      T gamma=work[currentInd];
      T gammaMin=0;
      int minBasis=0;
      //if (j == L-1) gamma=potentNorm;
      if (mode == PENALTY) {
         gamma=MIN(gamma,(Cmax-constraint)/a);
      }
      //    if (j > 0) {
      // Smallest step at which an active coefficient crosses zero.
      vDiv<T>(j+1,coeffs,u,work);
      cblas_scal<T>(j+1,-T(1.0),work,1);
      /// check behavior for small values
      for (int k=0; k<=j; ++k)
         if (coeffs[k]==0 || work[k] <=0) work[k]=INFINITY;
      minBasis=cblas_iamin<T>(j+1,work,1);
      gammaMin=work[minBasis];
      if (gammaMin < gamma) gamma=gammaMin;
      //    }
      if (mode == L1COEFFS) {
         T Tu = 0.0;
         for (int k = 0; k<=j; ++k) Tu += u[k];
         if (Tu > EPSILON)
            gamma= MIN(gamma,(constraint-thrs)/Tu);
         thrs+=gamma*Tu;
      }
      // compute the norm of the residual
      if (ols == 0) {
         const T t = gamma*gamma - 2*gamma*potentNorm;
         if (t > 0 || isnan(t) || isinf(t)) {
            //     cerr << "bad bad exit" << endl;
            //     cerr << t << endl;
            ind[j]=-1;
            break;
         }
         normX += t;
      } else {
         // plan the last orthogonal projection
         if (newAtom) {
            RUn[j]=0.0;
            for (int k = 0; k<=j; ++k) RUn[j] += Xdn[ind[k]]*
               Un[j*L+k];
            normX -= RUn[j]*RUn[j];
         }
      }
      // Update the coefficients
      cblas_axpy<T>(j+1,gamma,u,1,coeffs,1);
      if (pos) {
         for (int k = 0; k<j+1; ++k)
            if (coeffs[k] < 0) coeffs[k]=0;
      }
      cblas_axpy<T>(K,-gamma,A,1,Rdn,1);
      // currentInd was selected over the 2K interleaved candidates.
      if (!pos) currentInd/= 2;
      if (path) {
         for (int k = 0; k<=j; ++k)
            path[iter*K+ind[k]]=coeffs[k]*sig[k];
      }
      if (gamma == gammaMin) {
         // A coefficient hit zero first: downdate the active set instead of
         // adding a new atom (--j keeps the active-set size consistent).
         downDateLasso<T>(j,minBasis,normX,ols,pos,Rdnv,ind,coeffs,sigv,
               avv,Xdnv, RUnv, Unm, Gsm, Gsam,Undsm,Rm);
         newAtom=false;
         Cmax=abs<T>(Rdn[ind[0]]);
         --j;
      } else {
         newAtom=true;
      }
      ++iter;
      if (mode == PENALTY) {
         thrs=abs<T>(Rdn[ind[0]]);
      }
      // Stopping conditions for all three modes.
      if ((j == L-1) ||
            (mode == PENALTY && (thrs - constraint < 1e-15)) ||
            (mode == L1COEFFS && (thrs - constraint > -1e-15)) ||
            (newAtom && mode == L2ERROR && (normX - constraint < 1e-15)) ||
            (normX < 1e-15) ||
            (iter >= length_path)) {
         //     cerr << "exit" << endl;
         //     PRINT_F(thrs)
         //     PRINT_F(constraint)
         //     PRINT_F(normX)
         break;
      }
   }
   if (ols) {
      // Final orthogonal projection (least squares on the selected support).
      cblas_copy<T>(j+1,RUn,1,coeffs,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
            j+1,Un,L,coeffs,1);
   }
   // Restore the original signs of the coefficients.
   vMul<T>(j+1,coeffs,sig,coeffs);
};
/// Auxiliary function for coreLARS (Cholesky downdate)
/// Remove atom minBasis from the LARS active set of size j+1:
/// shifts ind/coeffs/sig/Gs/Gsa down by one, repairs the orthogonal
/// factor Un and Unds via a sequence of rank-one (Givens-like) updates
/// accumulated in R, and — when ols — recomputes RUn and the residual
/// energy normX for the shrunken support. Decrements j on exit.
template <typename T>
inline void downDateLasso(int& j,int& minBasis,T& normX,const bool ols,
      const bool pos,
      Vector<T>& Rdnv, INTM* ind,
      T* coeffs, Vector<T>& sigv, Vector<T>& avv,
      Vector<T>& Xdnv, Vector<T>& RUnv,Matrix<T>& Unm, Matrix<T>& Gsm,
      Matrix<T>& Gsam, Matrix<T>& Undsm, Matrix<T>& Rm) {
   const int L = Gsm.n();
   const int K = Gsm.m();
   T* const Rdn = Rdnv.rawX();
   T* const Xdn = Xdnv.rawX();
   T* const sig = sigv.rawX();
   T* const av = avv.rawX();
   T* const RUn = RUnv.rawX();
   T* const Un = Unm.rawX();
   T* const Unds = Undsm.rawX();
   T* const Gs = Gsm.rawX();
   T* const Gsa = Gsam.rawX();
   T* const R = Rm.rawX();
   int indB=ind[minBasis];
   if (!pos && sig[minBasis] < 0) {
      // Undo the sign flip applied when the atom was added.
      // Update Rdn
      Rdn[indB]=-Rdn[indB];
      if (ols) Xdn[indB]=-Xdn[indB];
   }
   // num = number of active columns after the removed one.
   int num=j-minBasis;
   for (int k = 0; k<num*num;++k) R[k]=0.0;
   for (int k = 0; k<num; ++k) R[k*num+k]=1.0;
   // Update Un
   // Eliminate row minBasis from the trailing columns, storing the
   // elimination multipliers in av for the downdate of R.
   for (int k = minBasis+1; k<=j; ++k) {
      T a = -Un[k*L+minBasis]/Un[minBasis*L+minBasis];
      av[k-minBasis-1] = a;
      cblas_axpy<T>(minBasis,a,Un+minBasis*L,1,Un+k*L,1);
   }
   // Shift the trailing columns of Un left by one, dropping row minBasis.
   for (int k = minBasis+1; k<=j; ++k) {
      cblas_copy<T>(minBasis,Un+k*L,1,Un+(k-1)*L,1);
      cblas_copy<T>(num,Un+k*L+minBasis+1,1,Un+(k-1)*L+minBasis,1);
   }
   // Build the hyperbolic-rotation factor R from the multipliers av.
   T alpha=1.0;
   T alphab,gamma;
   for (int k = 0; k<num; ++k) {
      alphab=alpha+av[k]*av[k];
      R[k*num+k]=sqrt(alphab/alpha);
      gamma=av[k]*R[k*num+k]/alphab;
      alpha=alphab;
      cblas_copy<T>(num-k-1,av+k+1,1,R+k*num+k+1,1);
      cblas_scal<T>(num-k-1,gamma,R+k*num+k+1,1);
   }
   if (num > 0) {
      trtri<T>(low,nonUnit,num,R,num);
      cblas_trmm<T>(CblasColMajor,CblasRight,CblasLower,CblasTrans,CblasNonUnit,
            j,num,T(1.0),R,num,Un+minBasis*L,L);
   }
   // Update Unds
   for (int k = minBasis+1; k<=j; ++k)
      cblas_axpy<T>(j-minBasis,av[k-minBasis-1],Unds+minBasis*L+minBasis+1,1,
            Unds+k*L+minBasis+1,1);
   for (int k = 0; k<minBasis; ++k)
      for (int l = minBasis+1; l<=j; ++l)
         Unds[k*L+l-1]=Unds[k*L+l];
   for (int k = minBasis+1; k<=j; ++k)
      cblas_copy<T>(j-minBasis,Unds+k*L+minBasis+1,1,Unds+(k-1)*L+minBasis,1);
   if (num > 0)
      cblas_trmm<T>(CblasColMajor,CblasRight,CblasLower,CblasTrans,CblasNonUnit,
            j-minBasis,num,T(1.0),R,num,Unds+minBasis*L+minBasis,L);
   // Restore upper-triangular structure (zero out below-diagonal entries).
   for (int k = minBasis+1; k<=j; ++k)
      for (int l = 0; l<k; ++l) Unds[k*L+l]=0.0;
   // Update Gs
   for (int k = minBasis+1; k<=j; ++k) {
      cblas_copy<T>(K,Gs+k*K,1,Gs+(k-1)*K,1);
   }
   if (!pos && sig[minBasis] < T(0.0)) cblas_scal<T>(j,T(-1.0),Gs+indB,K);
   // Update Gsa
   for (int k = minBasis+1; k<=j; ++k) {
      cblas_copy<T>(minBasis,Gsa+k*L,1,Gsa+(k-1)*L,1);
      cblas_copy<T>(j-minBasis,Gsa+k*L+minBasis+1,1,Gsa+(k-1)*L+minBasis,1);
   }
   for (int k = 0; k<minBasis; ++k) {
      for (int l = minBasis+1; l<=j; ++l) Gsa[k*L+l-1]=Gsa[k*L+l];
   }
   // Update sig
   for (int k = minBasis+1; k<=j && !pos; ++k) sig[k-1]=sig[k];
   // Update ind
   for (int k = minBasis+1; k<=j; ++k) ind[k-1]=ind[k];
   ind[j]=-1;
   for (int k = minBasis+1; k<=j; ++k) coeffs[k-1]=coeffs[k];
   coeffs[j]=0.0;
   if (ols) {
      // Update RUn and normX
      for (int k = minBasis; k<=j; ++k)
         normX += RUn[k]*RUn[k];
      for (int k = minBasis; k<j; ++k) {
         RUn[k]=0.0;
         for (int l = 0; l<=k; ++l) RUn[k] += Xdn[ind[l]]*
            Un[k*L+l];
         normX -= RUn[k]*RUn[k];
      }
   }
   // Update j
   --j;
}
/// second implementation using matrix inversion lemma
/// Reweighted-L1 Lasso: for each signal, first solves a plain Lasso
/// (coreLARS2), then performs iterR reweighting iterations where the
/// weight of each selected atom is set from its current coefficient
/// magnitude (exponential reweighting, annealed via sig = sigma*0.7^t)
/// and a weighted Lasso (coreLARS2W) is re-solved. Parallelized over
/// signals with OpenMP; per-thread workspaces preallocated.
template <typename T>
void lassoReweighted(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const T sigma,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();   // number of signals
   const int K = D.n();   // number of atoms
   Matrix<T> vM;          // coefficient values (L x M)
   Matrix<int> rM;        // atom indices (L x M)
   vM.resize(L,M);
   rM.resize(L,M);
   const int iterR = 30;  // number of reweighting iterations
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   //ProdMatrix<T> G(D, K < 25000 && M > 10);
   ProdMatrix<T> G(D, K < 50000);
   //Matrix<T> G;
   //D.XtX(G);
   G.addDiag(1e-10);   // small ridge for numerical stability
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* DtRRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Vector<T>* weightsT=new Vector<T>[NUM_THREADS];
   Vector<int>* inddT=new Vector<int>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      DtRRT[i].resize(K);
      uT[i].resize(K);
      weightsT[i].resize(K);
      GT[i].resize(K,K);
      inddT[i].resize(K);
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normXo = Xi.nrm2sq();   // kept to reset normX before each re-solve
      T normX = normXo;
      Vector<int> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      Vector<T>& DtRR = DtRRT[numT];
      D.multTrans(Xi,DtR);
      DtRR.copy(DtR);
      // Initial unweighted solve.
      coreLARS2(DtRR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
            ind,workT[numT],normX,mode,constraint,pos);
      //Matrix<T>& GG = GT[numT];
      Vector<T>& weights = weightsT[numT];
      //Vector<int>& indd = inddT[numT];
      for (int j = 0; j<iterR; ++j) {
         // Annealed reweighting: sig grows toward sigma as j increases.
         const T sig = sigma*pow(0.7,iterR-1-j);
         weights.set(sig);
         for (int k = 0; k<K; ++k) {
            if (ind[k] != -1) {
               weights[ind[k]] = MAX(1e-4,sig*exp(-sig*abs<T>(coeffs[k])));
            } else {
               break;   // ind is -1-terminated
            }
         }
         // Re-solve from scratch with the updated weights.
         DtRR.copy(DtR);
         normX=normXo;
         coreLARS2W(DtRR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,weights,
               ind,workT[numT],normX,mode,constraint,pos);
      }
   }
   delete[](DtRT);
   delete[](DtRRT);
   delete[](inddT);
   delete[](uT);
   delete[](weightsT);
   delete[](GsT);
   delete[](GT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
}
/// Weighted Lasso: one coreLARS2W solve per column of X, with the
/// per-atom weights taken from the matching column of `weights`.
/// The Gram matrix D'D is (optionally pre-)computed once with a small
/// ridge; parallelized over signals with OpenMP.
template <typename T>
void lassoWeight(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& weights,
      SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();   // number of signals
   const int K = D.n();   // number of atoms
   Matrix<T> vM;          // coefficient values (L x M)
   Matrix<INTM> rM;       // atom indices (L x M)
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   //ProdMatrix<T> G(D, K < 25000 && M > 10);
   ProdMatrix<T> G(D, K < 50000);
   //Matrix<T> G;
   //D.XtX(G);
   G.addDiag(1e-10);   // small ridge for numerical stability
   // Per-thread workspaces for coreLARS2W.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      uT[i].resize(K);
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normX = Xi.nrm2sq();
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      D.multTrans(Xi,DtR);
      Vector<T> we;
      weights.refCol(i,we);
      coreLARS2W(DtR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,we,
            ind,workT[numT],normX,mode,constraint,pos);
   }
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Weighted Lasso variant where both the Gram matrix G and the
/// correlations DtR are supplied by the caller (nothing recomputed here).
/// One coreLARS2W solve per column, parallelized over signals with OpenMP.
template <typename T>
void lassoWeightPreComputed(const Matrix<T>& X, const Matrix<T>& G, const Matrix<T>& DtR, const Matrix<T>& weights,
      SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();   // number of signals
   const int K = G.n();   // number of atoms
   Matrix<T> vM;          // coefficient values (L x M)
   Matrix<int> rM;        // atom indices (L x M)
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread workspaces for coreLARS2W.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      uT[i].resize(K);
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normX = Xi.nrm2sq();
      Vector<int> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtRi=DtRT[numT];
      DtR.copyCol(i,DtRi);   // coreLARS2W mutates DtR, so work on a copy
      Vector<T> we;
      weights.refCol(i,we);
      coreLARS2W(DtRi,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,we,
            ind,workT[numT],normX,mode,constraint,pos);
   }
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// second implementation using matrix inversion lemma
/// Lasso with per-signal boolean masks: entries of X where the mask is
/// false are ignored. All-true columns use the precomputed global Gram
/// matrix; partially-masked columns rebuild a masked dictionary and Gram
/// matrix per signal (and rescale the constraint in PENALTY/L2ERROR
/// modes by the fraction of observed entries). All-false columns are
/// skipped. Parallelized over signals with OpenMP.
template <typename T>
void lasso_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
      int L, const T constraint,const T lambda2, constraint_type mode, const bool pos,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();   // number of signals
   const int K = D.n();   // number of atoms
   Matrix<T> vM;          // coefficient values (L x M)
   Matrix<INTM> rM;       // atom indices (L x M)
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   ProdMatrix<T> G(D,K < 25000 && M > 10);
   G.addDiag(MAX(lambda2,1e-10));   // ridge: at least 1e-10 even if lambda2 is 0
   // Per-thread workspaces; DmaskT/XmaskT/GT are only used for masked columns.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Vector<T>* XmaskT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   ProdMatrix<T>* GT=new ProdMatrix<T>[NUM_THREADS];
   Matrix<T>* DmaskT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DmaskT[i].resize(D.m(),D.n());
      DtRT[i].resize(K);
      uT[i].resize(K);
      XmaskT[i].resize(X.m());
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      Vector<bool> maski;
      mask.refCol(i,maski);
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      if (maski.allfalse()) continue;   // nothing observed: leave coeffs empty
      if (maski.alltrue()) {
         // Fast path: no masking needed, reuse the global Gram matrix.
         T normX = Xi.nrm2sq();
         D.multTrans(Xi,DtR);
         coreLARS2(DtR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
               ind,workT[numT],normX,mode,constraint,pos);
      } else {
         // Masked path: restrict D and Xi to observed rows, rebuild the
         // Gram matrix for the restricted dictionary.
         D.copyMask(DmaskT[numT],maski);
         Xi.copyMask(XmaskT[numT],maski);
         // Scale the constraint by the observed fraction in the modes where
         // it is an energy/penalty level.
         T constraint_mask = mode == PENALTY || mode == L2ERROR ? constraint*XmaskT[numT].n()/Xi.n() : constraint;
         T normX = XmaskT[numT].nrm2sq();
         DmaskT[numT].multTrans(XmaskT[numT],DtR);
         GT[numT].setMatrices(DmaskT[numT],false);
         GT[numT].addDiag(MAX(lambda2,T(1e-10)));
         coreLARS2(DtR,GT[numT],
               GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
               ind,workT[numT],normX,mode,constraint_mask,pos);
         // Restore the full dimensions for the next signal on this thread.
         DmaskT[numT].setm(D.m());
         DmaskT[numT].setn(D.n());
         XmaskT[numT].setn(X.m());
      }
   }
   delete[](GT);
   delete[](XmaskT);
   delete[](DmaskT);
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
template <typename T>
void lasso2(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
      int L, const T constraint, const T lambda2, constraint_type mode, const bool pos,
      const int numThreads, Matrix<T>* path, int length_path) {
   /// Entry point taking the raw dictionary: builds the (optionally
   /// precomputed) Gram matrix D'D with a small ridge MAX(lambda2,1e-10)
   /// on its diagonal, the correlation matrix D'X, and delegates to the
   /// Gram-based overload.
   const bool precomputeGram = X.n() > 10 && D.n() < 50000;
   ProdMatrix<T> G(D,precomputeGram);
   ProdMatrix<T> DtX(D,X,false);
   G.addDiag(MAX(lambda2,1e-10));
   lasso2(X,G,DtX,spalpha,L,constraint,mode,pos,numThreads,path, length_path);
}
/// Second Lasso implementation (matrix-inversion-lemma based): one
/// coreLARS2 solve per column of X, given the Gram matrix G = D'D and
/// the correlations DtX = D'X. Parallelized over signals with OpenMP;
/// per-thread workspaces preallocated, so the parallel loop performs no
/// allocation.
template <typename T>
void lasso2(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
      SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const int numThreads, Matrix<T>* path, int length_path) {
   spalpha.clear();
   const INTM M = X.n();   // number of signals
   const INTM K = G.n();   // number of atoms
   Matrix<T> vM;           // coefficient values (L x M)
   Matrix<INTM> rM;        // atom indices (L x M)
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   if (path) path->setZeros();
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread workspaces for coreLARS2.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      uT[i].resize(K);
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   INTM i;
   // Squared L2 norms of all signals, computed once before the loop.
   Vector<T> norms;
   X.norm_2sq_cols(norms);
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      //    Vector<T> Xi;
      //    X.refCol(i,Xi);
      //    T normX = Xi.nrm2sq();
      T normX = norms[i];
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      DtX.copyCol(i,DtR);
      //D.multTrans(Xi,DtR);
      // path is only recorded for the first signal.
      coreLARS2(DtR,G,GsT[numT],GaT[numT],invGsT[numT],
            uT[numT],coeffs,
            ind,workT[numT],normX,mode,constraint,pos,
            path && i==0 ? path->rawX() : NULL,length_path);
   }
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
      Vector<T>& coeffs, const Vector<T>& weights, T normX,
      const constraint_type mode,
      const T constraint, const bool pos) {
   /// Convenience overload: allocates the full set of workspaces (active
   /// set capped at p = G.m()), runs the workspace-based coreLARS2W, then
   /// scatters the compact (index, value) solution into the dense coeffs.
   const INTM p = G.m();
   const INTM maxActive = p;
   Vector<T> values;
   Vector<INTM> selected;
   Vector<T> direction;
   values.resize(maxActive);
   selected.resize(maxActive);
   direction.resize(p);
   Matrix<T> gramActive;
   Matrix<T> gramActiveInv;
   Matrix<T> gramCols;
   Matrix<T> scratch;
   gramActive.resize(maxActive,maxActive);
   gramActiveInv.resize(maxActive,maxActive);
   gramCols.resize(p,maxActive);
   scratch.resize(p,3);
   coreLARS2W(DtR,G,gramActive,gramCols,gramActiveInv,direction,values,weights,
         selected,scratch,normX,mode,constraint,pos);
   coeffs.setZeros();
   INTM pos_i = 0;
   while (pos_i < maxActive && selected[pos_i] >= 0) {
      coeffs[selected[pos_i]]=values[pos_i];
      ++pos_i;
   };
};
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
      Vector<T>& coeffs, T normX,
      const constraint_type mode,
      const T constraint, const bool pos) {
   /// Convenience overload: allocates the full set of workspaces (active
   /// set capped at p = G.m()), runs the workspace-based coreLARS2, then
   /// scatters the compact (index, value) solution into the dense coeffs.
   const INTM p = G.m();
   const INTM maxActive = p;
   Vector<T> values;
   Vector<INTM> selected;
   Vector<T> direction;
   values.resize(maxActive);
   selected.resize(maxActive);
   direction.resize(p);
   Matrix<T> gramActive;
   Matrix<T> gramActiveInv;
   Matrix<T> gramCols;
   Matrix<T> scratch;
   gramActive.resize(maxActive,maxActive);
   gramActiveInv.resize(maxActive,maxActive);
   gramCols.resize(p,maxActive);
   scratch.resize(p,3);
   coreLARS2(DtR,G,gramActive,gramCols,gramActiveInv,direction,values,
         selected,scratch,normX,mode,constraint,pos);
   coeffs.setZeros();
   INTM pos_i = 0;
   while (pos_i < maxActive && selected[pos_i] >= 0) {
      coeffs[selected[pos_i]]=values[pos_i];
      ++pos_i;
   };
};
/// Auxiliary function for lasso
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint,
const bool pos,
T* path, int length_path) {
const int LL = Gs.n();
const int K = G.n();
const int L = MIN(LL,K);
if (length_path <= 1) length_path=4*L;
coeffs.setZeros();
ind.set(-1);
T* const pr_Gs = Gs.rawX();
T* const pr_invGs = invGs.rawX();
T* const pr_Ga = Ga.rawX();
T* const pr_work = work.rawX();
T* const pr_u = u.rawX();
T* const pr_DtR = DtR.rawX();
T* const pr_coeffs = coeffs.rawX();
INTM* const pr_ind = ind.rawX();
// Find the most correlated element
int currentInd = pos ? DtR.max() : DtR.fmax();
if (mode == PENALTY && abs(DtR[currentInd]) < constraint) return;
if (mode == L2ERROR && normX < constraint) return;
bool newAtom=true;
int i;
int iter=0;
T thrs = 0;
for (i = 0; i<L; ++i) {
++iter;
if (newAtom) {
pr_ind[i]=currentInd;
// cerr << "Add " << currentInd << endl;
G.extract_rawCol(pr_ind[i],pr_Ga+i*K);
for (int j = 0; j<=i; ++j)
pr_Gs[i*LL+j]=pr_Ga[i*K+pr_ind[j]];
// Update inverse of Gs
if (i == 0) {
pr_invGs[0]=T(1.0)/pr_Gs[0];
} else {
cblas_symv<T>(CblasColMajor,CblasUpper,i,T(1.0),
pr_invGs,LL,pr_Gs+i*LL,1,T(0.0),pr_u,1);
const T schur =
T(1.0)/(pr_Gs[i*LL+i]-cblas_dot<T>(i,pr_u,1,pr_Gs+i*LL,1));
pr_invGs[i*LL+i]=schur;
// cblas_copy<T>(i,pr_u,1,pr_invGs+i*LL,1);
memcpy(pr_invGs+i*LL,pr_u,i*sizeof(T));
cblas_scal<T>(i,-schur,pr_invGs+i*LL,1);
cblas_syr<T>(CblasColMajor,CblasUpper,i,schur,pr_u,1,
pr_invGs,LL);
}
}
// Compute the path direction
for (int j = 0; j<=i; ++j)
pr_work[j]= pr_DtR[pr_ind[j]] > 0 ? T(1.0) : T(-1.0);
cblas_symv<T>(CblasColMajor,CblasUpper,i+1,T(1.0),pr_invGs,LL,
pr_work,1,T(0.0),pr_u,1);
// Compute the step on the path
T step_max = INFINITY;
int first_zero = -1;
for (int j = 0; j<=i; ++j) {
T ratio = -pr_coeffs[j]/pr_u[j];
if (ratio > 0 && ratio <= step_max) {
step_max=ratio;
first_zero=j;
}
}
// PRINT_F(step_max)
T current_correlation = abs<T>(pr_DtR[pr_ind[0]]);
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,i+1,T(1.0),pr_Ga,
K,pr_u,1,T(0.0),pr_work+2*K,1);
memcpy(pr_work+K,pr_work+2*K,K*sizeof(T));
memcpy(pr_work,pr_work+K,K*sizeof(T));
// cblas_copy<T>(K,pr_work+2*K,1,pr_work+K,1);
// cblas_copy<T>(K,pr_work+2*K,1,pr_work,1);
for (int j = 0; j<=i; ++j) {
pr_work[pr_ind[j]]=INFINITY;
pr_work[pr_ind[j]+K]=INFINITY;
}
for (int j = 0; j<K; ++j) {
pr_work[j] = ((pr_work[j] < INFINITY) && (pr_work[j] > T(-1.0))) ? (pr_DtR[j]+current_correlation)/(T(1.0)+pr_work[j]) : INFINITY;
}
// work.print("work");
for (int j = 0; j<K; ++j) {
pr_work[j+K] = ((pr_work[j+K] < INFINITY) && (pr_work[j+K] < T(1.0))) ? (current_correlation-pr_DtR[j])/(T(1.0)-pr_work[j+K]) : INFINITY;
}
// work.print("work");
if (pos) {
for (int j = 0; j<K; ++j) {
pr_work[j]=INFINITY;
}
}
// work.print("work");
// coeffs.print("coeffs");
int index = cblas_iamin<T>(2*K,pr_work,1);
T step = pr_work[index];
// Choose next element
currentInd = index % K;
// compute the coefficients of the polynome representing normX^2
T coeff1 = 0;
for (int j = 0; j<=i; ++j)
coeff1 += pr_DtR[pr_ind[j]] > 0 ? pr_u[j] : -pr_u[j];
T coeff2 = 0;
for (int j = 0; j<=i; ++j)
coeff2 += pr_DtR[pr_ind[j]]*pr_u[j];
T coeff3 = normX-constraint;
T step_max2;
if (mode == PENALTY) {
step_max2 = current_correlation-constraint;
} else if (mode == L2ERROR) {
/// L2ERROR
const T delta = coeff2*coeff2-coeff1*coeff3;
step_max2 = delta < 0 ? INFINITY : (coeff2-sqrt(delta))/coeff1;
step_max2 = MIN(current_correlation,step_max2);
} else {
/// L1COEFFS
step_max2 = coeff1 < 0 ? INFINITY : (constraint-thrs)/coeff1;
step_max2 = MIN(current_correlation,step_max2);
}
step = MIN(MIN(step,step_max2),step_max);
if (step == INFINITY) break; // stop the path
// Update coefficients
cblas_axpy<T>(i+1,step,pr_u,1,pr_coeffs,1);
if (pos) {
for (int j = 0; j<i+1; ++j)
if (pr_coeffs[j] < 0) pr_coeffs[j]=0;
}
// Update correlations
cblas_axpy<T>(K,-step,pr_work+2*K,1,pr_DtR,1);
// Update normX
normX += coeff1*step*step-2*coeff2*step;
// Update norm1
thrs += step*coeff1;
if (path) {
for (int k = 0; k<=i; ++k)
path[iter*K+ind[k]]=pr_coeffs[k];
}
// Choose next action
if (step == step_max) {
// cerr << "Remove " << pr_ind[first_zero] << endl;
/// Downdate, remove first_zero
/// Downdate Ga, Gs, invGs, ind, coeffs
for (int j = first_zero; j<i; ++j) {
cblas_copy<T>(K,pr_Ga+(j+1)*K,1,pr_Ga+j*K,1);
pr_ind[j]=pr_ind[j+1];
pr_coeffs[j]=pr_coeffs[j+1];
}
pr_ind[i]=-1;
pr_coeffs[i]=0;
for (int j = first_zero; j<i; ++j) {
cblas_copy<T>(first_zero,pr_Gs+(j+1)*LL,1,pr_Gs+j*LL,1);
cblas_copy<T>(i-first_zero,pr_Gs+(j+1)*LL+first_zero+1,1,
pr_Gs+j*LL+first_zero,1);
}
const T schur = pr_invGs[first_zero*LL+first_zero];
cblas_copy<T>(first_zero,pr_invGs+first_zero*LL,1,pr_u,1);
cblas_copy<T>(i-first_zero,pr_invGs+(first_zero+1)*LL+first_zero,LL,
pr_u+first_zero,1);
for (int j = first_zero; j<i; ++j) {
cblas_copy<T>(first_zero,pr_invGs+(j+1)*LL,1,pr_invGs+j*LL,1);
cblas_copy<T>(i-first_zero,pr_invGs+(j+1)*LL+first_zero+1,1,
pr_invGs+j*LL+first_zero,1);
}
cblas_syr<T>(CblasColMajor,CblasUpper,i,T(-1.0)/schur,
pr_u,1,pr_invGs,LL);
newAtom=false;
i=i-2;
} else {
newAtom=true;
}
if ((iter >= length_path-1) || abs(step) < 1e-15 ||
step == step_max2 || (normX < 1e-15) ||
(i == (L-1)) ||
(mode == L2ERROR && normX - constraint < 1e-15) ||
(mode == L1COEFFS && (constraint-thrs < 1e-15))) {
break;
}
}
}
/// Auxiliary function for lasso
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
const Vector<T>& weights,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint,
const bool pos) {
const int LL = Gs.n();
const int K = G.n();
const int L = MIN(LL,K);
coeffs.setZeros();
ind.set(-1);
T* const pr_Gs = Gs.rawX();
T* const pr_invGs = invGs.rawX();
T* const pr_Ga = Ga.rawX();
// T* const pr_G = G.rawX();
T* const pr_work = work.rawX();
T* const pr_u = u.rawX();
T* const pr_DtR = DtR.rawX();
T* const pr_coeffs = coeffs.rawX();
T* const pr_weights = weights.rawX();
INTM* const pr_ind = ind.rawX();
DtR.div(weights);
// Find the most correlated element
int currentInd = pos ? DtR.max() : DtR.fmax();
if (mode == PENALTY && abs(DtR[currentInd]) < constraint) return;
if (mode == L2ERROR && normX < constraint) return;
bool newAtom=true;
int i;
int iter=0;
T thrs = 0;
for (i = 0; i<L; ++i) {
++iter;
if (newAtom) {
pr_ind[i]=currentInd;
// Update upper part of Gs and Ga
G.extract_rawCol(pr_ind[i],pr_Ga+i*K);
for (int j = 0; j<=i; ++j)
pr_Gs[i*LL+j]=pr_Ga[i*K+pr_ind[j]];
// Update inverse of Gs
if (i == 0) {
pr_invGs[0]=T(1.0)/pr_Gs[0];
} else {
cblas_symv<T>(CblasColMajor,CblasUpper,i,T(1.0),
pr_invGs,LL,pr_Gs+i*LL,1,T(0.0),pr_u,1);
const T schur =
T(1.0)/(pr_Gs[i*LL+i]-cblas_dot<T>(i,pr_u,1,pr_Gs+i*LL,1));
pr_invGs[i*LL+i]=schur;
cblas_copy<T>(i,pr_u,1,pr_invGs+i*LL,1);
cblas_scal<T>(i,-schur,pr_invGs+i*LL,1);
cblas_syr<T>(CblasColMajor,CblasUpper,i,schur,pr_u,1,
pr_invGs,LL);
}
}
// Compute the path direction
for (int j = 0; j<=i; ++j)
pr_work[j]= pr_DtR[pr_ind[j]] > 0 ? weights[pr_ind[j]] : -weights[pr_ind[j]];
cblas_symv<T>(CblasColMajor,CblasUpper,i+1,T(1.0),pr_invGs,LL,
pr_work,1,T(0.0),pr_u,1);
// Compute the step on the path
T step_max = INFINITY;
int first_zero = -1;
for (int j = 0; j<=i; ++j) {
T ratio = -pr_coeffs[j]/pr_u[j];
if (ratio > 0 && ratio <= step_max) {
step_max=ratio;
first_zero=j;
}
}
T current_correlation = abs<T>(pr_DtR[pr_ind[0]]);
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,i+1,T(1.0),pr_Ga,
K,pr_u,1,T(0.0),pr_work+2*K,1);
vDiv<T>(K,pr_work+2*K,pr_weights,pr_work+2*K);
cblas_copy<T>(K,pr_work+2*K,1,pr_work+K,1);
cblas_copy<T>(K,pr_work+2*K,1,pr_work,1);
for (int j = 0; j<=i; ++j) {
pr_work[pr_ind[j]]=INFINITY;
pr_work[pr_ind[j]+K]=INFINITY;
}
for (int j = 0; j<K; ++j) {
pr_work[j] = ((pr_work[j] < INFINITY) && (pr_work[j] > T(-1.0))) ? (pr_DtR[j]+current_correlation)/(T(1.0)+pr_work[j]) : INFINITY;
}
for (int j = 0; j<K; ++j) {
pr_work[j+K] = ((pr_work[j+K] < INFINITY) && (pr_work[j+K] < T(1.0))) ? (current_correlation-pr_DtR[j])/(T(1.0)-pr_work[j+K]) : INFINITY;
}
if (pos) {
for (int j = 0; j<K; ++j) {
pr_work[j]=INFINITY;
}
}
int index = cblas_iamin<T>(2*K,pr_work,1);
T step = pr_work[index];
// Choose next element
currentInd = index % K;
// compute the coefficients of the polynome representing normX^2
T coeff1 = 0;
for (int j = 0; j<=i; ++j)
coeff1 += pr_DtR[pr_ind[j]] > 0 ? pr_weights[pr_ind[j]]*pr_u[j] :
-pr_weights[pr_ind[j]]*pr_u[j];
T coeff2 = 0;
for (int j = 0; j<=i; ++j)
coeff2 += pr_DtR[pr_ind[j]]*pr_u[j]*pr_weights[pr_ind[j]];
T coeff3 = normX-constraint;
T step_max2;
if (mode == PENALTY) {
step_max2 = current_correlation-constraint;
} else if (mode == L2ERROR) {
/// L2ERROR
const T delta = coeff2*coeff2-coeff1*coeff3;
step_max2 = delta < 0 ? INFINITY : (coeff2-sqrt(delta))/coeff1;
} else {
/// L1COEFFS
step_max2 = coeff1 < 0 ? INFINITY : (constraint-thrs)/coeff1;
}
step = MIN(MIN(step,step_max2),step_max);
if (step == INFINITY) break; // stop the path
// Update coefficients
cblas_axpy<T>(i+1,step,pr_u,1,pr_coeffs,1);
// Update correlations
cblas_axpy<T>(K,-step,pr_work+2*K,1,pr_DtR,1);
// Update normX
normX += coeff1*step*step-2*coeff2*step;
// Update norm1
thrs += step*coeff1;
if (step == step_max) {
/// Downdate, remove first_zero
/// Downdate Ga, Gs, invGs, ind, coeffs
for (int j = first_zero; j<i; ++j) {
cblas_copy<T>(K,pr_Ga+(j+1)*K,1,pr_Ga+j*K,1);
pr_ind[j]=pr_ind[j+1];
pr_coeffs[j]=pr_coeffs[j+1];
}
pr_ind[i]=-1;
pr_coeffs[i]=0;
for (int j = first_zero; j<i; ++j) {
cblas_copy<T>(first_zero,pr_Gs+(j+1)*LL,1,pr_Gs+j*LL,1);
cblas_copy<T>(i-first_zero,pr_Gs+(j+1)*LL+first_zero+1,1,
pr_Gs+j*LL+first_zero,1);
}
const T schur = pr_invGs[first_zero*LL+first_zero];
cblas_copy<T>(first_zero,pr_invGs+first_zero*LL,1,pr_u,1);
cblas_copy<T>(i-first_zero,pr_invGs+(first_zero+1)*LL+first_zero,LL,
pr_u+first_zero,1);
for (int j = first_zero; j<i; ++j) {
cblas_copy<T>(first_zero,pr_invGs+(j+1)*LL,1,pr_invGs+j*LL,1);
cblas_copy<T>(i-first_zero,pr_invGs+(j+1)*LL+first_zero+1,1,
pr_invGs+j*LL+first_zero,1);
}
cblas_syr<T>(CblasColMajor,CblasUpper,i,T(-1.0)/schur,
pr_u,1,pr_invGs,LL);
newAtom=false;
i=i-2;
} else {
newAtom=true;
}
// Choose next action
if (iter > 4*L || abs(step) < 1e-10 ||
step == step_max2 || (normX < 1e-10) ||
(i == (L-1)) ||
(mode == L2ERROR && normX - constraint < 1e-10) ||
(mode == L1COEFFS && (constraint-thrs < 1e-10))) {
break;
}
}
}
/* ************************
* Iterative thresholding
* ************************/
/// Implementation of IST for solving
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||X_i - D\alpha_i||_2^2 <= constraint, or
/// \forall i, \min_{\alpha_i} lambda*||\alpha_i||_1 +
/// ... ||X_i - D\alpha_i||_2^2
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax,
const T tol,
const int numThreads) {
Matrix<T> alpha;
spalpha.toFull(alpha);
spalpha.clear();
ist(X,D,alpha,lambda,mode,itermax,tol,numThreads);
alpha.toSparse(spalpha);
}
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
Matrix<T>& alpha, T lambda, constraint_type mode,
const int itermax,
const T tol, const int numThreads) {
if (mode == L1COEFFS) {
std::cerr << "Mode not implemented" << std::endl;
return;
}
int K=D.n();
int M=X.n();
alpha.resize(K,M);
if (!D.isNormalized()) {
cerr << "Current implementation of IST does not support non-normalized dictionaries" << endl;
return;
}
/// compute the Gram Matrix G=D'D
//CachedProdMatrix<T> G(D, K < 20000 && M*K/10 > K);
//ProdMatrix<T> G(D, K < 20000 && M*K/10 > K);
Matrix<T> G;
D.XtX(G);
// for (int i = 0; i<K; ++i) G[i*K+i] += 1e-6;
G.addDiag(1e-12);
ProdMatrix<T> DtX(D,X,false);
int NUM_THREADS=init_omp(numThreads);
Vector<T>* DtRT= new Vector<T>[NUM_THREADS];
SpVector<T>* spAlphaT= new SpVector<T>[NUM_THREADS];
for (int i = 0; i<NUM_THREADS; ++i) {
DtRT[i].resize(K);
spAlphaT[i].resize(K);
};
int i;
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> coeffs;
alpha.refCol(i,coeffs);
Vector<T>& DtR=DtRT[numT];
SpVector<T>& spAlpha=spAlphaT[numT];
T norm1 = coeffs.asum();
// Compute DtR
DtX.copyCol(i,DtR);
Vector<T> Xi;
X.refCol(i,Xi);
T normX2 = Xi.nrm2sq();
if (norm1 > EPSILON) {
coeffs.toSparse(spAlpha);
G.mult(spAlpha,DtR,-1.0,1.0);
}
if (mode == PENALTY) {
coreIST(G,DtR,coeffs,lambda,itermax,tol);
} else {
coreISTconstrained(G,DtR,coeffs,normX2,lambda,itermax,tol);
}
}
delete[](DtRT);
delete[](spAlphaT);
}
/*template <typename T>
inline void generalCD(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
const T lambda, const int itermax, const T tol) {
Vector<T> diag;
G.diag(diag);
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
for (int iter=0; iter < itermax; ++iter) {
if (iter % 5 == 0) {
T eps1=DtRv.fmaxval()/lambda-1;
if (eps1 <= tol) {
T eps2=1e10;
for (int jj=0; jj<K; ++jj) {
if (coeffs[jj] > 0) {
eps2=MIN(DtR[jj],eps2);
} else if (coeffs[jj] < 0) {
eps2=MIN(-DtR[jj],eps2);
}
}
eps2=-(eps2/lambda-1);
if (eps2 <= tol)
break;
}
}
for (int j = 0; j <K; ++j) {
T crit=DtR[j]+coeffs[j]*diag[j];
if (crit > lambda) {
T diff=coeffs[j];
coeffs[j]=(crit-lambda)/diag[j];
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
} else if (crit < -lambda) {
T diff=coeffs[j];
coeffs[j]=(crit+lambda)/diag[j];
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
} else if (coeffs[j]) {
G.add_rawCol(j,DtR,coeffs[j]);
coeffs[j]=T();
}
}
}
}*/
template <typename T>
inline void coreIST(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
const T thrs, const int itermax,
const T tol) {
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
// T* const prG = G.rawX();
const T lambda_init=thrs;
T maxDtR = DtRv.fmaxval();
T norm1=coeffsv.asum();
T lambda=lambda_init;
vAdd(K,DtR,coeffs,DtR);
for (int iter=0; iter < itermax; ++iter) {
for (int j = 0; j <K; ++j) {
if (DtR[j] > lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]-lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
//cblas_axpy(K,diff,prG+j*K,1,DtR,1);
} else if (DtR[j] < -lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]+lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
//cblas_axpy(K,diff,prG+j*K,1,DtR,1);
} else if (coeffs[j]) {
T diff=coeffs[j];
coeffs[j]=T();
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
//cblas_axpy(K,diff,prG+j*K,1,DtR,1);
}
}
if (iter % 5 == 1) {
vSub(K,DtR,coeffs,DtR);
maxDtR = DtRv.fmaxval();
norm1 =T();
T DtRa = T();
for (int j = 0; j<K; ++j) {
if (coeffs[j]) {
norm1 += abs(coeffs[j]);
DtRa += DtR[j]*coeffs[j];
}
}
vAdd(K,DtR,coeffs,DtR);
const T kappa = -DtRa+norm1*maxDtR;
if (abs(lambda - maxDtR) < tol && kappa <= tol)
break;
}
}
}
template <typename T>
inline void coreISTW(const Matrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,const Vector<T>& weightsv,
const T lambda, const int itermax,
const T tol) {
T opt=0;
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const weights = weightsv.rawX();
T* const DtR = DtRv.rawX();
// T* const prG = G.rawX();
for (int iter=0; iter < itermax; ++iter) {
for (int j = 0; j <K; ++j) {
const T nrm = G(j,j);
const T u = DtR[j]/nrm+coeffs[j];
const T thrs = lambda*weights[j]/nrm;
if (u > thrs) {
T diff=coeffs[j];
coeffs[j]=u-thrs;
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
//cblas_axpy(K,diff,prG+j*K,1,DtR,1);
} else if (u < -thrs) {
T diff=coeffs[j];
coeffs[j]=u+thrs;
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
//cblas_axpy(K,diff,prG+j*K,1,DtR,1);
} else if (coeffs[j]) {
G.add_rawCol(j,DtR,coeffs[j]);
coeffs[j]=0;
//cblas_axpy(K,diff,prG+j*K,1,DtR,1);
}
}
if (iter % 10 == 0) {
opt=0;
for (int j = 0; j <K; ++j) {
if (coeffs[j] > 0) {
opt=MAX(opt,abs<T>(T(1.0)-DtR[j]/(weights[j]*lambda)));
} else if (coeffs[j] < 0) {
opt=MAX(opt,abs<T>(T(1.0)+DtR[j]/(lambda*weights[j])));
} else {
opt=MAX(opt,abs<T>(DtR[j]/(lambda*weights[j]))-T(1.0));
}
}
if (opt < tol) break;
}
}
}
/*template <typename T>
inline void coreIST_unnormalized(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
const T thrs, const int itermax,
const T tol) {
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
// T* const prG = G.rawX();
const T lambda_init=thrs;
T maxDtR = DtRv.fmaxval();
T norm1=coeffsv.asum();
T lambda=lambda_init;
DtRv.add(coeffsv);
// vAdd(K,DtR,coeffs,DtR);
for (int iter=0; iter < itermax; ++iter) {
for (int j = 0; j <K; ++j) {
if (DtR[j] > lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]-lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
} else if (DtR[j] < -lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]+lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
} else if (coeffs[j]) {
T diff=coeffs[j];
coeffs[j]=T();
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
}
}
if (iter % 5 == 1) {
vSub(K,DtR,coeffs,DtR);
maxDtR = DtRv.fmaxval();
norm1 =T();
T DtRa = T();
for (int j = 0; j<K; ++j) {
if (coeffs[j]) {
norm1 += abs(coeffs[j]);
DtRa += DtR[j]*coeffs[j];
}
}
DtRv.add(coeffs);
const T kappa = -DtRa+norm1*maxDtR;
if (abs(lambda - maxDtR) < tol && kappa <= tol)
break;
}
}
}*/
/// coreIST constrained
template <typename T>
void coreISTconstrained(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>&
coeffsv, const T normX2, const T eps, const int itermax, const T tol) {
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
// T* const prG = G.rawX();
T err = normX2;
T norm1 = coeffsv.asum();
if (!norm1 && err <= eps) return;
T current_tol = 10.0*tol;
T maxDtR = DtRv.fmaxval();
T lambda = maxDtR;
T lambdasq= lambda*lambda;
if (!norm1) {
lambdasq *= eps/err;
lambda=sqrt(lambdasq);
}
Vector<int> indices(K);
indices.set(-1);
int* const pr_indices=indices.rawX();
int count;
for (int iter=0; iter < itermax; ++iter) {
count=0;
T old_err = err;
for (int j = 0; j <K; ++j) {
// Soft-thresholding
T old_coeff = coeffs[j];
T diff = DtR[j]+old_coeff;
if (diff > lambda) {
coeffs[j] = diff - lambda;
err+=lambdasq-DtR[j]*DtR[j];
pr_indices[count++]=j;
} else if (diff < - lambda) {
coeffs[j] = diff + lambda;
err+=lambdasq-DtR[j]*DtR[j];
pr_indices[count++]=j;
} else {
coeffs[j]=T();
if (old_coeff) {
err+=diff*diff-DtR[j]*DtR[j];
}
}
// Update DtR
diff = old_coeff-coeffs[j];
if (diff) {
G.add_rawCol(j,DtR,diff);
//cblas_axpy<T>(K,old_coeff-coeffs[j],prG+j*K,1,DtR,1);
}
}
maxDtR = DtRv.fmaxval();
norm1 =T();
T DtRa = T();
for (int j = 0; j<count; ++j) {
const int ind = pr_indices[j];
norm1 += abs(coeffs[ind]);
DtRa += DtR[ind]*coeffs[ind];
}
if (norm1-DtRa/maxDtR <= current_tol) {
const bool change = ((old_err > eps) && err < eps+current_tol) ||
(old_err < eps && err > eps-current_tol);
if (change) {
if (current_tol == tol) {
break;
} else {
current_tol = MAX(current_tol*0.5,tol);
}
}
lambdasq *= eps/err;
lambda=sqrt(lambdasq);
}
}
};
/// ist for group Lasso
template <typename T>
void ist_groupLasso(const Matrix<T>* XT, const Matrix<T>& D,
Matrix<T>* alphaT, const int Ngroups,
const T lambda, const constraint_type mode,
const int itermax,
const T tol, const int numThreads) {
int K=D.n();
int n = D.m();
if (!D.isNormalized()) {
cerr << "Current implementation of block coordinate descent does not support non-normalized dictionaries" << endl;
return;
}
if (mode == L1COEFFS) {
std::cerr << "Mode not implemented" << std::endl;
return;
}
/// compute the Gram Matrix G=D'D
Matrix<T> G;
D.XtX(G);
int NUM_THREADS=init_omp(numThreads);
Matrix<T>* RtDT = new Matrix<T>[NUM_THREADS];
Matrix<T>* alphatT = new Matrix<T>[NUM_THREADS];
int i;
#pragma omp parallel for private(i)
for (i = 0; i< Ngroups; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
const Matrix<T>& X = XT[i];
int M = X.n();
Matrix<T>& alphat = alphatT[numT];
alphaT[i].transpose(alphat);
Matrix<T>& RtD = RtDT[numT];
X.mult(D,RtD,true,false);
Vector<T> col, col2;
T norm1 = alphat.asum();
T normX2 = 0;
if (!norm1) {
Vector<T> DtR_mean(K);
Vector<T> coeffs_mean(K);
coeffs_mean.setZeros();
RtD.meanRow(DtR_mean);
coeffs_mean.setZeros();
if (mode == PENALTY) {
coreIST(G,DtR_mean,coeffs_mean,lambda/T(2.0),itermax,tol);
} else {
Vector<T> meanVec(n);
X.meanCol(meanVec);
normX2=meanVec.nrm2sq();
coreISTconstrained(G,DtR_mean,coeffs_mean,normX2,
lambda,itermax,tol);
SpVector<T> spalpha(K);
normX2-=computeError(normX2,G,DtR_mean,coeffs_mean,spalpha);
normX2=X.normFsq()-M*normX2;
}
alphat.fillRow(coeffs_mean);
}
if (M > 1) {
for (int j = 0; j<K; ++j) {
alphat.refCol(j,col);
const T nrm=col.nrm2sq();
if (nrm) {
G.refCol(j,col2);
RtD.rank1Update(col,col2,T(-1.0));
}
}
if (mode == PENALTY) {
coreGroupIST(G,RtD,alphat,sqr<T>(M)*lambda/T(2.0),itermax,sqr<T>(M)*tol);
} else {
coreGroupISTConstrained(G,RtD,alphat,normX2,M*lambda,itermax,sqr<T>(M)*tol);
}
}
alphat.transpose(alphaT[i]);
}
delete[](RtDT);
delete[](alphatT);
};
template <typename T>
void coreGroupIST(const Matrix<T>& G, Matrix<T>& RtDm,
Matrix<T>& coeffsm,
const T thrs,
const int itermax,
const T tol) {
const int K = G.n();
const int M = RtDm.m();
T* const prG = G.rawX();
T* const RtD = RtDm.rawX();
T* const coeffs = coeffsm.rawX();
const T lambda_init=thrs;
T lambda=lambda_init;
Vector<T> old_coeffv(M);
T* const old_coeff = old_coeffv.rawX();
Vector<T> normsv(K);
T* const norms = normsv.rawX();
coeffsm.norm_2_cols(normsv);
Vector<T> normRtDv(K);
Vector<int> activatev(K);
activatev.set(3);
int* const activate=activatev.rawX();
for (int iter=0; iter < itermax; ++iter) {
for (int j = 0; j <K; ++j) {
if (activate[j] >= 0) {
if (norms[j]) {
cblas_copy(M,coeffs+j*M,1,old_coeff,1);
vAdd(M,coeffs+j*M,RtD+j*M,coeffs+j*M);
const T nrm = cblas_nrm2(M,coeffs+j*M,1);
if (nrm > lambda) {
norms[j]=nrm-lambda;
cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
vSub(M,old_coeff,coeffs+j*M,old_coeff);
cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
activate[j]=5;
} else {
memset(coeffs+j*M,0,M*sizeof(T));
norms[j]=T();
cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
--activate[j];
}
} else {
cblas_copy(M,RtD+j*M,1,old_coeff,1);
const T nrm = cblas_nrm2(M,old_coeff,1);
if (nrm > lambda) {
norms[j]=nrm-lambda;
cblas_copy(M,old_coeff,1,coeffs+j*M,1);
cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
cblas_ger(CblasColMajor,M,K,T(-1.0),coeffs+j*M,1,prG+j*K,1,RtD,M);
activate[j]=5;
} else {
activate[j] = (activate[j] == 0) ? -10 : activate[j]-1;
}
}
} else {
++activate[j];
}
}
if (iter % 5 == 4) {
T norm1=normsv.asum();
RtDm.norm_2sq_cols(normRtDv);
T maxDtR = sqr(normRtDv.maxval());
T DtRa=T();
for (int j = 0; j<K; ++j) {
if (norms[j]) {
DtRa += cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
}
}
if ((maxDtR - lambda) < (tol*maxDtR/norm1) && norm1-DtRa/maxDtR < tol) break;
}
}
};
/// Auxiliary function for ist_groupLasso
template <typename T>
void coreGroupISTConstrained(const Matrix<T>& G, Matrix<T>& RtDm,
Matrix<T>& coeffsm, const T normR,
const T eps,
const int itermax,
const T tol) {
const int K = G.n();
const int M = RtDm.m();
T* const prG = G.rawX();
T* const RtD = RtDm.rawX();
T* const coeffs = coeffsm.rawX();
T err = normR;
Vector<T> old_coeffv(M);
T* const old_coeff = old_coeffv.rawX();
Vector<T> normsv(K);
T* const norms = normsv.rawX();
coeffsm.norm_2_cols(normsv);
Vector<T> normRtDv(K);
RtDm.norm_2sq_cols(normRtDv);
Vector<int> activatev(K);
activatev.set(3);
int* const activate=activatev.rawX();
T norm1 = normsv.sum();
if (!norm1 && err <= eps) return;
T current_tol = 10.0*tol;
T maxDtR = sqr(normRtDv.maxval());
T lambda = maxDtR;
T lambdasq= lambda*lambda;
if (!norm1) {
lambdasq *= eps/err;
lambda=sqrt(lambdasq);
}
for (int iter=0; iter < itermax; ++iter) {
T old_err = err;
for (int j = 0; j <K; ++j) {
if (activate[j] >= 0) {
if (norms[j]) {
cblas_copy(M,coeffs+j*M,1,old_coeff,1);
vAdd(M,coeffs+j*M,RtD+j*M,coeffs+j*M);
const T nrm = cblas_nrm2(M,coeffs+j*M,1);
if (nrm > lambda) {
norms[j]=nrm-lambda;
cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
vSub(M,old_coeff,coeffs+j*M,old_coeff);
err += cblas_dot(M,old_coeff,1,old_coeff,1)
+2*cblas_dot(M,old_coeff,1,RtD+j*M,1);
cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
activate[j]=3;
} else {
memset(coeffs+j*M,0,M*sizeof(T));
norms[j]=T();
err += cblas_dot(M,old_coeff,1,old_coeff,1)
+2*cblas_dot(M,old_coeff,1,RtD+j*M,1);
cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
--activate[j];
}
} else {
cblas_copy(M,RtD+j*M,1,old_coeff,1);
const T nrm = cblas_nrm2(M,old_coeff,1);
if (nrm > lambda) {
norms[j]=nrm-lambda;
cblas_copy(M,old_coeff,1,coeffs+j*M,1);
cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
err += cblas_dot(M,coeffs+j*M,1,coeffs+j*M,1)
-2*cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
cblas_ger(CblasColMajor,M,K,T(-1.0),coeffs+j*M,1,prG+j*K,1,RtD,M);
activate[j]=3;
} else {
activate[j] = (activate[j] == 0) ? -3 : activate[j]-1;
}
}
} else {
++activate[j];
}
}
norm1 = normsv.sum();
RtDm.norm_2sq_cols(normRtDv);
maxDtR = sqr(normRtDv.maxval());
T DtRa=T();
for (int j = 0; j<K; ++j) {
if (norms[j]) {
DtRa += cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
}
}
if (norm1-DtRa/maxDtR <= current_tol) {
const T tol_bis=current_tol*maxDtR;
const bool change = ((old_err > eps) && err < eps+tol_bis) ||
(old_err < eps && err > eps-tol_bis);
if (change) {
if (current_tol == tol) {
break;
} else {
current_tol = MAX(current_tol*0.5,tol);
}
}
lambdasq *= eps/err;
lambda=sqrt(lambdasq);
}
}
};
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,const Vector<T>& norms,
const Matrix<T>& G,const Matrix<T>& RtD,const Matrix<T>& alphat) {
T err2 = normX2;
Vector<T> col,col2;
for (int j = 0; j<G.n(); ++j) {
if (norms[j] > EPSILON) {
alphat.refCol(j,col);
RtD.refCol(j,col2);
err2 -= 2*col.dot(col2);
T add = 0.0;
for (int k = 0; k<j; ++k) {
if (norms[k] > EPSILON) {
alphat.refCol(k,col2);
add -= G(j,k)*col.dot(col2);
}
}
add += add - G(j,j)*col.nrm2sq();
err2 += add;
}
}
return err2;
}
/// auxiliary function for ist and ist_groupLasso
template <typename T>
T computeError(const T normX2,
const Matrix<T>& G,const Vector<T>& DtR,const Vector<T>& coeffs,
SpVector<T>& spAlpha) {
coeffs.toSparse(spAlpha);
return normX2 -G.quad(spAlpha)-2*DtR.dot(spAlpha);
};
/* ******************
* Simultaneous OMP
* *****************/
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
const int Ngroups, const int L, const T eps,const int numThreads) {
somp(X,D,spalpha,Ngroups,L,&eps,false,numThreads);
}
template <typename T>
void somp(const Matrix<T>* XT, const Matrix<T>& D, SpMatrix<T>* spalphaT,
const int Ngroups, const int LL, const T* eps, const bool adapt,
const int numThreads) {
if (LL <= 0) return;
const INTM K = D.n();
const INTM L = MIN(D.m(),MIN(LL,K));
if (!D.isNormalized()) {
cerr << "Current implementation of OMP does not support non-normalized dictionaries" << endl;
return;
}
/// compute the Gram Matrix G=D'D
Matrix<T> G;
D.XtX(G);
init_omp(numThreads);
int i;
#pragma omp parallel for private(i)
for (i = 0; i< Ngroups; ++i) {
const Matrix<T>& X = XT[i];
const INTM M = X.n();
SpMatrix<T>& spalpha = spalphaT[i];
spalpha.clear();
Vector<INTM> rv;
Matrix<T> vM;
T thrs = adapt ? eps[i] : M*(*eps);
coreSOMP(X,D,G,vM,rv,L,thrs);
spalpha.convert2(vM,rv,K);
}
}
template <typename T>
void coreSOMP(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& G,
Matrix<T>& v,
Vector<INTM>& r, const int L, const T eps) {
const int K = G.n();
const int n = D.m();
const int M = X.n();
const bool big_mode = M*K*(n+L) > 2*(M*n*n+K*n*(n+L));
r.resize(L);
r.set(-1);
v.resize(0,X.n());
if (M == 1) {
Vector<T> scores(K);
Vector<T> norm(K);
Vector<T> tmp(K);
Matrix<T> Un(L,L);
Un.setZeros();
Matrix<T> Undn(K,L);
Matrix<T> Unds(L,L);
Matrix<T> Gs(K,L);
Vector<T> Rdn(K);
Vector<T> Xt(X.rawX(),n);
D.multTrans(Xt,Rdn);
Vector<T> RUn(L);
T normX = Xt.nrm2sq();
T lambda=0;
coreORMP(scores,norm,tmp,Un,Undn,Unds,Gs,Rdn,G,r,RUn,normX,&eps,&L,&lambda);
int count=0;
for (int i = 0; i<L; ++i) {
if (r[i] == -1) break;
++count;
}
v.resize(count,X.n());
Vector<T> v1(v.rawX(),count);
Vector<T> v2(RUn.rawX(),count);
v1.copy(v2);
return;
}
Matrix<T> XXtD;
Matrix<T> XtD;
T E;
if (big_mode) {
Matrix<T> XXt;
X.XXt(XXt);
E = XXt.trace();
if (E < eps) return;
XXt.mult(D,XXtD);
} else {
E=X.normFsq();
if (E < eps) return;
X.mult(D,XtD,true);
}
Matrix<T> A(K,L);
A.setZeros();
Matrix<T> B(L,K);
B.setZeros();
Matrix<T> S(L,L);
S.setZeros();
Matrix<T> Fs(K,L);
Fs.setZeros();
Matrix<T> Gs(K,L);
Gs.setZeros();
Matrix<T> As(L,L);
As.setZeros();
Vector<T> tmp(K);
Vector<T> e(K);
G.diag(e);
Vector<T> f(K);
if (big_mode) {
for (int i = 0; i<K; ++i) {
Vector<T> di;
D.refCol(i,di);
Vector<T> di2;
XXtD.refCol(i,di2);
f[i]=di.dot(di2);
}
} else {
XtD.norm_2sq_cols(f);
}
Vector<T> c(L);
c.setZeros();
Vector<T> scores(K);
/// permit unsafe fast low level accesses
T* const prAs = As.rawX();
T* const prA = A.rawX();
T* const prS = S.rawX();
T* const prGs = Gs.rawX();
T* const prFs = Fs.rawX();
T* const prB = B.rawX();
T* const pr_c = c.rawX();
T* const pr_tmp = tmp.rawX();
int j;
for (j = 0; j<L; ++j) {
scores.copy(f);
scores.div(e);
for (int k = 0; k<j; ++k) scores[r[k]]=-1.0;
const int currentInd = scores.max();
const T invNorm=T(1.0)/sqrt(e[currentInd]);
if (invNorm > 1e3) {
j=j-1;
break;
}
r[j]=currentInd;
E -= scores[currentInd];
for (int k = 0; k<j; ++k) prS[j*L+k]=T();
prS[j*L+j]=T(1.0);
for (int k = 0; k<j; ++k) prAs[k*L+j]=prA[k*K+currentInd];
/// Cholesky update with partial reorthogonalization
int iter = invNorm > 1.41 ? 2 : 1;
for (int k = 0; k<iter; ++k) {
for (int l = 0; l<j; ++l) {
T scal = -cblas_dot<T>(j-l+1,prAs+l*L+l,1,prS+j*L+l,1);
cblas_axpy<T>(l+1,scal,prS+l*L,1,prS+j*L,1);
}
}
cblas_scal<T>(j+1,invNorm,prS+j*L,1);
if (j == L-1 || E <= eps) {
++j;
break;
}
/// Update e,f,scores,A,B,As,Bs,Fs,Gs,S,c
/// Gs,S,A,As, e, Fs, B,c
Vector<T> Gsj;
Gs.refCol(j,Gsj);
G.copyCol(currentInd,Gsj);
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prS+j*L,1,
T(0.0),prA+j*K,1);
prAs[j*L+j]=prA[j*K+currentInd];
Vector<T> Aj;
A.refCol(j,Aj);
tmp.sqr(Aj);
e.sub(tmp);
Vector<T> Fsj;
Fs.refCol(j,Fsj);
if (big_mode) {
Vector<T> di;
D.refCol(currentInd,di);
XXtD.multTrans(di,Fsj);
} else {
Vector<T> di;
XtD.refCol(currentInd,di);
XtD.multTrans(di,Fsj);
}
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prFs,K,prS+j*L,1,
T(0.0),prB+j,L);
for (int k = 0; k<j;++k) pr_c[k]=T();
for (int k = 0; k<=j;++k)
cblas_axpy<T>(j,prS[j*L+k],prB+r[k]*L,1,pr_c,1);
f.add(tmp,f[currentInd]*invNorm*invNorm);
if (j > 0) {
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j,T(1.0),prA,K,pr_c,1,
T(0.0),pr_tmp,1);
} else {
tmp.setZeros();
}
cblas_axpy<T>(K,T(-1.0),prB+j,L,pr_tmp,1);
tmp.mult(tmp,Aj);
f.add(tmp,T(2.0));
}
A.clear();
B.clear();
Fs.clear();
Gs.clear();
As.clear();
if (j == 0) return;
Matrix<T> SSt;
S.upperTriXXt(SSt,j);
Matrix<T> Dg(n,j);
for (int i = 0; i<j;++i) {
Vector<T> Dgi;
Dg.refCol(i,Dgi);
D.copyCol(r[i],Dgi);
}
Matrix<T> SStDt;
SSt.mult(Dg,SStDt,false,true);
SStDt.mult(X,v);
};
#endif // DECOMP_H
|
heated_plate_openmp.c | # include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <omp.h>
int main ( int argc, char *argv[] );
/******************************************************************************/
#ifdef _CIVL
$input int M=5; // originally 500
$input int N=5; // originally 500
$input double EPSILON=0.1; // originally 0.001
#else
#define M 500
#define N 500
#define EPSILON 0.001
#endif
int main ( int argc, char *argv[] )
/******************************************************************************/
/*
Purpose:
MAIN is the main program for HEATED_PLATE_OPENMP.
Discussion:
This code solves the steady state heat equation on a rectangular region.
The sequential version of this program needs approximately
18/epsilon iterations to complete.
The physical region, and the boundary conditions, are suggested
by this diagram;
W = 0
+------------------+
| |
W = 100 | | W = 100
| |
+------------------+
W = 100
The region is covered with a grid of M by N nodes, and an N by N
array W is used to record the temperature. The correspondence between
array indices and locations in the region is suggested by giving the
indices of the four corners:
I = 0
[0][0]-------------[0][N-1]
| |
J = 0 | | J = N-1
| |
[M-1][0]-----------[M-1][N-1]
I = M-1
The steady state solution to the discrete heat equation satisfies the
following condition at an interior grid point:
W[Central] = (1/4) * ( W[North] + W[South] + W[East] + W[West] )
where "Central" is the index of the grid point, "North" is the index
of its immediate neighbor to the "north", and so on.
Given an approximate solution of the steady state heat equation, a
"better" solution is given by replacing each interior point by the
average of its 4 neighbors - in other words, by using the condition
as an ASSIGNMENT statement:
W[Central] <= (1/4) * ( W[North] + W[South] + W[East] + W[West] )
If this process is repeated often enough, the difference between successive
estimates of the solution will go to zero.
This program carries out such an iteration, using a tolerance specified by
the user, and writes the final estimate of the solution to a file that can
be used for graphic processing.
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
18 October 2011
Author:
Original C version by Michael Quinn.
This C version by John Burkardt.
Reference:
Michael Quinn,
Parallel Programming in C with MPI and OpenMP,
McGraw-Hill, 2004,
ISBN13: 978-0071232654,
LC: QA76.73.C15.Q55.
Local parameters:
Local, double DIFF, the norm of the change in the solution from one iteration
to the next.
Local, double MEAN, the average of the boundary values, used to initialize
the values of the solution in the interior.
Local, double U[M][N], the solution at the previous iteration.
Local, double W[M][N], the solution computed at the latest iteration.
*/
{
double diff;
double epsilon = EPSILON;
int i;
int iterations;
int iterations_print;
int j;
double mean;
double my_diff;
double u[M][N];
double w[M][N];
double wtime;
printf ( "\n" );
printf ( "HEATED_PLATE_OPENMP\n" );
printf ( " C/OpenMP version\n" );
printf ( " A program to solve for the steady state temperature distribution\n" );
printf ( " over a rectangular plate.\n" );
printf ( "\n" );
printf ( " Spatial grid of %d by %d points.\n", M, N );
printf ( " The iteration will be repeated until the change is <= %e\n", epsilon );
printf ( " Number of processors available = %d\n", omp_get_num_procs ( ) );
printf ( " Number of threads = %d\n", omp_get_max_threads ( ) );
/*
Set the boundary values, which don't change.
*/
mean = 0.0;
#pragma omp parallel shared ( w ) private ( i, j )
{
#pragma omp for
for ( i = 1; i < M - 1; i++ )
{
w[i][0] = 100.0;
}
#pragma omp for
for ( i = 1; i < M - 1; i++ )
{
w[i][N-1] = 100.0;
}
#pragma omp for
for ( j = 0; j < N; j++ )
{
w[M-1][j] = 100.0;
}
#pragma omp for
for ( j = 0; j < N; j++ )
{
w[0][j] = 0.0;
}
/*
Average the boundary values, to come up with a reasonable
initial value for the interior.
*/
#pragma omp for reduction ( + : mean )
for ( i = 1; i < M - 1; i++ )
{
mean = mean + w[i][0] + w[i][N-1];
}
#pragma omp for reduction ( + : mean )
for ( j = 0; j < N; j++ )
{
mean = mean + w[M-1][j] + w[0][j];
}
}
/*
OpenMP note:
You cannot normalize MEAN inside the parallel region. It
only gets its correct value once you leave the parallel region.
So we interrupt the parallel region, set MEAN, and go back in.
*/
mean = mean / ( double ) ( 2 * M + 2 * N - 4 );
printf ( "\n" );
printf ( " MEAN = %f\n", mean );
/*
Initialize the interior solution to the mean value.
*/
#pragma omp parallel shared ( mean, w ) private ( i, j )
{
#pragma omp for
for ( i = 1; i < M - 1; i++ )
{
for ( j = 1; j < N - 1; j++ )
{
w[i][j] = mean;
}
}
}
/*
iterate until the new solution W differs from the old solution U
by no more than EPSILON.
*/
iterations = 0;
iterations_print = 1;
printf ( "\n" );
printf ( " Iteration Change\n" );
printf ( "\n" );
wtime = omp_get_wtime ( );
diff = epsilon;
while ( epsilon <= diff )
{
# pragma omp parallel shared ( u, w ) private ( i, j )
{
/*
Save the old solution in U.
*/
# pragma omp for
for ( i = 0; i < M; i++ )
{
for ( j = 0; j < N; j++ )
{
u[i][j] = w[i][j];
}
}
/*
Determine the new estimate of the solution at the interior points.
The new solution W is the average of north, south, east and west neighbors.
*/
# pragma omp for
for ( i = 1; i < M - 1; i++ )
{
for ( j = 1; j < N - 1; j++ )
{
w[i][j] = ( u[i-1][j] + u[i+1][j] + u[i][j-1] + u[i][j+1] ) / 4.0;
}
}
}
/*
C and C++ cannot compute a maximum as a reduction operation.
Therefore, we define a private variable MY_DIFF for each thread.
Once they have all computed their values, we use a CRITICAL section
to update DIFF.
*/
diff = 0.0;
# pragma omp parallel shared ( diff, u, w ) private ( i, j, my_diff )
{
my_diff = 0.0;
# pragma omp for
for ( i = 1; i < M - 1; i++ )
{
for ( j = 1; j < N - 1; j++ )
{
if ( my_diff < fabs ( w[i][j] - u[i][j] ) )
{
my_diff = fabs ( w[i][j] - u[i][j] );
}
}
}
# pragma omp critical
{
if ( diff < my_diff )
{
diff = my_diff;
}
}
}
iterations++;
if ( iterations == iterations_print )
{
printf ( " %8d %f\n", iterations, diff );
iterations_print = 2 * iterations_print;
}
}
wtime = omp_get_wtime ( ) - wtime;
printf ( "\n" );
printf ( " %8d %f\n", iterations, diff );
printf ( "\n" );
printf ( " Error tolerance achieved.\n" );
printf ( " Wallclock time = %f\n", wtime );
/*
Terminate.
*/
printf ( "\n" );
printf ( "HEATED_PLATE_OPENMP:\n" );
printf ( " Normal end of execution.\n" );
return 0;
# undef M
# undef N
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a normalized timeval and report the sign:
 * returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is used as scratch space for the normalization and may be
 * modified by this call. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from y's seconds until x's microseconds are no smaller
     * than y's. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry the other way when the microsecond gap exceeds a second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the tiled order-1 3D 7-point stencil: parses grid and time
 * dimensions from argv, allocates two time planes of the grid, runs the
 * PLUTO/CLooG-tiled sweep TESTS times and reports per-run and minimum
 * wall-clock times.
 */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* Interior size from argv plus 2 ghost layers per axis.
   NOTE(review): Nx/Ny/Nz (when argc <= 3) and Nt (when argc <= 4) stay
   uninitialized if too few arguments are given -- confirm the harness
   always passes all four. */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A[t%2][z][y][x]: two time planes of an Nz x Ny x Nx grid, allocated
   row by row.  NOTE(review): malloc results are unchecked. */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
/* Stencil coefficients: alpha weights the center point, beta the six
   face neighbors. */
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
/* NOTE(review): initialization starts at index 1 on every axis, so the
   index-0 planes of A[0] are read uninitialized by the stencil below --
   verify this is intended (ghost-layer convention?). */
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Repeat the whole sweep TESTS times; keep the fastest run. */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Time-tiled loop nest generated by PLUTO/CLooG: t1 sweeps time tiles,
   t2 (parallelized across threads) z-tiles, t3 y-tiles, t4 x-tiles;
   t5..t8 are the intra-tile time/z/y/x coordinates.  Bounds are the
   generated tile-intersection expressions -- do not edit by hand. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(24*t3+Nx+20,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),64*t4+62),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
/* innermost x-loop: 7-point update, double-buffered on (t5+1)%2 */
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
nqueens.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/*
* Original code from the Cilk project (by Keith Randall)
*
* Copyright (c) 2000 Massachusetts Institute of Technology
* Copyright (c) 2000 Matteo Frigo
*/
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <alloca.h>
#include "bots.h"
#include "app-desc.h"
#include <omp.h>
/* Checking information */
/* Reference solution counts for the N-Queens problem: solutions[n-1] is
   the number of distinct solutions on an n x n board (sizes 1..14).
   Used by verify_queens() to validate a computed total_count. */
static int solutions[] = {
1,
0,
0,
2,
10, /* 5 */
4,
40,
92,
352,
724, /* 10 */
2680,
14200,
73712,
365596,
};
/* Number of board sizes with a known reference count (sizeof-based, so
   its type is size_t). */
#define MAX_SOLUTIONS sizeof(solutions)/sizeof(int)
#ifdef FORCE_TIED_TASKS
/* Per-thread solution counter, merged into total_count in find_queens(). */
int mycount=0;
#pragma omp threadprivate(mycount)
#endif
/* Total number of solutions found by the last find_queens() run. */
int total_count;
/*
* <a> contains array of <n> queen positions. Returns 1
* if none of the queens conflict, and returns 0 otherwise.
*/
/*
 * Checks a partial board: a[r] is the column of the queen in row r,
 * for rows 0..n-1.  Returns 1 when no pair of queens attacks each
 * other (same column or same diagonal), 0 otherwise.
 */
int ok(int n, char *a)
{
    int row, other;
    for (row = 0; row < n; row++) {
        char col = a[row];
        for (other = row + 1; other < n; other++) {
            char col2 = a[other];
            int span = other - row;
            /* same column, or on either diagonal through (row, col) */
            if (col2 == col || col2 == col - span || col2 == col + span)
                return 0;
        }
    }
    return 1;
}
/*
 * Serial (task-free) N-Queens search: tries every column for queen <j>
 * in board <a> and recurses until all <n> queens are placed.  Without
 * FORCE_TIED_TASKS the number of complete solutions in this subtree is
 * returned through <solutions>; with FORCE_TIED_TASKS each solution
 * increments the threadprivate counter mycount instead.
 */
#ifndef FORCE_TIED_TASKS
void nqueens_ser (int n, int j, char *a, int *solutions)
#else
void nqueens_ser (int n, int j, char *a)
#endif
{
#ifndef FORCE_TIED_TASKS
int res;
#endif
int i;
if (n == j) {
/* good solution, count it */
#ifndef FORCE_TIED_TASKS
*solutions = 1;
#else
mycount++;
#endif
return;
}
#ifndef FORCE_TIED_TASKS
*solutions = 0;
#endif
/* try each possible position for queen <j> */
for (i = 0; i < n; i++) {
{
/* serial search: no copy needed, place queen <j> directly in <a> */
a[j] = (char) i;
if (ok(j + 1, a)) {
#ifndef FORCE_TIED_TASKS
nqueens_ser(n, j + 1, a,&res);
*solutions += res;
#else
nqueens_ser(n, j + 1, a);
#endif
}
}
}
}
#if defined(IF_CUTOFF)
/*
 * Task-parallel N-Queens with an if-clause cutoff: a task is created per
 * candidate column, but once depth reaches bots_cutoff_value the if()
 * clause makes it undeferred (executed immediately), bounding the
 * task-creation overhead.  Partial counts go into the per-child csols[]
 * and are summed after the taskwait.
 */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
int *csols;
#endif
int i;
if (n == j) {
/* good solution, count it */
#ifndef FORCE_TIED_TASKS
*solutions = 1;
#else
mycount++;
#endif
return;
}
#ifndef FORCE_TIED_TASKS
*solutions = 0;
csols = alloca(n*sizeof(int));
memset(csols,0,n*sizeof(int));
#endif
/* try each possible position for queen <j> */
for (i = 0; i < n; i++) {
#pragma omp task if(depth < bots_cutoff_value)
{
/* allocate a temporary array and copy <a> into it, so concurrent
   sibling tasks never share a board */
char * b = alloca(n * sizeof(char));
memcpy(b, a, j * sizeof(char));
b[j] = (char) i;
if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
nqueens(n, j + 1, b,&csols[i],depth+1);
#else
nqueens(n, j + 1, b,depth+1);
#endif
}
}
#pragma omp taskwait
#ifndef FORCE_TIED_TASKS
for ( i = 0; i < n; i++) *solutions += csols[i];
#endif
}
#elif defined(FINAL_CUTOFF)
/*
 * Task-parallel N-Queens using final()/mergeable tasks: beyond the
 * cutoff depth tasks are created final, so their descendants run inline
 * (omp_in_final() becomes true) and can work directly on the parent's
 * board and counter instead of allocating private copies.
 */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
int *csols;
#endif
int i;
if (n == j) {
/* good solution, count it */
#ifndef FORCE_TIED_TASKS
/* += (not =): inside the final/inlined path several recursive calls
   may accumulate into the same parent counter */
*solutions += 1;
#else
mycount++;
#endif
return;
}
#ifndef FORCE_TIED_TASKS
/* only non-final invocations own a fresh csols[] and reset *solutions */
char final = omp_in_final();
if ( !final ) {
*solutions = 0;
csols = alloca(n*sizeof(int));
memset(csols,0,n*sizeof(int));
}
#endif
/* try each possible position for queen <j> */
for (i = 0; i < n; i++) {
#pragma omp task final(depth+1 >= bots_cutoff_value) mergeable
{
char *b;
int *sol;
if ( omp_in_final() && depth+1 > bots_cutoff_value ) {
/* deep inside a final task: safe to reuse the parent's board and
   counter, since everything below runs sequentially */
b = a;
#ifndef FORCE_TIED_TASKS
sol = solutions;
#endif
} else {
/* allocate a temporary array and copy <a> into it */
b = alloca(n * sizeof(char));
memcpy(b, a, j * sizeof(char));
#ifndef FORCE_TIED_TASKS
sol = &csols[i];
#endif
}
b[j] = i;
if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
nqueens(n, j + 1, b,sol,depth+1);
#else
nqueens(n, j + 1, b,depth+1);
#endif
}
}
#pragma omp taskwait
#ifndef FORCE_TIED_TASKS
if ( !final ) {
for ( i = 0; i < n; i++) *solutions += csols[i];
}
#endif
}
#elif defined(MANUAL_CUTOFF)
/*
 * Task-parallel N-Queens with a manual cutoff: a task is spawned per
 * candidate column only while depth < bots_cutoff_value; deeper
 * subtrees fall back to the serial nqueens_ser() working in place on
 * the shared board.
 */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
int *csols;
#endif
int i;
if (n == j) {
/* good solution, count it */
#ifndef FORCE_TIED_TASKS
*solutions = 1;
#else
mycount++;
#endif
return;
}
#ifndef FORCE_TIED_TASKS
/* per-child partial counts, summed after the taskwait */
*solutions = 0;
csols = (int*)alloca(n*sizeof(int));
memset(csols,0,n*sizeof(int));
#endif
/* try each possible position for queen <j> */
for (i = 0; i < n; i++) {
if ( depth < bots_cutoff_value ) {
#pragma omp task
{
/* allocate a temporary array and copy <a> into it */
char * b = (char*)alloca(n * sizeof(char));
memcpy(b, a, j * sizeof(char));
b[j] = (char) i;
if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
nqueens(n, j + 1, b,&csols[i],depth+1);
#else
nqueens(n, j + 1, b,depth+1);
#endif
}
} else {
/* serial fallback below the cutoff: place directly in <a> (no
   concurrent task touches the board at this depth) */
a[j] = (char) i;
if (ok(j + 1, a))
#ifndef FORCE_TIED_TASKS
nqueens_ser(n, j + 1, a,&csols[i]);
#else
nqueens_ser(n, j + 1, a);
#endif
}
}
#pragma omp taskwait
#ifndef FORCE_TIED_TASKS
for ( i = 0; i < n; i++) *solutions += csols[i];
#endif
}
#else
/*
 * Task-parallel N-Queens without any cutoff: one task per candidate
 * column at every level of the search tree.
 */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
int *csols;
#endif
int i;
if (n == j) {
/* good solution, count it */
#ifndef FORCE_TIED_TASKS
*solutions = 1;
#else
mycount++;
#endif
return;
}
#ifndef FORCE_TIED_TASKS
*solutions = 0;
csols = (int*)alloca(n*sizeof(int));
memset(csols,0,n*sizeof(int));
#endif
/* try each possible position for queen <j> */
for (i = 0; i < n; i++) {
#pragma omp task
{
/* allocate a temporary array and copy <a> into it */
char * b = (char*)alloca(n * sizeof(char));
memcpy(b, a, j * sizeof(char));
b[j] = (char) i;
/* depth is unused in this no-cutoff variant, so passing it along
   unincremented does not affect the result (see FIXME) */
if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
nqueens(n, j + 1, b,&csols[i],depth); //FIXME: depth or depth+1 ???
#else
nqueens(n, j + 1, b,depth); //FIXME: see above
#endif
}
}
#pragma omp taskwait
#ifndef FORCE_TIED_TASKS
for ( i = 0; i < n; i++) *solutions += csols[i];
#endif
}
/*
 * Entry point: counts all N-Queens solutions for a size x size board
 * into the global total_count, using the task-parallel nqueens().
 */
void find_queens (int size)
{
total_count=0;
bots_message("Computing N-Queens algorithm (n=%d) ", size);
#pragma omp parallel
{
/* one thread seeds the task tree; the whole team executes the tasks */
#pragma omp single
{
char *a;
a = (char*)alloca(size * sizeof(char));
#ifndef FORCE_TIED_TASKS
nqueens(size, 0, a, &total_count,0);
#else
nqueens(size, 0, a, 0);
#endif
}
#ifdef FORCE_TIED_TASKS
/* fold each thread's threadprivate counter into the global total */
#pragma omp atomic
total_count += mycount;
#endif
}
bots_message(" completed!\n");
}
/*
 * Checks the last find_queens() result against the table of known
 * solution counts.  Returns BOTS_RESULT_NA when no reference value
 * exists for <size>, otherwise SUCCESSFUL/UNSUCCESSFUL.
 *
 * Fix: the original only checked size > MAX_SOLUTIONS, so size == 0
 * (or negative, depending on the signed/unsigned promotion against the
 * sizeof-typed MAX_SOLUTIONS) indexed solutions[-1] out of bounds.
 */
int verify_queens (int size)
{
/* reference table covers sizes 1..MAX_SOLUTIONS only */
if ( size < 1 || (size_t) size > MAX_SOLUTIONS ) return BOTS_RESULT_NA;
if ( total_count == solutions[size-1]) return BOTS_RESULT_SUCCESSFUL;
return BOTS_RESULT_UNSUCCESSFUL;
}
|
core_dlag2s.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlag2c.c, mixed zc -> ds, Fri Sep 28 17:38:26 2018
*
**/
#include <plasma_core_blas.h>
#include "core_lapack.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup core_lag2
*
 * Converts m-by-n matrix A from double precision to single precision.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix A.
* n >= 0.
*
* @param[in] A
 * The lda-by-n matrix in double precision to convert.
*
* @param[in] lda
* The leading dimension of the matrix A.
* lda >= max(1,m).
*
* @param[out] As
 * On exit, the converted ldas-by-n matrix in single precision.
*
* @param[in] ldas
* The leading dimension of the matrix As.
* ldas >= max(1,m).
*
******************************************************************************/
/* Weak symbol: lets an architecture-specific implementation override
   this LAPACKE-based fallback at link time. */
__attribute__((weak))
void plasma_core_dlag2s(int m, int n,
double *A, int lda,
float *As, int ldas)
{
/* LAPACKE dlag2s: copy the m-by-n double-precision matrix A (leading
   dimension lda) into single-precision As (leading dimension ldas),
   both column-major. */
LAPACKE_dlag2s_work(LAPACK_COL_MAJOR, m, n, A, lda, As, ldas);
}
/******************************************************************************/
/* Task wrapper: submits the double-to-single conversion as an OpenMP
   task ordered by depend clauses on the full A (in) and As (out)
   ranges.  The conversion is skipped if the sequence already recorded
   an error.  NOTE(review): <request> is accepted but unused here --
   presumably kept for interface uniformity with other core_omp
   wrappers; confirm. */
void plasma_core_omp_dlag2s(int m, int n,
double *A, int lda,
float *As, int ldas,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:As[0:ldas*n])
{
if (sequence->status == PlasmaSuccess)
plasma_core_dlag2s(m, n, A, lda, As, ldas);
}
}
|
GB_binop__bxnor_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bxnor_int32
// A.*B function (eWiseMult): GB_AemultB__bxnor_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bxnor_int32
// C+=b function (dense accum): GB_Cdense_accumb__bxnor_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int32
// C=scalar+B GB_bind1st__bxnor_int32
// C=scalar+B' GB_bind1st_tran__bxnor_int32
// C=A+scalar GB_bind2nd__bxnor_int32
// C=A'+scalar GB_bind2nd_tran__bxnor_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ~((x) ^ (y)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT32 || GxB_NO_BXNOR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B with all three matrices dense; the loop body comes from the
   included template, specialized by the GB_* macros defined above.
   Returns GrB_NO_VALUE when this operator/type is compiled out. */
GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B where C is dense and B is sparse, using the precomputed slice
   arrays (kfirst/klast/pstart per task) to divide B among ntasks.
   The loop body comes from the included template. */
GrB_Info GB_Cdense_accumB__bxnor_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b where C is dense and b is a scalar passed as untyped bytes. */
GrB_Info GB_Cdense_accumb__bxnor_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
/* unreachable: the block above always returns first (artifact of the
   code generator) */
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C = A+B or C<M> = A+B, where the pattern of C is the union
   of A and B.  Work is pre-partitioned into ntasks TaskList entries;
   the C_to_* maps relate C's vectors to those of M, A and B.  The loop
   body comes from the included template. */
GrB_Info GB_AaddB__bxnor_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
/* eWiseMult: C = A.*B or C<M> = A.*B, where the pattern of C is the
   intersection of A and B.  Same task/mapping arguments as eWiseAdd;
   the loop body comes from the included template. */
GrB_Info GB_AemultB__bxnor_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the bxnor operator with the scalar bound to the first operand:
// Cx [p] = ~(x ^ Bx [p]) for all p in 0..anz-1.
GrB_Info GB_bind1st__bxnor_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // safe even when Cx aliases Bx: each entry is read before it
        // is overwritten
        Cx [p] = ~((x) ^ (Bx [p])) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the bxnor operator with the scalar bound to the second operand:
// Cx [p] = ~(Ax [p] ^ y) for all p in 0..anz-1.
GrB_Info GB_bind2nd__bxnor_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // safe even when Cx aliases Ax: each entry is read before it
        // is overwritten
        Cx [p] = ~((Ax [p]) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
/* C = op (x, A'): transpose A while applying cij = ~(x ^ aij) through
   the GB_CAST_OP macro defined just above; the transpose loop comes
   from the included GB_unop_transpose.c template. */
GrB_Info GB_bind1st_tran__bxnor_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
/* restore GB_ATYPE for any code after this function (same value here,
   since A and B types match for bxnor_int32) */
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
/* C = op (A', y): transpose A while applying cij = ~(aij ^ y) through
   the GB_CAST_OP macro defined just above; the transpose loop comes
   from the included GB_unop_transpose.c template. */
GrB_Info GB_bind2nd_tran__bxnor_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
/// \brief Kind of the directive.
OpenMPDirectiveKind Kind;
/// \brief Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// \brief Ending location of the directive.
SourceLocation EndLoc;
/// \brief Numbers of clauses.
const unsigned NumClauses;
/// \brief Number of child expressions/stmts.
const unsigned NumChildren;
/// \brief Offset from this to the start of clauses.
/// There are NumClauses pointers to clauses, they are followed by
/// NumChildren pointers to child stmts/exprs (if the directive type
/// requires an associated stmt, then it has to be the first of them).
const unsigned ClausesOffset;
/// \brief Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
reinterpret_cast<char *>(this) + ClausesOffset);
return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
}
protected:
/// \brief Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
template <typename T>
OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses, unsigned NumChildren)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
NumChildren(NumChildren),
ClausesOffset(llvm::RoundUpToAlignment(sizeof(T),
llvm::alignOf<OMPClause *>())) {}
/// \brief Sets the list of variables for this clause.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// \brief Set the associated statement for the directive.
///
/// /param S Associated statement.
///
void setAssociatedStmt(Stmt *S) {
assert(hasAssociatedStmt() && "no associated statement.");
*child_begin() = S;
}
public:
/// \brief Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only those declarations that meet some run-time
/// criteria.
template <class FilterPredicate> class filtered_clause_iterator {
protected:
// Current position and one-past-the-end into the underlying clause list.
ArrayRef<OMPClause *>::const_iterator Current;
ArrayRef<OMPClause *>::const_iterator End;
// Run-time predicate deciding which clauses are visible.
FilterPredicate Pred;
// Advance Current until it reaches End or a clause accepted by Pred,
// re-establishing the iterator's invariant.
void SkipToNextClause() {
while (Current != End && !Pred(*Current))
++Current;
}
public:
typedef const OMPClause *value_type;
// A default-constructed iterator is empty (Current == End).
filtered_clause_iterator() : Current(), End() {}
filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred)
: Current(Arr.begin()), End(Arr.end()), Pred(Pred) {
SkipToNextClause();
}
value_type operator*() const { return *Current; }
value_type operator->() const { return *Current; }
// Pre-increment: step past the current clause, then skip rejected ones.
filtered_clause_iterator &operator++() {
++Current;
SkipToNextClause();
return *this;
}
filtered_clause_iterator operator++(int) {
filtered_clause_iterator tmp(*this);
++(*this);
return tmp;
}
// The iterator doubles as its own emptiness test: true while there are
// clauses left to visit.
bool operator!() { return Current == End; }
operator bool() { return Current != End; }
bool empty() const { return Current == End; }
};
/// \brief A filter to iterate over 'linear' clauses using a C++ range
/// for loop.
struct linear_filter : public filtered_clause_iterator<
std::function<bool(const OMPClause *)> > {
linear_filter(ArrayRef<OMPClause *> Arr)
: filtered_clause_iterator(Arr, [](const OMPClause *C)->bool {
return C->getClauseKind() == OMPC_linear;
}) {}
// Shadow the base-class operators to return the concrete clause type.
const OMPLinearClause *operator*() const {
return cast<OMPLinearClause>(*Current);
}
const OMPLinearClause *operator->() const {
return cast<OMPLinearClause>(*Current);
}
// ADL begin/end so a linear_filter can be used directly in a range-for;
// end() is an empty iterator over the range [End, End).
friend linear_filter begin(const linear_filter &range) { return range; }
friend linear_filter end(const linear_filter &range) {
return linear_filter(ArrayRef<OMPClause *>(range.End, range.End));
}
};
/// \brief Gets a single clause of the specified kind \a K associated with the
/// current directive iff there is only one clause of this kind (an assertion
/// fires if more than one clause of this kind is associated with the
/// directive). Returns nullptr if no clause of kind \a K is associated with
/// the directive.
const OMPClause *getSingleClause(OpenMPClauseKind K) const;
/// \brief Returns starting location of directive kind.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns ending location of directive.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }
/// \brief Returns specified clause.
///
/// \param i Number of clause (no bounds checking is performed).
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }
/// \brief Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }
/// \brief Returns statement associated with the directive (the first child).
Stmt *getAssociatedStmt() const {
assert(hasAssociatedStmt() && "no associated statement.");
return const_cast<Stmt *>(*child_begin());
}
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
// Children are stored in trailing storage immediately after the clause
// pointers.
child_range children() {
if (!hasAssociatedStmt())
return child_range();
Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
return child_range(ChildStorage, ChildStorage + NumChildren);
}
ArrayRef<OMPClause *> clauses() { return getClauses(); }
ArrayRef<OMPClause *> clauses() const {
return const_cast<OMPExecutableDirective *>(this)->getClauses();
}
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses Number of clauses.
///
/// The single child (last ctor argument, 1) is the associated statement.
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive (used during deserialization).
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 9 children are necessary for all the loop directives, and
/// the next 7 are specific to the worksharing ones.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
///
enum {
AssociatedStmtOffset = 0,
IterationVariableOffset = 1,
LastIterationOffset = 2,
CalcLastIterationOffset = 3,
PreConditionOffset = 4,
CondOffset = 5,
SeparatedCondOffset = 6,
InitOffset = 7,
IncOffset = 8,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 9,
// The following 7 exprs are used by worksharing loops only.
IsLastIterVariableOffset = 9,
LowerBoundVariableOffset = 10,
UpperBoundVariableOffset = 11,
StrideVariableOffset = 12,
EnsureUpperBoundOffset = 13,
NextLowerBoundOffset = 14,
NextUpperBoundOffset = 15,
// Offset to the end (and start of the following counters/updates/finals
// arrays) for worksharing loop directives.
WorksharingEnd = 16,
};
/// \brief Get the counters storage.
/// Counters occupy the first CollapsedNum slots after the fixed children.
MutableArrayRef<Expr *> getCounters() {
Expr **Storage = reinterpret_cast<Expr **>(
&(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
/// Updates occupy the second CollapsedNum-sized array.
MutableArrayRef<Expr *> getUpdates() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the final counter updates storage.
/// Finals occupy the third CollapsedNum-sized array.
MutableArrayRef<Expr *> getFinals() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
protected:
/// \brief Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
/// \param NumClauses Number of clauses.
/// \param NumSpecialChildren Number of additional directive-specific stmts.
///
template <typename T>
OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses,
unsigned NumSpecialChildren = 0)
: OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
numLoopChildren(CollapsedNum, Kind) +
NumSpecialChildren),
CollapsedNum(CollapsedNum) {}
/// \brief Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
return isOpenMPWorksharingDirective(Kind) ? WorksharingEnd
: DefaultEnd;
}
/// \brief Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
3 * CollapsedNum; // Counters, Updates and Finals
}
void setIterationVariable(Expr *IV) {
*std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
*std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
*std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
*std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond, Expr *SeparatedCond) {
*std::next(child_begin(), CondOffset) = Cond;
*std::next(child_begin(), SeparatedCondOffset) = SeparatedCond;
}
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
// The following setters are valid for worksharing loop directives only
// (the extra 7 child slots exist only for those kinds).
void setIsLastIterVariable(Expr *IL) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setCounters(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
public:
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
/// \brief Loop iteration variable.
Expr *IterationVarRef;
/// \brief Loop last iteration number.
Expr *LastIteration;
/// \brief Loop number of iterations.
Expr *NumIterations;
/// \brief Calculation of last iteration.
Expr *CalcLastIteration;
/// \brief Loop pre-condition.
Expr *PreCond;
/// \brief Loop condition.
Expr *Cond;
/// \brief A condition with 1 iteration separated.
Expr *SeparatedCond;
/// \brief Loop iteration variable init.
Expr *Init;
/// \brief Loop increment.
Expr *Inc;
/// \brief IsLastIteration - local flag variable passed to runtime.
Expr *IL;
/// \brief LowerBound - local variable passed to runtime.
Expr *LB;
/// \brief UpperBound - local variable passed to runtime.
Expr *UB;
/// \brief Stride - local variable passed to runtime.
Expr *ST;
/// \brief EnsureUpperBound -- expression LB = min(LB, NumIterations).
Expr *EUB;
/// \brief Update of LowerBound for statically scheduled 'omp for' loops.
Expr *NLB;
/// \brief Update of UpperBound for statically scheduled 'omp for' loops.
Expr *NUB;
/// \brief Loop counters.
SmallVector<Expr *, 4> Counters;
/// \brief Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
/// \brief Final loop counter values for CodeGen.
SmallVector<Expr *, 4> Finals;
/// \brief Check if all the expressions are built (does not check the
/// worksharing ones).
/// NOTE(review): CalcLastIteration is not checked here either — confirm
/// whether that is intentional.
bool builtAll() {
return IterationVarRef != nullptr && LastIteration != nullptr &&
NumIterations != nullptr && PreCond != nullptr &&
Cond != nullptr && SeparatedCond != nullptr && Init != nullptr &&
Inc != nullptr;
}
/// \brief Initialize all the fields to null.
/// \param Size Number of elements in the counters/finals/updates arrays.
void clear(unsigned Size) {
IterationVarRef = nullptr;
LastIteration = nullptr;
CalcLastIteration = nullptr;
PreCond = nullptr;
Cond = nullptr;
SeparatedCond = nullptr;
Init = nullptr;
Inc = nullptr;
IL = nullptr;
LB = nullptr;
UB = nullptr;
ST = nullptr;
EUB = nullptr;
NLB = nullptr;
NUB = nullptr;
Counters.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
for (unsigned i = 0; i < Size; ++i) {
Counters[i] = nullptr;
Updates[i] = nullptr;
Finals[i] = nullptr;
}
}
};
/// \brief Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }
Expr *getIterationVariable() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IterationVariableOffset)));
}
Expr *getLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LastIterationOffset)));
}
Expr *getCalcLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CalcLastIterationOffset)));
}
Expr *getPreCond() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PreConditionOffset)));
}
/// \brief Returns the loop condition; the separated-iteration variant when
/// \a SeparateIter is true.
Expr *getCond(bool SeparateIter) const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(),
(SeparateIter ? SeparatedCondOffset : CondOffset))));
}
Expr *getInit() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
}
Expr *getInc() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
}
Expr *getIsLastIterVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IsLastIterVariableOffset)));
}
Expr *getLowerBoundVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LowerBoundVariableOffset)));
}
Expr *getUpperBoundVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), UpperBoundVariableOffset)));
}
Expr *getStrideVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), StrideVariableOffset)));
}
Expr *getEnsureUpperBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), EnsureUpperBoundOffset)));
}
Expr *getNextLowerBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextLowerBoundOffset)));
}
Expr *getNextUpperBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextUpperBoundOffset)));
}
/// \brief Returns the innermost loop body, descending through CollapsedNum
/// nested ForStmts.
const Stmt *getBody() const {
// This relies on the loop form being already checked by Sema.
Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
Body = cast<ForStmt>(Body)->getBody();
for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
Body = Body->IgnoreContainers();
Body = cast<ForStmt>(Body)->getBody();
}
return Body;
}
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
return const_cast<OMPLoopDirective *>(this)->getFinals();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass ||
T->getStmtClass() == OMPForDirectiveClass ||
T->getStmtClass() == OMPForSimdDirectiveClass ||
T->getStmtClass() == OMPParallelForDirectiveClass ||
T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
EndLoc, CollapsedNum, NumClauses) {}
/// \brief Build an empty directive (used during deserialization).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// \brief Build an empty directive (used during deserialization).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// \brief Build an empty directive (used during deserialization).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
/// The single child (last ctor argument, 1) is the associated statement.
OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive (used during deserialization).
///
/// \param NumClauses Number of clauses.
///
explicit OMPSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
/// Takes no clauses (0); the single child (1) is the associated statement.
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive (used during deserialization).
///
explicit OMPSectionDirective()
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
/// The single child (last ctor argument, 1) is the associated statement.
OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive (used during deserialization).
///
/// \param NumClauses Number of clauses.
///
explicit OMPSingleDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSingleDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSingleDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSingleDirectiveClass;
}
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
/// Takes no clauses (0); the single child (1) is the associated statement.
OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive (used during deserialization).
///
explicit OMPMasterDirective()
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPMasterDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterDirectiveClass;
}
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Name of the directive.
DeclarationNameInfo DirName;
/// \brief Build directive with the given start and end location.
///
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
/// Takes no clauses (0); the single child (1) is the associated statement.
OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
StartLoc, EndLoc, 0, 1),
DirName(Name) {}
/// \brief Build an empty directive (used during deserialization).
///
explicit OMPCriticalDirective()
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
SourceLocation(), SourceLocation(), 0, 1),
DirName() {}
/// \brief Set name of the directive.
///
/// \param Name Name of the directive.
///
void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPCriticalDirective *
Create(const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Return name of the directive.
///
DeclarationNameInfo getDirectiveName() const { return DirName; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCriticalDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      // Trailing base-ctor args: NumClauses clauses, 1 child (the associated
      // statement).
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      // Trailing base-ctor args: NumClauses clauses, 1 child (the associated
      // statement).
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
                               EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      // Stand-alone directive: 0 clauses, 0 children.
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               StartLoc, EndLoc, 0, 0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      // Stand-alone directive: 0 clauses, 0 children.
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               StartLoc, EndLoc, 0, 0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      // Stand-alone directive: 0 clauses, 0 children.
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               StartLoc, EndLoc, 0, 0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      // Stand-alone directive: NumClauses clauses, 0 children.
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses, 0) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      // Trailing base-ctor args: 0 clauses, 1 child (the associated statement).
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, 0, 1) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      // 5 children; see the setters/getters below for the slot layout:
      // 0 = associated statement, 1 = 'x', 2 = update expr, 3 = 'v',
      // 4 = 'expr'.
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  /// \brief Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// \brief Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// \brief Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// \brief Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// \brief Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// \brief Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// \brief Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// \brief Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// \brief Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      // Trailing base-ctor args: NumClauses clauses, 1 child (the associated
      // statement).
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      // Trailing base-ctor args: NumClauses clauses, 1 child (the associated
      // statement).
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
} // end namespace clang
#endif
|
fused_rowwise_nbitfake_conversion_ops.h | #pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/reducer_functors.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace internal {
// Returns true when the host stores multi-byte integers least-significant
// byte first (i.e. the machine is little-endian).
inline bool is_little_endian() {
  const std::uint32_t probe = 1u;
  const auto* first_byte = reinterpret_cast<const std::uint8_t*>(&probe);
  return *first_byte == 1u;
}
void convertfp32fp32(float* dst, const float* src, size_t N);
void convertfp16fp32(float* dst, const at::Half* src, size_t N);
/**
* @params Xmin initial solution passed and potentiall better solution returns
* @params Xmax initial solution passed and potentiall better solution returns
*/
void param_search_greedy(
const float* X,
int N,
const int n_bins, // = 200,
const float ratio, // = 0.16,
float& Xmin,
float& Xmax,
int bit_rate);
} // namespace internal
// Fake 2/4 bit quantization
// Creates a 2/4bit rowwise quantized blob with scales and biases in fp16
// The storage format is 8 bit rowwise with scales and biases in fp32
template <
    int BIT_RATE,
    typename T,
    void (*convert)(float* dst, const T* src, size_t N),
    bool GREEDY = false>
class FloatToFusedNBitFakeRowwiseQuantizedOp final
    : public Operator<CPUContext> {
 public:
  FloatToFusedNBitFakeRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws)
      : Operator<CPUContext>(def, ws) {}

  ~FloatToFusedNBitFakeRowwiseQuantizedOp() override {}

  // Quantizes each row of the 2-D input independently: convert the row to
  // fp32, find (or greedily search for) [min, max], then store the quantized
  // bytes followed by fp32 scale and bias in the output row.
  bool RunOnDevice() override {
    CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
    // Validate the element type and shape before touching sizes or
    // allocating the output (the original checked the type only after
    // allocation, and read size(1) before enforcing dim() == 2).
    if (!std::is_same<T, float>::value && !std::is_same<T, at::Half>::value) {
      CAFFE_THROW("Unsupported data type");
    }
    const auto& input = Input(DATA_FLOAT);
    CAFFE_ENFORCE_EQ(input.dim(), 2, "Expect input to be a matrix");
    const auto input_rows = input.size(0);
    const auto input_columns = input.size(1);

    // Each output row holds one quantized byte per input element, followed by
    // two fp32 values (scale, bias) => 8 extra bytes per row.
    const std::vector<int64_t> output_dimensions = {input_rows,
                                                    input_columns + 8};
    auto* output = Output(
        DATA_FUSED_SCALE_BIAS_INT8, output_dimensions, at::dtype<uint8_t>());
    const auto* input_data = input.template data<T>();
    auto* output_data = output->template mutable_data<uint8_t>();
    const auto output_columns = output->size(1);

    // Scratch buffer for one fp32-converted row. In GREEDY mode rows are
    // processed in parallel, so each OpenMP thread gets its own slice.
#ifdef _OPENMP
    vector<float> tmp_vec(input_columns * (GREEDY ? omp_get_max_threads() : 1));
#else
    vector<float> tmp_vec(input_columns);
#endif

#pragma omp parallel for if (GREEDY)
    for (int row = 0; row < input_rows; ++row) {
      float* tmp = tmp_vec.data();
#ifdef _OPENMP
      if (GREEDY) {
        tmp = &tmp_vec[omp_get_thread_num() * input_columns];
      }
#endif
      convert(tmp, input_data + row * input_columns, input_columns);
      uint8_t* output_row = output_data + row * output_columns;
      float* output_row_scale_bias =
          reinterpret_cast<float*>(output_row + input_columns);
      float minimum_element = *std::min_element(tmp, tmp + input_columns);
      float maximum_element = *std::max_element(tmp, tmp + input_columns);
      if (GREEDY) {
        // Greedy search may tighten [min, max] to reduce quantization error.
        internal::param_search_greedy(
            tmp,
            input_columns,
            200, // n_bins
            0.16, // ratio
            minimum_element,
            maximum_element,
            BIT_RATE);
      }
      // Round the bias through fp16 to mimic the real fused storage format,
      // which keeps scale/bias in half precision.
      minimum_element = static_cast<at::Half>(minimum_element);
      const float range = maximum_element - minimum_element;
      // Scale is also rounded through fp16; a zero range degenerates to 1.0f
      // so the inverse below stays finite.
      const float scale = range == 0
          ? 1.0f
          : static_cast<float>(static_cast<at::Half>(
                range / static_cast<float>((1 << BIT_RATE) - 1)));
      const float inverse_scale = 1.0f / scale;
      output_row_scale_bias[0] = scale;
      output_row_scale_bias[1] = minimum_element;
      // Signed index avoids a signed/unsigned comparison with input_columns.
      for (int64_t col = 0; col < input_columns; ++col) {
        // Round to nearest and clamp to [0, 2^BIT_RATE - 1].
        output_row[col] = std::max(
            0,
            std::min<int>(
                std::lrintf((tmp[col] - minimum_element) * inverse_scale),
                (1 << BIT_RATE) - 1));
      }
    }
    return true;
  }

 private:
  INPUT_TAGS(DATA_FLOAT);
  // INT8 suffix because this is a fake quantization operator whose output
  // type is always 8-bit regardless of BIT_RATE.
  OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS_INT8);
};
} // namespace caffe2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.